content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(ggplot2)
library(data.table)
library(dplyr)
library(tidyverse)
# Create the data directory (if missing) and download the zip archive
if(!file.exists("./DataProject")){dir.create("./DataProject")}
FileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(FileUrl, destfile = "./DataProject.zip")
# Unzip the archive.
# BUG FIX: the archive is saved as "DataProject.zip" (capital P); the old call
# referenced "Dataproject.zip", which fails on case-sensitive file systems.
unzip("./DataProject.zip", exdir = "DataProject")
# Load the variable (feature) names: column 2 of features.txt holds the names
features <- read_delim('./DataProject/UCI HAR Dataset/features.txt', " ", col_names = F) %>% pull(2)
# Read the tables into R
# First the Train Data Files
x_train <- read_table("./DataProject/UCI HAR Dataset/train/X_train.txt", col_names = features)
y_train <- read_table("./DataProject/UCI HAR Dataset/train/y_train.txt", col_names = "activity", col_types = cols(col_factor(levels=1:6)))
subject_train <- read_table("./DataProject/UCI HAR Dataset/train/subject_train.txt", col_names = "subject")
# Combine subject, activity, and measurements; tag the origin set
train <- bind_cols(subject_train,y_train, x_train) %>% mutate(set="train") %>% select(set, everything())
rm(list = c("y_train","x_train","subject_train"))
# Second the Test Data Files
x_test <- read_table("./DataProject/UCI HAR Dataset/test/X_test.txt", col_names = features)
y_test <- read_table("./DataProject/UCI HAR Dataset/test/y_test.txt", col_names = "activity", col_types = cols(col_factor(levels=1:6)))
subject_test <- read_table("./DataProject/UCI HAR Dataset/test/subject_test.txt", col_names = "subject")
test <- bind_cols(subject_test,y_test, x_test) %>% mutate(set="test") %>% select(set, everything())
rm(list = c("y_test","x_test","subject_test"))
# Merge the train and test sets into one data set
All_data <- bind_rows(train,test)
rm(list = c("test","train"))
# Extract only the measurements on the mean and standard deviation for each measurement.
# Columns 1:3 are the id columns (set, subject, activity); "mean()" / "std()"
# match the literal feature names (the empty () is a harmless regex group here).
All_data<- All_data %>% select(1:3, contains("mean()"), contains("std()"))
# Use descriptive activity names to name the activities in the data set
# (activity_labels.txt column 2 lists the labels in the order of levels 1:6)
activity <- read_delim('./DataProject/UCI HAR Dataset/activity_labels.txt', " ", col_names = F) %>% pull(2)
levels(All_data$activity) <- activity
# Appropriately label the data set with descriptive variable names.
names(All_data)
names(All_data)<-gsub("Acc", "Accelerometer", names(All_data))
names(All_data)<-gsub("Gyro", "Gyroscope", names(All_data))
names(All_data)<-gsub("BodyBody", "Body", names(All_data))
names(All_data)<-gsub("Mag", "Magnitude", names(All_data))
names(All_data)<-gsub("^t", "Time", names(All_data))
names(All_data)<-gsub("^f", "Frequency", names(All_data))
names(All_data)<-gsub("tBody", "TimeBody", names(All_data))
names(All_data)<-gsub("-mean()", "Mean", names(All_data), ignore.case = TRUE)
names(All_data)<-gsub("-std()", "STD", names(All_data), ignore.case = TRUE)
names(All_data)<-gsub("-freq()", "Frequency", names(All_data), ignore.case = TRUE)
names(All_data)<-gsub("gravity", "Gravity", names(All_data))
# From the data set in step 4, create a second, independent tidy data set with the average of each variable for each activity and each subject.
# -(1:3) drops the set/subject/activity columns so only measurements are averaged
All_data_avg <- All_data %>% group_by(subject, activity) %>% summarise_at(-(1:3),mean,na.rm = T)
# Create a textfile
write.table(All_data_avg, file="tidy_dataset.txt", row.names = FALSE)
| /run_analysis.R | no_license | RosalieDud/Getting-and-Cleaning-Data-Course-Project | R | false | false | 3,286 | r | library(ggplot2)
library(data.table)
library(dplyr)
library(tidyverse)
# Create file and download the zipfile
if(!file.exists("./DataProject")){dir.create("./DataProject")}
FileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(FileUrl, destfile = "./DataProject.zip")
# Unzip the file
unzip("Dataproject.zip", exdir="DataProject")
# Upload names of the variables
features <- read_delim('./DataProject/UCI HAR Dataset/features.txt', " ", col_names = F) %>% pull(2)
# Read the tables into R
# First the Train Data Files
x_train <- read_table("./DataProject/UCI HAR Dataset/train/X_train.txt", col_names = features)
y_train <- read_table("./DataProject/UCI HAR Dataset/train/y_train.txt", col_names = "activity", col_types = cols(col_factor(levels=1:6)))
subject_train <- read_table("DataProject/UCI HAR Dataset/train/subject_train.txt", col_names = "subject")
train <- bind_cols(subject_train,y_train, x_train) %>% mutate(set="train") %>% select(set, everything())
rm(list = c("y_train","x_train","subject_train"))
# Second the Test Data Files
x_test <- read_table("./DataProject/UCI HAR Dataset/test/X_test.txt", col_names = features)
y_test <- read_table("DataProject/UCI HAR Dataset/test/y_test.txt", col_names = "activity", col_types = cols(col_factor(levels=1:6)))
subject_test <- read_table("DataProject/UCI HAR Dataset/test/subject_test.txt", col_names = "subject")
test <- bind_cols(subject_test,y_test, x_test) %>% mutate(set="test") %>% select(set, everything())
rm(list = c("y_test","x_test","subject_test"))
# Merge the files
All_data <- bind_rows(train,test)
rm(list = c("test","train"))
# Extract only the measurements on the mean and standard deviation for each measurement.
All_data<- All_data %>% select(1:3, contains("mean()"), contains("std()"))
# Use descriptive activity names to name the activities in the data set
activity <- read_delim('./DataProject/UCI HAR Dataset/activity_labels.txt', " ", col_names = F) %>% pull(2)
levels(All_data$activity) <- activity
# Appropriately label the data set with descriptive variable names.
names(All_data)
names(All_data)<-gsub("Acc", "Accelerometer", names(All_data))
names(All_data)<-gsub("Gyro", "Gyroscope", names(All_data))
names(All_data)<-gsub("BodyBody", "Body", names(All_data))
names(All_data)<-gsub("Mag", "Magnitude", names(All_data))
names(All_data)<-gsub("^t", "Time", names(All_data))
names(All_data)<-gsub("^f", "Frequency", names(All_data))
names(All_data)<-gsub("tBody", "TimeBody", names(All_data))
names(All_data)<-gsub("-mean()", "Mean", names(All_data), ignore.case = TRUE)
names(All_data)<-gsub("-std()", "STD", names(All_data), ignore.case = TRUE)
names(All_data)<-gsub("-freq()", "Frequency", names(All_data), ignore.case = TRUE)
names(All_data)<-gsub("gravity", "Gravity", names(All_data))
# From the data set in step 4, create a second, independent tidy data set with the average of each variable for each activity and each subject.
All_data_avg <- All_data %>% group_by(subject, activity) %>% summarise_at(-(1:3),mean,na.rm = T)
# Create a textfile
write.table(All_data_avg, file="tidy_dataset.txt", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression_plot.R
\name{is_mean_enclosed}
\alias{is_mean_enclosed}
\title{Check mean enclosure}
\usage{
is_mean_enclosed(data, x, y, conf.level = 0.99)
}
\arguments{
\item{data}{data object}
\item{x}{age variable in \code{data}}
\item{y}{expression variable in \code{data}}
\item{conf.level}{confidence level}
}
\value{
Returns \code{TRUE} only if for every set of upper and lower pointwise
confidence limits, the mean expression is between these bounds, \code{FALSE} if
the mean expression falls outside of the confidence limits for at least one
point.
}
\description{
Is the mean expression fully enclosed by the loess confidence bands?
}
\examples{
is_mean_enclosed(mtcars, "wt", "qsec", conf.level = 0.99)
is_mean_enclosed(mtcars, "wt", "qsec", conf.level = 0.95)
}
\author{
Derek Chiu
}
| /BrCa_Age_Associated_TMA/Packages/ageassn/man/is_mean_enclosed.Rd | no_license | BCCRCMO/BrCa_AgeAssociations | R | false | true | 873 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression_plot.R
\name{is_mean_enclosed}
\alias{is_mean_enclosed}
\title{Check mean enclosure}
\usage{
is_mean_enclosed(data, x, y, conf.level = 0.99)
}
\arguments{
\item{data}{data object}
\item{x}{age variable in \code{data}}
\item{y}{expression variable in \code{data}}
\item{conf.level}{confidence level}
}
\value{
Returns \code{TRUE} only if for every set of upper and lower pointwise
confidence limits, the mean expression is between these bounds, \code{FALSE} if
the mean expression falls outside of the confidence limits for at least one
point.
}
\description{
Is the mean expression fully enclosed by the loess confidence bands?
}
\examples{
is_mean_enclosed(mtcars, "wt", "qsec", conf.level = 0.99)
is_mean_enclosed(mtcars, "wt", "qsec", conf.level = 0.95)
}
\author{
Derek Chiu
}
|
# ggplot2 text-labelling examples. Assumes `murders` and an initial ggplot
# object `p` are defined earlier in the session -- TODO confirm, since `p`
# is used below before it is (re)defined with the global aesthetic.

# change the size of the points
p + geom_point(aes(population/10^6, total), size = 3) +
geom_text(aes(population/10^6, total, label = abb))
# move text labels slightly to the right
p + geom_point(aes(population/10^6, total), size = 3) +
geom_text(aes(population/10^6, total, label = abb), nudge_x = 1)
# simplify code by adding global aesthetic
p <- murders %>% ggplot(aes(population/10^6, total, label = abb))
p + geom_point(size = 3) +
geom_text(nudge_x = 1.5)
# local aesthetics override global aesthetics
p + geom_point(size = 3) +
geom_text(aes(x = 10, y = 800, label = "Hello there!")) | /tinkering/Ag.r | no_license | Brandon780/scripts_ | R | false | false | 602 | r | # change the size of the points
p + geom_point(aes(population/10^6, total), size = 3) +
geom_text(aes(population/10^6, total, label = abb))
# move text labels slightly to the right
p + geom_point(aes(population/10^6, total), size = 3) +
geom_text(aes(population/10^6, total, label = abb), nudge_x = 1)
# simplify code by adding global aesthetic
p <- murders %>% ggplot(aes(population/10^6, total, label = abb))
p + geom_point(size = 3) +
geom_text(nudge_x = 1.5)
# local aesthetics override global aesthetics
p + geom_point(size = 3) +
geom_text(aes(x = 10, y = 800, label = "Hello there!")) |
#' Dimension Stability Analysis of \code{\link[EGAnet]{EGA}}
#'
#' \code{bootEGA} Estimates the number of dimensions of \emph{n} bootstraps
#' using the empirical (partial) correlation matrix (parametric) or resampling from
#' the empirical dataset (non-parametric). It also estimates a typical
#' median network structure, which is formed by the median or mean pairwise (partial)
#' correlations over the \emph{n} bootstraps.
#'
#' @param data Matrix or data frame.
#' Includes the variables to be used in the \code{bootEGA} analysis
#'
#' @param n Numeric integer.
#' Number of replica samples to generate from the bootstrap analysis.
#' At least \code{500} is recommended
#'
#' @param model Character.
#' A string indicating the method to use.
#' Defaults to \code{"glasso"}.
#'
#' Current options are:
#'
#' \itemize{
#'
#' \item{\strong{\code{"glasso"}}}
#' {Estimates the Gaussian graphical model using graphical LASSO with
#' extended Bayesian information criterion to select optimal regularization parameter.
#' See \code{\link[EGAnet]{EBICglasso.qgraph}}}
#'
#' \item{\strong{\code{"TMFG"}}}
#' {Estimates a Triangulated Maximally Filtered Graph.
#' See \code{\link[NetworkToolbox]{TMFG}}}
#'
#' }
#'
#' @param algorithm A string indicating the algorithm to use.
#' Current options are:
#'
#' \itemize{
#'
#' \item{\strong{\code{walktrap}}}
#' {Computes the Walktrap algorithm using \code{\link[igraph]{cluster_walktrap}}}
#'
#' \item{\strong{\code{louvain}}}
#' {Computes the Louvain algorithm using \code{\link[igraph]{cluster_louvain}}}
#'
#' }
#'
#' @param type Character.
#' A string indicating the type of bootstrap to use.
#'
#' Current options are:
#'
#' \itemize{
#'
#' \item{\strong{\code{"parametric"}}}
#' {Generates \code{n} new datasets (multivariate normal random distributions) based on the
#' original dataset, via the \code{\link[mvtnorm]{Mvnorm}} function of the mvtnorm package}
#'
#' \item{\strong{\code{"resampling"}}}
#' {Generates n random subsamples of the original data}
#'
#' }
#'
#' @param typicalStructure Boolean.
#' If \code{TRUE}, returns the typical network of partial correlations
#' (estimated via graphical lasso or via TMFG) and estimates its dimensions.
#' The "typical network" is the median of all pairwise correlations over the \emph{n} bootstraps.
#' Defaults to \code{TRUE}
#'
#' @param plot.typicalStructure Boolean.
#' If \code{TRUE}, returns a plot of the typical network (partial correlations),
#' which is the median of all pairwise correlations over the \emph{n} bootstraps,
#' and its estimated dimensions.
#' Defaults to \code{TRUE}
#'
#' @param ncores Numeric.
#' Number of cores to use in computing results.
#' Defaults to \code{parallel::detectCores() / 2} or half of your
#' computer's processing power.
#' Set to \code{1} to not use parallel computing.
#' Recommended to use maximum number of cores minus one
#'
#' If you're unsure how many cores your computer has,
#' then use the following code: \code{parallel::detectCores()}
#'
#' @param ... Additional arguments to be passed to \code{\link{EBICglasso.qgraph}}
#' or \code{\link[NetworkToolbox]{TMFG}}
#'
#' @return Returns a list containing:
#'
#' \item{n}{Number of replica samples in bootstrap}
#'
#' \item{boot.ndim}{Number of dimensions identified in each replica sample}
#'
#' \item{boot.wc}{Item allocation for each replica sample}
#'
#' \item{bootGraphs}{Networks of each replica sample}
#'
#' \item{summary.table}{Summary table containing number of replica samples, median,
#' standard deviation, standard error, 95\% confidence intervals, and quantiles (lower = 2.5\% and upper = 97.5\%)}
#'
#' \item{frequency}{Proportion of times the number of dimensions was identified
#' (e.g., .85 of 1,000 = 850 times that specific number of dimensions was found)}
#'
#' \item{EGA}{Output of the original \code{\link[EGAnet]{EGA}} results}
#'
#' \item{typicalGraph}{A list containing:
#'
#' \itemize{
#'
#' \item{\strong{\code{graph}}}
#' {Network matrix of the median network structure}
#'
#' \item{\strong{\code{typical.dim.variables}}}
#' {An ordered matrix of item allocation}
#'
#' \item{\strong{\code{wc}}}
#' {Item allocation of the median network}
#'
#' }
#' }
#'
#' @author Hudson F. Golino <hfg9s at virginia.edu> and Alexander P. Christensen <alexpaulchristensen@gmail.com>
#'
#' @examples
#'
#' # Load data
#' wmt <- wmt2[,7:24]
#'
#' \dontrun{
#'
#' # bootEGA glasso example
#' boot.wmt <- bootEGA(data = wmt, n = 500, typicalStructure = TRUE,
#' plot.typicalStructure = TRUE, model = "glasso", type = "parametric", ncores = 4)
#' }
#'
#' # Load data
#' intwl <- intelligenceBattery[,8:66]
#'
#' \dontrun{
#' # bootEGA TMFG example
#' boot.intwl <- bootEGA(data = intelligenceBattery[,8:66], n = 500, typicalStructure = TRUE,
#' plot.typicalStructure = TRUE, model = "TMFG", type = "parametric", ncores = 4)
#'
#' }
#'
#' @references
#' Christensen, A. P., & Golino, H. F. (2019).
#' Estimating the stability of the number of factors via Bootstrap Exploratory Graph Analysis: A tutorial.
#' \emph{PsyArXiv}.
#' doi:\href{https://doi.org/10.31234/osf.io/9deay}{10.31234/osf.io/9deay}
#'
#' @seealso \code{\link[EGAnet]{EGA}} to estimate the number of dimensions of an instrument using EGA
#' and \code{\link[EGAnet]{CFA}} to verify the fit of the structure suggested by EGA using confirmatory factor analysis.
#'
#' @importFrom stats cov median sd qt quantile
#'
#' @export
#'
# Bootstrap EGA
# Updated 11.05.2020
bootEGA <- function(data, n,
                    model = c("glasso", "TMFG"), algorithm = c("walktrap", "louvain"),
                    type = c("parametric", "resampling"),
                    typicalStructure = TRUE, plot.typicalStructure = TRUE, ncores, ...) {
  #### MISSING ARGUMENTS HANDLING ####
  if (missing(model)) {
    model <- "glasso"
  } else {
    model <- match.arg(model)
  }
  if (missing(algorithm)) {
    algorithm <- "walktrap"
  } else {
    algorithm <- match.arg(algorithm)
  }
  if (missing(type)) {
    type <- "parametric"
  } else {
    type <- match.arg(type)
  }
  if (missing(ncores)) {
    # default to half the available cores
    ncores <- ceiling(parallel::detectCores() / 2)
  }
  #### MISSING ARGUMENTS HANDLING ####

  # number of cases
  cases <- nrow(data)

  # For the parametric bootstrap, estimate the (negative) partial correlation
  # network once; its pseudoinverse serves as the population covariance matrix
  if (type == "parametric") {
    if (model == "glasso") {
      g <- -EBICglasso.qgraph(qgraph::cor_auto(data), n = cases,
                              lambda.min.ratio = 0.1, returnAllResults = FALSE, ...)
      diag(g) <- 1
    } else if (model == "TMFG") {
      g <- -NetworkToolbox::LoGo(data, normal = TRUE, partial = TRUE, ...)
      diag(g) <- 1
    }
  }

  # Generate the n replica datasets
  datalist <- list()
  count <- 0
  message("\nGenerating data...", appendLF = FALSE)
  repeat {
    count <- count + 1
    if (type == "parametric") {
      datalist[[count]] <- mvtnorm::rmvnorm(cases, sigma = corpcor::pseudoinverse(g))
    } else if (type == "resampling") {
      datalist[[count]] <- data[sample(1:cases, replace = TRUE), ]
    }
    if (count == n) {
      break
    }
  }
  message("done", appendLF = TRUE)

  # Correlation matrices for every replica (computed in parallel)
  corlist <- list()
  message("\nComputing correlation matrices...\n", appendLF = FALSE)
  cl <- parallel::makeCluster(ncores)
  # BUG FIX: 'varlist' must be a character vector of variable names.
  # The previous code spliced '...' into c(), so any additional argument
  # (e.g., gamma = 0.5) was coerced to character and clusterExport failed
  # looking for a variable with that name.
  parallel::clusterExport(cl = cl,
                          varlist = c("datalist", "corlist", "cases"),
                          envir = environment())
  corlist <- pbapply::pblapply(X = datalist, cl = cl,
                               FUN = qgraph::cor_auto)

  # Estimate a network for every replica sample
  message("Estimating networks...\n", appendLF = FALSE)
  if (model == "glasso") {
    boots <- pbapply::pblapply(X = corlist, cl = cl,
                               FUN = EBICglasso.qgraph,
                               n = cases,
                               lambda.min.ratio = 0.1,
                               returnAllResults = FALSE,
                               ...)
  } else if (model == "TMFG") {
    boots <- pbapply::pblapply(X = corlist, cl = cl,
                               FUN = NetworkToolbox::TMFG,
                               normal = TRUE,
                               ...)
    # TMFG returns a list; keep only the adjacency matrix
    for (i in 1:n) {
      boots[[i]] <- boots[[i]]$A
    }
  }
  parallel::stopCluster(cl)

  message("Computing results...", appendLF = FALSE)

  # Label each bootstrap network with the original variable names
  bootGraphs <- vector("list", n)
  for (i in 1:n) {
    bootGraphs[[i]] <- boots[[i]]
    colnames(bootGraphs[[i]]) <- colnames(data)
    rownames(bootGraphs[[i]]) <- colnames(data)
  }

  # Convert to igraph objects and detect communities (dimensions) per replica
  boot.igraph <- vector("list", n)
  for (l in 1:n) {
    boot.igraph[[l]] <- NetworkToolbox::convert2igraph(abs(bootGraphs[[l]]))
  }
  boot.wc <- vector("list", n)
  for (m in 1:n) {
    boot.wc[[m]] <- switch(algorithm,
                           walktrap = igraph::cluster_walktrap(boot.igraph[[m]]),
                           louvain = igraph::cluster_louvain(boot.igraph[[m]])
    )
  }

  # Number of dimensions identified in each replica sample
  boot.ndim <- matrix(NA, nrow = n, ncol = 2)
  for (m in 1:n) {
    boot.ndim[m, 2] <- max(boot.wc[[m]]$membership)
  }
  colnames(boot.ndim) <- c("Boot.Number", "N.Dim")
  boot.ndim[, 1] <- seq_len(n)

  # Typical network: elementwise median (glasso) or mean (TMFG) of all
  # bootstrap networks, with its own community detection
  if (typicalStructure == TRUE) {
    if (model == "glasso") {
      typical.Structure <- apply(simplify2array(bootGraphs), 1:2, median)
    } else if (model == "TMFG") {
      typical.Structure <- apply(simplify2array(bootGraphs), 1:2, mean)
    }
    typical.igraph <- NetworkToolbox::convert2igraph(abs(typical.Structure))
    typical.wc <- switch(algorithm,
                         walktrap = igraph::cluster_walktrap(typical.igraph),
                         louvain = igraph::cluster_louvain(typical.igraph)
    )
    typical.ndim <- max(typical.wc$membership)
    dim.variables <- data.frame(items = colnames(data), dimension = typical.wc$membership)
  }
  # ROBUSTNESS FIX: plotting needs the typical structure, so only plot when it
  # was computed (previously plot.typicalStructure = TRUE combined with
  # typicalStructure = FALSE raised an "object not found" error)
  if (typicalStructure && plot.typicalStructure) {
    plot.typical.ega <- qgraph::qgraph(typical.Structure, layout = "spring",
                                       vsize = 6, groups = as.factor(typical.wc$membership))
  }

  # Summary statistics (median, SE, t-based 95% CI, and 2.5%/97.5% quantiles)
  Median <- median(boot.ndim[, 2])
  se.boot <- sd(boot.ndim[, 2])
  ciMult <- qt(0.95/2 + 0.5, nrow(boot.ndim) - 1)
  ci <- se.boot * ciMult
  quant <- quantile(boot.ndim[, 2], c(.025, .975), na.rm = TRUE)
  summary.table <- data.frame(n.Boots = n, median.dim = Median,
                              SE.dim = se.boot, CI.dim = ci,
                              Lower.CI = Median - ci, Upper.CI = Median + ci,
                              Lower.Quantile = quant[1], Upper.Quantile = quant[2])
  row.names(summary.table) <- NULL

  # Frequency (proportion of replicas) for each identified number of dimensions
  dim.range <- range(boot.ndim[, 2])
  lik <- matrix(0, nrow = diff(dim.range) + 1, ncol = 2)
  colnames(lik) <- c("# of Factors", "Frequency")
  count <- 0
  for (i in seq(from = min(dim.range), to = max(dim.range), by = 1)) {
    count <- count + 1
    lik[count, 1] <- i
    lik[count, 2] <- length(which(boot.ndim[, 2] == i)) / n
  }

  message("done", appendLF = TRUE)

  # Assemble the result object
  result <- list()
  result$n <- n
  result$boot.ndim <- boot.ndim
  result$boot.wc <- boot.wc
  result$bootGraphs <- bootGraphs
  result$summary.table <- summary.table
  result$frequency <- lik
  result$EGA <- suppressMessages(suppressWarnings(EGA(data = data, model = model, plot.EGA = FALSE)))

  # Typical structure
  if (typicalStructure == TRUE) {
    typicalGraph <- list()
    typicalGraph$graph <- typical.Structure
    typicalGraph$typical.dim.variables <- dim.variables[order(dim.variables[, 2]), ]
    typicalGraph$wc <- typical.wc$membership
    result$typicalGraph <- typicalGraph
  }

  class(result) <- "bootEGA"
  return(result)
}
#----
| /R/bootEGA.R | no_license | jmbh/EGAnet | R | false | false | 12,383 | r | #' Dimension Stability Analysis of \code{\link[EGAnet]{EGA}}
#'
#' \code{bootEGA} Estimates the number of dimensions of \emph{n} bootstraps
#' using the empirical (partial) correlation matrix (parametric) or resampling from
#' the empirical dataset (non-parametric). It also estimates a typical
#' median network structure, which is formed by the median or mean pairwise (partial)
#' correlations over the \emph{n} bootstraps.
#'
#' @param data Matrix or data frame.
#' Includes the variables to be used in the \code{bootEGA} analysis
#'
#' @param n Numeric integer.
#' Number of replica samples to generate from the bootstrap analysis.
#' At least \code{500} is recommended
#'
#' @param model Character.
#' A string indicating the method to use.
#' Defaults to \code{"glasso"}.
#'
#' Current options are:
#'
#' \itemize{
#'
#' \item{\strong{\code{"glasso"}}}
#' {Estimates the Gaussian graphical model using graphical LASSO with
#' extended Bayesian information criterion to select optimal regularization parameter.
#' See \code{\link[EGAnet]{EBICglasso.qgraph}}}
#'
#' \item{\strong{\code{"TMFG"}}}
#' {Estimates a Triangulated Maximally Filtered Graph.
#' See \code{\link[NetworkToolbox]{TMFG}}}
#'
#' }
#'
#' @param algorithm A string indicating the algorithm to use.
#' Current options are:
#'
#' \itemize{
#'
#' \item{\strong{\code{walktrap}}}
#' {Computes the Walktrap algorithm using \code{\link[igraph]{cluster_walktrap}}}
#'
#' \item{\strong{\code{louvain}}}
#' {Computes the Louvain algorithm using \code{\link[igraph]{cluster_louvain}}}
#'
#' }
#'
#' @param type Character.
#' A string indicating the type of bootstrap to use.
#'
#' Current options are:
#'
#' \itemize{
#'
#' \item{\strong{\code{"parametric"}}}
#' {Generates \code{n} new datasets (multivariate normal random distributions) based on the
#' original dataset, via the \code{\link[mvtnorm]{Mvnorm}} function of the mvtnorm package}
#'
#' \item{\strong{\code{"resampling"}}}
#' {Generates n random subsamples of the original data}
#'
#' }
#'
#' @param typicalStructure Boolean.
#' If \code{TRUE}, returns the typical network of partial correlations
#' (estimated via graphical lasso or via TMFG) and estimates its dimensions.
#' The "typical network" is the median of all pairwise correlations over the \emph{n} bootstraps.
#' Defaults to \code{TRUE}
#'
#' @param plot.typicalStructure Boolean.
#' If \code{TRUE}, returns a plot of the typical network (partial correlations),
#' which is the median of all pairwise correlations over the \emph{n} bootstraps,
#' and its estimated dimensions.
#' Defaults to \code{TRUE}
#'
#' @param ncores Numeric.
#' Number of cores to use in computing results.
#' Defaults to \code{parallel::detectCores() / 2} or half of your
#' computer's processing power.
#' Set to \code{1} to not use parallel computing.
#' Recommended to use maximum number of cores minus one
#'
#' If you're unsure how many cores your computer has,
#' then use the following code: \code{parallel::detectCores()}
#'
#' @param ... Additional arguments to be passed to \code{\link{EBICglasso.qgraph}}
#' or \code{\link[NetworkToolbox]{TMFG}}
#'
#' @return Returns a list containing:
#'
#' \item{n}{Number of replica samples in bootstrap}
#'
#' \item{boot.ndim}{Number of dimensions identified in each replica sample}
#'
#' \item{boot.wc}{Item allocation for each replica sample}
#'
#' \item{bootGraphs}{Networks of each replica sample}
#'
#' \item{summary.table}{Summary table containing number of replica samples, median,
#' standard deviation, standard error, 95\% confidence intervals, and quantiles (lower = 2.5\% and upper = 97.5\%)}
#'
#' \item{frequency}{Proportion of times the number of dimensions was identified
#' (e.g., .85 of 1,000 = 850 times that specific number of dimensions was found)}
#'
#' \item{EGA}{Output of the original \code{\link[EGAnet]{EGA}} results}
#'
#' \item{typicalGraph}{A list containing:
#'
#' \itemize{
#'
#' \item{\strong{\code{graph}}}
#' {Network matrix of the median network structure}
#'
#' \item{\strong{\code{typical.dim.variables}}}
#' {An ordered matrix of item allocation}
#'
#' \item{\strong{\code{wc}}}
#' {Item allocation of the median network}
#'
#' }
#' }
#'
#' @author Hudson F. Golino <hfg9s at virginia.edu> and Alexander P. Christensen <alexpaulchristensen@gmail.com>
#'
#' @examples
#'
#' # Load data
#' wmt <- wmt2[,7:24]
#'
#' \dontrun{
#'
#' # bootEGA glasso example
#' boot.wmt <- bootEGA(data = wmt, n = 500, typicalStructure = TRUE,
#' plot.typicalStructure = TRUE, model = "glasso", type = "parametric", ncores = 4)
#' }
#'
#' # Load data
#' intwl <- intelligenceBattery[,8:66]
#'
#' \dontrun{
#' # bootEGA TMFG example
#' boot.intwl <- bootEGA(data = intelligenceBattery[,8:66], n = 500, typicalStructure = TRUE,
#' plot.typicalStructure = TRUE, model = "TMFG", type = "parametric", ncores = 4)
#'
#' }
#'
#' @references
#' Christensen, A. P., & Golino, H. F. (2019).
#' Estimating the stability of the number of factors via Bootstrap Exploratory Graph Analysis: A tutorial.
#' \emph{PsyArXiv}.
#' doi:\href{https://doi.org/10.31234/osf.io/9deay}{10.31234/osf.io/9deay}
#'
#' @seealso \code{\link[EGAnet]{EGA}} to estimate the number of dimensions of an instrument using EGA
#' and \code{\link[EGAnet]{CFA}} to verify the fit of the structure suggested by EGA using confirmatory factor analysis.
#'
#' @importFrom stats cov median sd qt quantile
#'
#' @export
#'
# Bootstrap EGA
# Updated 11.05.2020
bootEGA <- function(data, n,
model = c("glasso", "TMFG"), algorithm = c("walktrap", "louvain"),
type = c("parametric", "resampling"),
typicalStructure = TRUE, plot.typicalStructure = TRUE, ncores, ...) {
#### MISSING ARGUMENTS HANDLING ####
if(missing(model))
{model <- "glasso"
}else{model <- match.arg(model)}
if(missing(algorithm))
{algorithm <- "walktrap"
}else{algorithm <- match.arg(algorithm)}
if(missing(type))
{type <- "parametric"
}else{type <- match.arg(type)}
if(missing(ncores))
{ncores <- ceiling(parallel::detectCores() / 2)
}else{ncores}
#### MISSING ARGUMENTS HANDLING ####
#number of cases
cases <- nrow(data)
#set inverse covariance matrix for parametric approach
if(type=="parametric") # Use a parametric approach:
{
if(model=="glasso")
{
g <- -EBICglasso.qgraph(qgraph::cor_auto(data), n = cases, lambda.min.ratio = 0.1, returnAllResults = FALSE, ...)
diag(g) <- 1
}else if(model=="TMFG")
{
g <- -NetworkToolbox::LoGo(data, normal = TRUE, partial=TRUE, ...)
diag(g) <- 1
}
}
#initialize data list
datalist <- list()
#initialize count
count <- 0
#let user know data generation has started
message("\nGenerating data...", appendLF = FALSE)
repeat{
#increase count
count <- count + 1
#generate data
if(type == "parametric")
{datalist[[count]] <- mvtnorm::rmvnorm(cases, sigma = corpcor::pseudoinverse(g))
}else if(type == "resampling")
{datalist[[count]] <- data[sample(1:cases, replace=TRUE),]}
#break out of repeat
if(count == n)
{break}
}
#let user know data generation has ended
message("done", appendLF = TRUE)
#initialize correlation matrix list
corlist <- list()
#let user know data generation has started
message("\nComputing correlation matrices...\n", appendLF = FALSE)
#Parallel processing
cl <- parallel::makeCluster(ncores)
#Export variables
parallel::clusterExport(cl = cl,
varlist = c("datalist", "corlist", "cases", ...),
envir=environment())
#Compute correlation matrices
corlist <- pbapply::pblapply(X = datalist, cl = cl,
FUN = qgraph::cor_auto)
#let user know data generation has started
message("Estimating networks...\n", appendLF = FALSE)
#Estimate networks
if(model == "glasso")
{
boots <- pbapply::pblapply(X = corlist, cl = cl,
FUN = EBICglasso.qgraph,
n = cases,
lambda.min.ratio = 0.1,
returnAllResults = FALSE,
...)
}else if(model == "TMFG")
{
boots <- pbapply::pblapply(X = corlist, cl = cl,
FUN = NetworkToolbox::TMFG,
normal = TRUE,
...)
for(i in 1:n)
{boots[[i]] <- boots[[i]]$A}
}
parallel::stopCluster(cl)
#let user know results are being computed
message("Computing results...", appendLF = FALSE)
bootGraphs <- vector("list", n)
for (i in 1:n) {
bootGraphs[[i]] <- boots[[i]]
colnames(bootGraphs[[i]]) <- colnames(data)
rownames(bootGraphs[[i]]) <- colnames(data)
}
boot.igraph <- vector("list", n)
for (l in 1:n) {
boot.igraph[[l]] <- NetworkToolbox::convert2igraph(abs(bootGraphs[[l]]))
}
boot.wc <- vector("list", n)
for (m in 1:n) {
boot.wc[[m]] <- switch(algorithm,
walktrap = igraph::cluster_walktrap(boot.igraph[[m]]),
louvain = igraph::cluster_louvain(boot.igraph[[m]])
)
}
boot.ndim <- matrix(NA, nrow = n, ncol = 2)
for (m in 1:n) {
boot.ndim[m, 2] <- max(boot.wc[[m]]$membership)
}
colnames(boot.ndim) <- c("Boot.Number", "N.Dim")
boot.ndim[, 1] <- seq_len(n)
if (typicalStructure == TRUE) {
if(model=="glasso")
{typical.Structure <- apply(simplify2array(bootGraphs),1:2, median)
}else if(model=="TMFG")
{typical.Structure <- apply(simplify2array(bootGraphs),1:2, mean)}
typical.igraph <- NetworkToolbox::convert2igraph(abs(typical.Structure))
typical.wc <- switch(algorithm,
walktrap = igraph::cluster_walktrap(typical.igraph),
louvain = igraph::cluster_louvain(typical.igraph)
)
typical.ndim <- max(typical.wc$membership)
dim.variables <- data.frame(items = colnames(data), dimension = typical.wc$membership)
}
if (plot.typicalStructure == TRUE) {
plot.typical.ega <- qgraph::qgraph(typical.Structure, layout = "spring",
vsize = 6, groups = as.factor(typical.wc$membership))
}
Median <- median(boot.ndim[, 2])
se.boot <- sd(boot.ndim[, 2])
ciMult <- qt(0.95/2 + 0.5, nrow(boot.ndim) - 1)
ci <- se.boot * ciMult
quant <- quantile(boot.ndim[,2], c(.025, .975), na.rm = TRUE)
summary.table <- data.frame(n.Boots = n, median.dim = Median,
SE.dim = se.boot, CI.dim = ci,
Lower.CI = Median - ci, Upper.CI = Median + ci,
Lower.Quantile = quant[1], Upper.Quantile = quant[2])
row.names(summary.table) <- NULL
#compute frequency
dim.range <- range(boot.ndim[,2])
lik <- matrix(0, nrow = diff(dim.range)+1, ncol = 2)
colnames(lik) <- c("# of Factors", "Frequency")
count <- 0
for(i in seq(from=min(dim.range),to=max(dim.range),by=1))
{
count <- count + 1
lik[count,1] <- i
lik[count,2] <- length(which(boot.ndim[,2]==i))/n
}
#let user know results have been computed
message("done", appendLF = TRUE)
result <- list()
result$n <- n
result$boot.ndim <- boot.ndim
result$boot.wc <- boot.wc
result$bootGraphs <- bootGraphs
result$summary.table <- summary.table
result$frequency <- lik
result$EGA <- suppressMessages(suppressWarnings(EGA(data = data, model = model, plot.EGA = FALSE)))
# Typical structure
if (typicalStructure == TRUE) {
typicalGraph <- list()
typicalGraph$graph <- typical.Structure
typicalGraph$typical.dim.variables <- dim.variables[order(dim.variables[,2]), ]
typicalGraph$wc <- typical.wc$membership
result$typicalGraph <- typicalGraph
}
class(result) <- "bootEGA"
return(result)
}
#----
|
\name{SCA}
\alias{SCA}
\alias{SCA.G3}
\alias{SCA.GE}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Specific Combining Ability
}
\description{
The SCA (specific combining ability) effect, used to fit the Hayman2, Griffing3 (SCA.G3), GE2 and GE3 (SCA.GE) models with the \code{lm} function
}
\usage{
SCA(P1, P2, type = "fix", data)
SCA.G3(P1, P2, type = "fix", data)
}
\arguments{
\item{P1}{\code{a variable for the first parent}}
\item{P2}{\code{a variable for the second parent}}
\item{type}{\code{a variable for model selection. May be "fix" (fixed model) or "random" (random model).}}
\item{data}{\code{a 'data.frame' where to look for explanatory variables}}
}
\details{
Builds a design matrix of all possible combinations between parental lines, with no selfs and no reciprocals.
}
\references{
\cite{Onofri, A., Terzaroli, N. & Russi, L. Linear models for diallel crosses: a review with R functions. Theor Appl Genet (2020). https://doi.org/10.1007/s00122-020-03716-8}
}
\author{
Andrea Onofri \email{(andrea.onofri@unipg.it)}, Niccolo' Terzaroli \email{(n.terzaroli@gmail.com)}, Luigi Russi \email{(luigi.russi@unipg.it)}
}
\examples{
data("zhang05")
dMod <- lm(Yield ~ Env/Block + H.BAR(Par1, Par2) + VEi(Par1, Par2) +
Hi(Par1, Par2) + SCA(Par1, Par2) +
H.BAR(Par1, Par2):Env + VEi(Par1, Par2):Env +
Hi(Par1, Par2):Env + SCA(Par1, Par2):Env, data = zhang05)
anova(dMod)
}
\keyword{ ~diallel }
\keyword{ ~genetic effects }
| /man/SCA.Rd | no_license | spoicts/lmDiallel | R | false | false | 1,457 | rd | \name{SCA}
\alias{SCA}
\alias{SCA.G3}
\alias{SCA.GE}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Specific Combining Ability
}
\description{
The SCA (specific combining ability) effect, used to fit the Hayman2, Griffing3 (SCA.G3), GE2 and GE3 (SCA.GE) models with the \code{lm} function
}
\usage{
SCA(P1, P2, type = "fix", data)
SCA.G3(P1, P2, type = "fix", data)
}
\arguments{
\item{P1}{\code{a variable for the first parent}}
\item{P2}{\code{a variable for the second parent}}
\item{type}{\code{a variable for model selection. May be "fix" (fixed model) or "random" (random model).}}
\item{data}{\code{a 'data.frame' where to look for explanatory variables}}
}
\details{
Builds a design matrix of all possible combinations between parental lines, with no selfs and no reciprocals.
}
\references{
\cite{Onofri, A., Terzaroli, N. & Russi, L. Linear models for diallel crosses: a review with R functions. Theor Appl Genet (2020). https://doi.org/10.1007/s00122-020-03716-8}
}
\author{
Andrea Onofri \email{(andrea.onofri@unipg.it)}, Niccolo' Terzaroli \email{(n.terzaroli@gmail.com)}, Luigi Russi \email{(luigi.russi@unipg.it)}
}
\examples{
data("zhang05")
dMod <- lm(Yield ~ Env/Block + H.BAR(Par1, Par2) + VEi(Par1, Par2) +
Hi(Par1, Par2) + SCA(Par1, Par2) +
H.BAR(Par1, Par2):Env + VEi(Par1, Par2):Env +
Hi(Par1, Par2):Env + SCA(Par1, Par2):Env, data = zhang05)
anova(dMod)
}
\keyword{ ~diallel }
\keyword{ ~genetic effects }
|
# Fit a 10-fold cross-validated elastic net (glmnet) to the oesophagus
# correlation training set and append the fitted path summary to a log file.
library(glmnet)
# header = TRUE: the first row holds column names. (The original used the
# partially matched `head = T`; spelled out here, with TRUE instead of the
# reassignable T.)
mydata <- read.table("../../../../TrainingSet/FullSet/Correlation/oesophagus.csv", header = TRUE, sep = ",")
# Predictors are columns 4..end; the response is the first column.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
# Fix the fold assignment so the cross-validation is reproducible.
set.seed(123)
# alpha = 0.04 is close to ridge; inputs are deliberately not standardized.
# NOTE(review): the name `glm` shadows stats::glm for the rest of the session.
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.04, family = "gaussian", standardize = FALSE)
sink('./oesophagus_018.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/oesophagus/oesophagus_018.R | no_license | esbgkannan/QSMART | R | false | false | 359 | r | library(glmnet)
# Load the training matrix. NOTE(review): `head=T` partially matches
# read.table's `header` argument; prefer `header = TRUE` (T is reassignable).
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/oesophagus.csv",head=T,sep=",")
# Predictors are columns 4..end; the response is the first column.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the fold assignment so the cross-validation is reproducible.
set.seed(123)
# 10-fold CV elastic net (alpha = 0.04, near-ridge) on unstandardized data.
# NOTE(review): the name `glm` shadows stats::glm for the rest of the session.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.04,family="gaussian",standardize=FALSE)
# Append the fitted path summary to a log file.
sink('./oesophagus_018.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
fig05x014 <- function() {
  # Body-mass sample (grams) for 56 observations; the figure shows its ECDF.
  masses <- c(5.9, 32.0, 40.0, 51.5, 70.0, 100.0, 78.0, 80.0, 85.0, 85.0,
              110.0, 115.0, 125.0, 130.0, 120.0, 120.0, 130.0, 135.0, 110.0, 130.0,
              150.0, 145.0, 150.0, 170.0, 225.0, 145.0, 188.0, 180.0, 197.0, 218.0,
              300.0, 260.0, 265.0, 250.0, 250.0, 300.0, 320.0, 514.0, 556.0, 840.0,
              685.0, 700.0, 700.0, 690.0, 900.0, 650.0, 820.0, 850.0, 900.0, 1015.0,
              820.0, 1100.0, 1000.0, 1100.0, 1000.0, 1000.0)
  # Open a fresh on-screen device (Windows) and set figure dimensions/margins.
  graphics.off()
  windows(width = 4.5, height = 4.5, pointsize = 12)
  par(fin = c(4.45, 4.45), pin = c(4.45, 4.45),
      mai = c(0.85, 0.85, 0.25, 0.25), xaxs = "r", bty = "n")
  # Empirical distribution function: jump points are the distinct masses,
  # heights are the cumulative proportions of observations up to each point.
  jump_points <- sort(unique(masses))
  n_obs <- length(masses)
  ecdf_heights <- cumsum(tabulate(match(masses, jump_points))) / n_obs
  plot(jump_points, ecdf_heights, col = gray(0.5), type = "l",
       lwd = 1.5, xlab = "Mass (g)",
       ylab = "Empirical Distribution Function",
       main = NULL, xlim = c(0, 1200), ylim = c(0, 1))
  # Mirror the on-screen plot to EPS and PDF files.
  dev.copy2eps(file = "fig05x014.eps")
  dev.copy2pdf(file = "fig05x014.pdf")
}
| /graphicsforstatistics_2e_figures_scripts_r/Chapter 5/fig05x014.R | no_license | saqibarfeen/coding_time | R | false | false | 839 | r | fig05x014<-function(){
mass<-c(5.9,32.0,40.0,51.5,70.0,100.0,78.0,80.0,85.0,85.0,
110.0,115.0,125.0,130.0,120.0,120.0,130.0,135.0,110.0,130.0,
150.0,145.0,150.0,170.0,225.0,145.0,188.0,180.0,197.0,218.0,
300.0,260.0,265.0,250.0,250.0,300.0,320.0,514.0,556.0,840.0,
685.0,700.0,700.0,690.0,900.0,650.0,820.0,850.0,900.0,1015.0,
820.0,1100.0,1000.0,1100.0,1000.0,1000.0)
#
graphics.off()
windows(width=4.5,height=4.5,pointsize=12)
par(fin=c(4.45,4.45),pin=c(4.45,4.45),
mai=c(0.85,0.85,0.25,0.25),xaxs="r",bty="n")
#
knots<-sort(unique(mass))
n<-length(mass)
y<-cumsum(tabulate(match(mass,knots)))/n
#
plot(knots,y,col=gray(0.5),type="l",
lwd=1.5,xlab="Mass (g)",
ylab="Empirical Distribution Function",
main=NULL,xlim=c(0,1200),ylim=c(0,1))
#
dev.copy2eps(file="fig05x014.eps")
dev.copy2pdf(file="fig05x014.pdf")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_training.R
\name{xgb_data}
\alias{xgb_data}
\title{XGboost data}
\usage{
xgb_data(
dat_train,
target,
dat_test = NULL,
x_list = NULL,
prop = 0.7,
occur_time = NULL
)
}
\arguments{
\item{dat_train}{data.frame of train data. Default is NULL.}
\item{target}{name of target variable.}
\item{dat_test}{data.frame of test data. Default is NULL.}
\item{x_list}{names of independent variables of raw data. Default is NULL.}
\item{prop}{Percentage of train-data after the partition. Default: 0.7.}
\item{occur_time}{The name of the variable that represents the time at which each observation takes place.Default is NULL.}
}
\description{
\code{xgb_data} prepares data for use in \code{\link{training_model}}.
}
| /man/xgb_data.Rd | no_license | cran/creditmodel | R | false | true | 838 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_training.R
\name{xgb_data}
\alias{xgb_data}
\title{XGboost data}
\usage{
xgb_data(
dat_train,
target,
dat_test = NULL,
x_list = NULL,
prop = 0.7,
occur_time = NULL
)
}
\arguments{
\item{dat_train}{data.frame of train data. Default is NULL.}
\item{target}{name of target variable.}
\item{dat_test}{data.frame of test data. Default is NULL.}
\item{x_list}{names of independent variables of raw data. Default is NULL.}
\item{prop}{Percentage of train-data after the partition. Default: 0.7.}
\item{occur_time}{The name of the variable that represents the time at which each observation takes place.Default is NULL.}
}
\description{
\code{xgb_data} prepares data for use in \code{\link{training_model}}.
}
|
# Constructs the overall (block-diagonal) covariance matrix of the model.
#
# pars:      Parameter values, read sequentially: one parameter per diagonal
#            component, two per exchangeable or AR(1) component.
# sigmaType: Structure of the matrices in each variance component:
#            0 - Diagonal (1 parameter)
#            1 - Exchangeable (2 parameters: diagonal, off-diagonal)
#            2 - AR(1) (2 parameters: variance, correlation)
# kK:   Number of random effects; the dimension of the overall matrix.
# kR:   Number of variance components.
# kLh:  Number of sub-variance components in each variance component; each
#       sub-variance component contributes one covariance block.
# kLhi: Number of random effects in each sub-variance component, i.e. the
#       dimension of each block, in the order the blocks are laid out.
#
# Returns a kK x kK numeric matrix with the component blocks on the diagonal.
constructSigma <- function(pars, sigmaType, kK, kR, kLh, kLhi) {
  ovSigma <- matrix(0, kK, kK)
  counterPars <- 1     # next unread position in `pars`
  counterSubVar <- 1   # next unread position in `kLhi`
  counterDim <- 1      # top-left corner of the next block in `ovSigma`
  # seq_len() instead of 1:kR so degenerate counts do not produce c(1, 0).
  for (i in seq_len(kR)) {
    if (sigmaType[i] == 0) {
      # Diagonal: all sub-matrices of this component share one variance.
      par1 <- pars[counterPars]
      counterPars <- counterPars + 1
      for (j in seq_len(kLh[i])) {
        dim0 <- kLhi[counterSubVar]   # dimension of current sub-matrix
        counterSubVar <- counterSubVar + 1
        idx <- counterDim:(counterDim + dim0 - 1)
        ovSigma[idx, idx] <- par1 * diag(dim0)
        counterDim <- counterDim + dim0
      }
    } else if (sigmaType[i] == 1) {
      # Exchangeable: shared diagonal (par1) and off-diagonal (par2) values.
      par1 <- pars[counterPars]
      par2 <- pars[counterPars + 1]
      counterPars <- counterPars + 2
      for (j in seq_len(kLh[i])) {
        dim0 <- kLhi[counterSubVar]
        counterSubVar <- counterSubVar + 1
        tmp_mat <- par1 * diag(dim0)
        tmp_mat[lower.tri(tmp_mat)] <- par2
        tmp_mat[upper.tri(tmp_mat)] <- par2
        idx <- counterDim:(counterDim + dim0 - 1)
        ovSigma[idx, idx] <- tmp_mat
        counterDim <- counterDim + dim0
      }
    } else if (sigmaType[i] == 2) {
      # AR(1): entry (r, c) equals par1 * par2^|r - c|.
      par1 <- pars[counterPars]
      par2 <- pars[counterPars + 1]
      counterPars <- counterPars + 2
      for (j in seq_len(kLh[i])) {
        dim0 <- kLhi[counterSubVar]
        counterSubVar <- counterSubVar + 1
        d0 <- abs(outer(seq_len(dim0), seq_len(dim0), "-"))
        idx <- counterDim:(counterDim + dim0 - 1)
        ovSigma[idx, idx] <- par1 * par2^d0
        counterDim <- counterDim + dim0
      }
    }
  }
  ovSigma
}
| /R/constructSigma.R | no_license | cran/mcemGLM | R | false | false | 3,478 | r | # Constructs the overall matrix of the model.
# pars: Paramter values to be used.
# sigmaType: Structure of the matrices in the model.
# 0 - Diagonal
# 1 - Exchangeable
# 2 - AR(1)
# kK: Number of random effects. This is the dimension of the overall matrix model.
# kR: Number of variance components.
# kLh: Number of subvariance components in each variance component. Each subvariance component has a covariance matrix.
# kLhi: Number of random effects in each subvariance component. This are the dimensions of the subvariance component matrices.
constructSigma <- function(pars, sigmaType, kK, kR, kLh, kLhi) {
# We call ovSigma the overall covariance matrix
# Dimension of overall sigma.
ovSigma <- matrix(0, kK, kK)
# counterPars has the position of the paramters that are in each sigma matrix.
counterPars <- 1
counterSubVar <- 1
# For each variance component we need to paste its variance matrix into ovSigma.
# sigmaDim has the dimension of the individual variance matrix per subvariance component.
counterDim <- 1
for (i in 1:kR) {
# Sigma is a diagonal matrix (one parameter.)
if (sigmaType[i] == 0) {
par1 <- pars[counterPars]
counterPars <- counterPars + 1
# Number of subvariance matrices: kLh[i]. These share one paramter.
for (j in 1:kLh[i]) {
dim0 <- kLhi[counterSubVar] # Dimension of current subvariance matrix
counterSubVar <- counterSubVar + 1 # Index of subvariance matrices
tmp_mat <- par1 * diag(dim0)
# print(counterDim:(counterDim + dim0 - 1))
# print(tmp_mat)
ovSigma[counterDim:(counterDim + dim0 - 1), counterDim:(counterDim + dim0 - 1)] <- tmp_mat
counterDim <- counterDim + dim0
}
}
# Exchangeable matrix. Two parameters, diagonal and off-diagonal.
if (sigmaType[i] == 1) {
# Number of subvariance matrices: kLh[i]. These share the two paramters but may be of different sizes.
par1 <- pars[counterPars]
par2 <- pars[counterPars + 1]
counterPars <- counterPars + 2
for (j in 1:kLh[i]) {
dim0 <- kLhi[counterSubVar] # Dimension of current subvariance matrix
counterSubVar <- counterSubVar + 1 # Index of subvariance matrices
tmp_mat <- par1 * diag(dim0)
tmp_mat[lower.tri(tmp_mat)] <- par2
tmp_mat[upper.tri(tmp_mat)] <- par2
# print(counterDim:(counterDim + dim0 - 1))
# print(tmp_mat)
ovSigma[counterDim:(counterDim + dim0 - 1), counterDim:(counterDim + dim0 - 1)] <- tmp_mat
counterDim <- counterDim + dim0
}
}
# AR(1) matrix. Two parameters, sigma^2 (1) and pho (2).
if (sigmaType[i] == 2) {
# Number of subvariance matrices: kLh[i]. These share the two paramters but may be of different sizes.
par1 <- pars[counterPars]
par2 <- pars[counterPars + 1]
counterPars <- counterPars + 2
for (j in 1:kLh[i]) {
dim0 <- kLhi[counterSubVar] # Dimension of current subvariance matrix
counterSubVar <- counterSubVar + 1 # Index of subvariance matrices
d0 <- abs(outer(1:dim0, 1:dim0, "-"))
tmp_mat <- par1 * par2^d0
# print(counterDim:(counterDim + dim0 - 1))
# print(tmp_mat)
ovSigma[counterDim:(counterDim + dim0 - 1), counterDim:(counterDim + dim0 - 1)] <- tmp_mat
counterDim <- counterDim + dim0
}
}
}
return(ovSigma)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/synthetic-datasets.R
\name{getGrowthRateBasedDataset}
\alias{getGrowthRateBasedDataset}
\title{Convenience method to generate a random growth rate simulation}
\usage{
getGrowthRateBasedDataset(
growthSimulation = NULL,
bootstraps = 100,
seed = 100,
weekendEffect = 0.1,
Gt.mean = 5,
Gt.sd = 4,
periodic = FALSE,
name = "synthetic",
...
)
}
\arguments{
\item{growthSimulation}{- optional growth rate timeseries, if missing a random one is generated}
\item{bootstraps}{- how many bootstrap replicates to generate}
\item{seed}{- the initial case load}
\item{weekendEffect}{- any weekend effect?}
\item{Gt.mean}{- generation time mean}
\item{Gt.sd}{- generation time sd}
\item{periodic}{- whether the randomly generated growth rate should be periodic}
\item{name}{- a name for the simulation}
\item{...}{}
}
\value{
a single strain random simulation with sensible parameters and no observation delay
}
\description{
Convenience method to generate a random growth rate simulation
}
| /r-library/man/getGrowthRateBasedDataset.Rd | permissive | terminological/jepidemic | R | false | true | 1,019 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/synthetic-datasets.R
\name{getGrowthRateBasedDataset}
\alias{getGrowthRateBasedDataset}
\title{Convenience method to generate a random growth rate simulation}
\usage{
getGrowthRateBasedDataset(
growthSimulation = NULL,
bootstraps = 100,
seed = 100,
weekendEffect = 0.1,
Gt.mean = 5,
Gt.sd = 4,
periodic = FALSE,
name = "synthetic",
...
)
}
\arguments{
\item{growthSimulation}{- optional growth rate timeseries, if missing a random one is generated}
\item{bootstraps}{- how many bootstrap replicates to generate}
\item{seed}{- what initial case loads}
\item{weekendEffect}{- any weekend effect?}
\item{Gt.mean}{- generation time mean}
\item{Gt.sd}{- generation time sd}
\item{periodic}{- a random periodic or}
\item{name}{- a name for the simulation}
\item{...}{}
}
\value{
a single strain random simulation with sensible parameters and no observation delay
}
\description{
Convenience method to generate a random growth rate simulation
}
|
###### This is HW1 from Data Analytics Tools and Techniques
### Load the raw measurement (X) and activity-code (y) files for both the
### test and train partitions of the UCI HAR dataset.
X_test <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/test/X_test.txt", sep="")
y_test <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/test/y_test.txt")
X_train <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/train/X_train.txt", sep="")
y_train <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/train/y_train.txt")
### Feature names: drop the index column so only the names (V2) remain.
features <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/features.txt")
features$V1 <- NULL
### Stack train on top of test and label the columns with the feature names.
### (The dead `mc_test` copies and commented-out experiments were removed.)
master_data <- rbind(X_train, X_test)
colnames(master_data) <- features$V2
### Activity codes, stacked in the same train-then-test order.
response <- rbind(y_train, y_test)
colnames(response) <- "response"
master_data <- cbind(master_data, response)
### Column indices of the mean/std measurements, plus column 562 which is
### the response appended above.
feature_subset <- c(1:6,41:46,81:86,121:126,161:166,201:202,214:215,227:228,240:241,253:254,
                    266:271,294:296,345:350,373:375,424:429,452:454,503:504,513,516:517,526,
                    529:530,539,542:543,552,555:561,562)
master_data <- master_data[,feature_subset]
### Activity label lookup table (the numeric code column V1 is dropped).
activity_labels <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/activity_labels.txt")
activity_labels$V1 <- NULL
### Created Function to recode Response
# Map a numeric activity code (1-6) to its UCI HAR activity label.
# Any other value yields "ERROR". Using %in% (which never returns NA)
# also fixes the original, whose `response == 1` comparison crashed the
# `if` when response was NA.
recode_response <- function(response) {
  activity_names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                      "SITTING", "STANDING", "LAYING")
  if (response %in% 1:6) {
    return(activity_names[as.integer(response)])
  }
  "ERROR"
}
### Apply the activity labels to the response variable.
master_data$response <- sapply(master_data$response, recode_response)
######## Step 5 >> Average Variables
### Load the subject identifiers for both partitions, in the same
### train-then-test order used for the measurements.
subject_test <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/train/subject_train.txt")
subject <- rbind(subject_train, subject_test)
colnames(subject) <- "subject"
### Attach the subject column to the measurement data.
subject_data <- cbind(subject, master_data)
### Count the observations for each subject actually present in the data.
### Fixes in this loop: the original used grep(), which counted substring
### matches (subject 1 also matched 10-19 and 21), hard-coded 1:24 (missing
### any higher subject ids), and contained a no-op `i =+ 1` inside the for.
for (i in sort(unique(subject_data$subject))) {
  info <- sum(subject_data$subject == i)
  print(paste("subject", i, "has", info, "# of observations"))
}
### Average every retained measurement per subject and activity.
library(dplyr)  # library() fails loudly if dplyr is missing; require() does not
subject_data_mean <- subject_data %>%
  group_by(subject, response) %>%
  summarize_all(mean)   # funs() is deprecated; pass the function directly
write.table(subject_data_mean, "C:/Users/Mike/Documents/Data Analytics Tools and Techniques/subject_data_mean.txt", row.names = FALSE)
| /run_analysis.R | no_license | MrMikeMahoney/Data_Analytics_Tools_Tech | R | false | false | 4,010 | r | ###### This is HW1 from Data Analytics Tools and Techniques
### Where the real magic happens
X_test = read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/test/X_test.txt", sep="")
y_test = read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/test/y_test.txt")
X_train = read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/train/X_train.txt", sep="")
y_train = read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/train/y_train.txt")
### Reading our features table >> Need to turn it into a list or vector
features = read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/features.txt")
features$V1 <- NULL
### Removing the "()" from features
#features$V2 <- as.character(gsub("[\\()]", "", features$V2))
#features <- data.frame
#mc_test <- NULL
### Creating master_data >> Which is as of now just a merge of X_train and X_test
master_data <- rbind(X_train, X_test)
mc_test <- master_data
### Renaming Col Names with the features >> features$V2
#colnames(mc_test) <- features$V2
colnames(master_data) <- features$V2
### Creating Response >> Which is Y_test and Y_train merged
response <- rbind(y_train, y_test)
colnames(response) <- "response"
### Merging the master data with the response data
master_data <- cbind(master_data, response)
### A list of the subset of mean or std measurements >> Indexing the columns
feature_subset <- c(1:6,41:46,81:86,121:126,161:166,201:202,214:215,227:228,240:241,253:254,
266:271,294:296,345:350,373:375,424:429,452:454,503:504,513,516:517,526,
529:530,539,542:543,552,555:561,562)
### Applying our index list to our dataset
master_data <- master_data[,feature_subset]
### Reading in activity_labels
activity_labels <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/activity_labels.txt")
activity_labels$V1 <- NULL
### Created Function to recode Response
recode_response <- function(response) {
if (response == 1) return("WALKING") else
if (response == 2) return("WALKING_UPSTAIRS") else
if (response == 3) return("WALKING_DOWNSTAIRS") else
if (response == 4) return("SITTING") else
if (response == 5) return("STANDING") else
if (response == 6) return("LAYING") else
return("ERROR")}
### Applying the label to the Response variable
mc_test <- master_data
#recode_responde(mc_test$response)
#mc_test$response <- sapply(mc_test$response, recode_response)
master_data$response <- sapply(master_data$response, recode_response)
######## Step 5 >> Average Variables
### Uploading the subject
subject_test <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("C:/Users/Mike/Documents/Data Analytics Tools and Techniques/UCI_HAR_Dataset/UCI HAR Dataset/train/subject_train.txt")
subject <- rbind(subject_train, subject_test)
colnames(subject) <- "subject"
### merging the master_data and subject
subject_data <- cbind(subject, master_data)
#subject_data <- NULL
#### A for loop to get a count of obs in each subject
subject_number <- 1:24
i=1
for(i in subject_number){
info <- length(grep(i, subject_data$subject))
print(paste("subject",i, "has", info, "# of observations"))
i =+ 1
}
### Filtering our subject_data >> to get the mean
require(dplyr)
#sub_mc_test <- group_by(subject_data, subject, response)
subject_data_mean <- subject_data %>%
group_by(subject, response) %>%
summarize_all(funs(mean))
write.table(subject_data_mean, "C:/Users/Mike/Documents/Data Analytics Tools and Techniques/subject_data_mean.txt", row.names = FALSE)
|
# A single s_group keeps its group names, rejects duplicated names, and
# requires an `id` entry.
test_that("s_group works", {
ind <- make_s_group(list(id = 1, month = "Jan"))
expect_equal(names(ind), c("id", "month"))
# burst names not duplicated
expect_error(make_s_group(list(id = 1, id = 1)), "group names are duplicated")
# There is an id column
expect_error(make_s_group(list(month = "Jan", Year = 2020)), "There is no `id` column in group names")
})
# make_c_grouping: values become characters, the requested active_group is
# recorded, and omitting active_group emits a message.
test_that("c_grouping works", {
burstz <- list(id = rep(1, 4), col1 = c(1, 1, 2, 2), col2 = c("A", "B", "B", "A"))
obj1 <- suppressWarnings(make_c_grouping(x = burstz, active_group = "id"))
# group values are stored as character, not numeric
expect_equal(sapply(obj1, function(x) x$col1), as.character(c("1", "1", "2", "2")))
cg <- make_c_grouping(x = burstz, active_group = "id")
expect_equal(attr(cg, "active_group"), "id")
# no active_group supplied -> informative message
expect_message(make_c_grouping(x = burstz))
# check levels
})
# c() on s_group and c_grouping objects preserves the active_group and
# errors when the group names of the operands differ.
test_that("concatenate groups works", {
mb1 <- make_s_group(list(id = 1, month = "Jan"))
mb2 <- make_s_group(list(id = 1, month = "Feb"))
comb <- c(mb1, mb1, mb2, mb2)
expect_equal(attr(comb, "active_group"), c("id", "month"))
burst1 <- make_c_grouping(list(id = rep(1, 4), col1 = c(1, 1, 2, 2)))
burst2 <- make_c_grouping(list(id = rep(1, 4), col1 = c("A", "B", "B", "A")))
comb <- c(burst1, burst2)
expect_equal(attr(comb, "active_group"), c("id", "col1"))
# mismatched group names (col1 vs col2) must be rejected
burst1 <- make_c_grouping(list(id = rep(1, 4), col1 = c(1, 1, 2, 2)))
burst2 <- make_c_grouping(list(id = rep(1, 4), col2 = c("A", "B", "B", "A")))
expect_error(c(burst1, burst2), "Group names do not match")
})
# active_group() and its replacement form work both on a bare c_grouping
# and on a full sftrack object built from a data.frame.
test_that("active group can change correctly", {
burst1 <- make_c_grouping(list(id = rep(1, 4), col2 = c("A", "B", "B", "A")))
expect_equal(active_group(burst1), c("id", "col2"))
active_group(burst1) <- "id"
expect_equal(active_group(burst1), c("id"))
# minimal 4-row track: two months for one id, with coordinates and times
df1 <- data.frame(
id = c(1, 1, 1, 1),
month = c(1, 1, 2, 2),
x = c(27, 27, 27, 27),
y = c(-80, -81, -82, -83),
z = c(0, 1, 2, 3),
timez = as.POSIXct("2020-01-01 12:00:00", tz = "UTC") + 60 * 60 * (1:4)
)
my_sftrack <- as_sftrack(
data = df1, group = c("id", "month"),
time = "timez", active_group = c("id", "month"), coords = c("x", "y")
)
expect_equal(active_group(my_sftrack), c("id", "month"))
active_group(my_sftrack) <- "id"
expect_equal(active_group(my_sftrack), c("id"))
})
# Subsetting and element replacement on a c_grouping: numeric and
# label-based `[`, `[`<- with an s_group, `[[`-level field edits, and the
# attributes kept on a zero-length subset.
test_that("subset c_grouping", {
burst1 <- make_c_grouping(list(id = rep(1, 4), col2 = c("A", "B", "B", "A")))
expect_equal(class(burst1[1]), "c_grouping")
# subsetting via label name
expect_equal(length(burst1["1_B"]), 2)
# replace item in a multi_burst
burst1 <- make_c_grouping(list(id = rep(1, 5), col2 = c("A", "A", "C", "C", "A")))
burst1[2] <- s_group(list(id = 1, col2 = "C"))
expect_equal(class(burst1[[2]]), "s_group")
expect_equal(as.character(attr(burst1, "sort_index")[2]), "1_C")
# replacing an individual element in a multi_burst
burst1[[1]]$id <- 3
expect_equal(burst1 [[1]][[1]], "3")
# subset to 0 rows: class and attribute structure must survive
# (note: `burst1[F]` uses the reassignable F; FALSE would be safer)
mb_attr <- attributes(burst1[F])
expect_equal(mb_attr, list(active_group = c("id", "col2"), sort_index = factor(NULL), class = "c_grouping"))
})
| /tests/testthat/test-burst_tj.R | permissive | jmsigner/sftrack | R | false | false | 3,112 | r |
test_that("s_group works", {
ind <- make_s_group(list(id = 1, month = "Jan"))
expect_equal(names(ind), c("id", "month"))
# burst names not duplicated
expect_error(make_s_group(list(id = 1, id = 1)), "group names are duplicated")
# There is an id column
expect_error(make_s_group(list(month = "Jan", Year = 2020)), "There is no `id` column in group names")
})
test_that("c_grouping works", {
burstz <- list(id = rep(1, 4), col1 = c(1, 1, 2, 2), col2 = c("A", "B", "B", "A"))
obj1 <- suppressWarnings(make_c_grouping(x = burstz, active_group = "id"))
expect_equal(sapply(obj1, function(x) x$col1), as.character(c("1", "1", "2", "2")))
cg <- make_c_grouping(x = burstz, active_group = "id")
expect_equal(attr(cg, "active_group"), "id")
expect_message(make_c_grouping(x = burstz))
# check levels
})
test_that("concatenate groups works", {
mb1 <- make_s_group(list(id = 1, month = "Jan"))
mb2 <- make_s_group(list(id = 1, month = "Feb"))
comb <- c(mb1, mb1, mb2, mb2)
expect_equal(attr(comb, "active_group"), c("id", "month"))
burst1 <- make_c_grouping(list(id = rep(1, 4), col1 = c(1, 1, 2, 2)))
burst2 <- make_c_grouping(list(id = rep(1, 4), col1 = c("A", "B", "B", "A")))
comb <- c(burst1, burst2)
expect_equal(attr(comb, "active_group"), c("id", "col1"))
burst1 <- make_c_grouping(list(id = rep(1, 4), col1 = c(1, 1, 2, 2)))
burst2 <- make_c_grouping(list(id = rep(1, 4), col2 = c("A", "B", "B", "A")))
expect_error(c(burst1, burst2), "Group names do not match")
})
test_that("active group can change correctly", {
burst1 <- make_c_grouping(list(id = rep(1, 4), col2 = c("A", "B", "B", "A")))
expect_equal(active_group(burst1), c("id", "col2"))
active_group(burst1) <- "id"
expect_equal(active_group(burst1), c("id"))
df1 <- data.frame(
id = c(1, 1, 1, 1),
month = c(1, 1, 2, 2),
x = c(27, 27, 27, 27),
y = c(-80, -81, -82, -83),
z = c(0, 1, 2, 3),
timez = as.POSIXct("2020-01-01 12:00:00", tz = "UTC") + 60 * 60 * (1:4)
)
my_sftrack <- as_sftrack(
data = df1, group = c("id", "month"),
time = "timez", active_group = c("id", "month"), coords = c("x", "y")
)
expect_equal(active_group(my_sftrack), c("id", "month"))
active_group(my_sftrack) <- "id"
expect_equal(active_group(my_sftrack), c("id"))
})
test_that("subset c_grouping", {
burst1 <- make_c_grouping(list(id = rep(1, 4), col2 = c("A", "B", "B", "A")))
expect_equal(class(burst1[1]), "c_grouping")
# subsetting via label name
expect_equal(length(burst1["1_B"]), 2)
# replace item in a multi_burst
burst1 <- make_c_grouping(list(id = rep(1, 5), col2 = c("A", "A", "C", "C", "A")))
burst1[2] <- s_group(list(id = 1, col2 = "C"))
expect_equal(class(burst1[[2]]), "s_group")
expect_equal(as.character(attr(burst1, "sort_index")[2]), "1_C")
# replacing an individual element in a multi_burst
burst1[[1]]$id <- 3
expect_equal(burst1 [[1]][[1]], "3")
# subset to 0
mb_attr <- attributes(burst1[F])
expect_equal(mb_attr, list(active_group = c("id", "col2"), sort_index = factor(NULL), class = "c_grouping"))
})
|
## KOOPERBERG.R
# KOOPERBERG BACKGROUND ADJUSTMENT FOR GENEPIX DATA
kooperberg <- function (RG, a = TRUE, layout=RG$printer, verbose=TRUE)
# Kooperberg Bayesian background correction
# Matt Ritchie
# Charles Kooperberg contributed 'a' estimation functions (.getas, .varaux1, .varaux2)
# Last modified 31 October 2005
#
# RG:      RGList whose RG$other carries the GenePix quality columns below.
# a:       if TRUE, estimate per-channel scale factors for the SDs (.getas);
#          if FALSE, use the scanner-reported SDs unscaled.
# layout:  array layout; only validated for presence in this function.
# verbose: print a progress line per corrected array.
# Returns the RGList with background-adjusted foreground intensities; the
# Rb/Gb background components are removed.
{
    if (!is(RG, "RGList"))
        stop("RG must be an RGList object")
    if (is.null(layout))
        stop("\nNeed to specify array layout")
    # Every GenePix column used downstream must have been read into RG$other.
    required <- c("F635 SD", "B635 SD", "F532 SD", "B532 SD",
                  "B532 Mean", "B635 Mean", "F Pixels", "B Pixels")
    if (any(vapply(required, function(nm) is.null(RG$other[[nm]]), logical(1))))
        stop("\nData missing from RG$other: re-run read.maimages with\n other=c(\"F635 SD\",\"B635 SD\",\"F532 SD\",\"B532 SD\",\"B532 Mean\",\"B635 Mean\",\"F Pixels\",\"B Pixels\")")
    nslides <- dim(RG)[2]
    # seq_len() is safe when there are zero arrays (1:nslides would give
    # c(1, 0)); the previously computed-but-unused `ngenes` was removed.
    for (i in seq_len(nslides)) {
        temp <- .bayesianAdjustedFG(RG, i, a)
        RG$R[, i] <- temp$R
        RG$G[, i] <- temp$G
        if (verbose) {
            cat("Corrected array", i, "\n")
        }
    }
    RG$Rb <- RG$Gb <- NULL
    RG
}
# Bayesian-adjusted foreground intensities for a single array k.
# Matt Ritchie
# 18 June 2003. Last modified 22 May 2006.
#
# RG: RGList with GenePix columns in RG$other (see kooperberg()).
# k:  column (array) index to correct.
# a:  if TRUE, scale the reported SDs by the channel factors from .getas;
#     if FALSE, use scale factors of 1 for both channels.
# Returns an RGList with adjusted R and G for array k; values beyond the
# 16-bit scanner range (2^16) are set to NA.
.bayesianAdjustedFG <- function (RG, k, a = TRUE)
{
    ngenes <- dim(RG)[1]   # number of probes on the array
    Y <- rep(0, ngenes)
    RGmodel <- new("RGList", list(R = Y, G = Y, Rb = NULL, Gb = NULL))
    # Channel scale factors: aparams[1] = green (532), aparams[2] = red (635).
    if (a) {
        aparams <- .getas(RG, k)
    }
    else {
        aparams <- c(1, 1)
    }
    # Standard errors of the mean for foreground/background, both channels.
    Rsfg <- aparams[2] * RG$other$"F635 SD"[,k]/sqrt(RG$other$"F Pixels"[,k])
    Rsbg <- aparams[2] * RG$other$"B635 SD"[,k]/sqrt(RG$other$"B Pixels"[,k])
    Gsfg <- aparams[1] * RG$other$"F532 SD"[,k]/sqrt(RG$other$"F Pixels"[,k])
    Gsbg <- aparams[1] * RG$other$"B532 SD"[,k]/sqrt(RG$other$"B Pixels"[,k])
    # seq_len() instead of 1:ngenes so a zero-probe array does not loop.
    # Adjustment is only defined for positive foreground and background SE;
    # other spots keep their raw value.
    for (i in seq_len(ngenes)) {
        if (RG$R[i,k] > 0 && Rsbg[i] > 0) {
            RGmodel$R[i] <- .expectedBayesianAdjustedFG(fg = RG$R[i,k],
                bg = RG$Rb[i,k], sfg = Rsfg[i], sbg = Rsbg[i])
        }
        else {
            RGmodel$R[i] <- RG$R[i,k]
        }
        if (RG$G[i,k] > 0 && Gsbg[i] > 0) {
            RGmodel$G[i] <- .expectedBayesianAdjustedFG(fg = RG$G[i,k],
                bg = RG$Gb[i,k], sfg = Gsfg[i], sbg = Gsbg[i])
        }
        else {
            RGmodel$G[i] <- RG$G[i,k]
        }
    }
    # Values beyond the 16-bit scanner range are unreliable: mark as NA.
    RGmodel$R[RGmodel$R > 2^16] <- NA
    RGmodel$G[RGmodel$G > 2^16] <- NA
    RGmodel
}
.getas <- function (RG, j)
{
# Estimate the channel-specific scale factors 'a' for array j by regressing
# the empirical neighbourhood SD of the background (.varaux1) on the
# reported per-spot background standard error, separately for the green
# (532) and red (635) channels.  Returns c(a_green, a_red).
c1 <- RG$other$"B532 SD"[, j] / sqrt(RG$other$"B Pixels"[, j])
c2 <- RG$other$"B635 SD"[, j] / sqrt(RG$other$"B Pixels"[, j])
b1 <- .varaux1(RG$other$"B532 Mean"[, j], RG$printer)
b2 <- .varaux1(RG$other$"B635 Mean"[, j], RG$printer)
# Weighted regression through the origin; the slope is the scale factor.
fit1 <- lm(b1 ~ c1 - 1, weights = 1 / (c1 + 1))
fit2 <- lm(b2 ~ c2 - 1, weights = 1 / (c2 + 1))
c(fit1$coefficients, fit2$coefficients)
}
# Calculate empirical standard deviation for each spot (based on average of spot and 4 neighbours)
.varaux1 <- function (bg, layout)
{
# Empirical neighbourhood SD of the background for every spot, computed
# block (print-tip grid) by block and concatenated in block order.
# bg: background values for one array/channel; layout: printer layout with
# ngrid.r/ngrid.c (grid dimensions) and nspot.r/nspot.c (spots per grid).
numblocks <- layout$ngrid.c * layout$ngrid.r
block <- rep(seq_len(numblocks), each = layout$nspot.r * layout$nspot.c)
# lapply + unlist replaces the old pattern of growing the result with
# c() inside a loop (quadratic copying); output order is unchanged.
unlist(lapply(seq_len(numblocks), function(i)
.varaux2(bg, block, i, layout$nspot.c, layout$nspot.r)))
}
# Average the standard deviations
.varaux2 <- function (bg, block, i, ncols, nrows)
{
# For print-tip block i, compute for every spot the standard deviation of
# the background values over the spot and its nearest horizontal/vertical
# neighbours, returned as a vector in column-major (as.vector) order.
# bg: background values for the whole array; block: block index per spot;
# ncols/nrows: spots per block.  Despite the name, VAR holds standard
# deviations (sqrt of the neighbourhood variance).
# NOTE(review): the edge/interior index arithmetic assumes ncols >= 3 and
# nrows >= 3 -- confirm against the printer layouts used by callers.
v1 <- bg[block == i]
v2 <- matrix(v1, ncol = ncols)
# mid grid spot variances
# Each v4* below is the interior submatrix shifted by one position, so row
# r of v4x holds an interior spot together with its four neighbours.
v4a <- v2[c(-1, -nrows), c(-1, -ncols)]
v4b <- v2[c(-1, -2), c(-1, -ncols)]
v4c <- v2[c(-1, -nrows), c(-1, -2)]
v4d <- v2[c(-(nrows - 1), -nrows), c(-1, -ncols)]
v4e <- v2[c(-1, -nrows), c(-(ncols - 1), -ncols)]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d), as.vector(v4e))
VAR <- matrix(0, ncol = ncols, nrow = nrows)
mid.var <- apply(v4x, 1, FUN = var)
VAR[2:(nrows - 1), 2:(ncols - 1)] <- sqrt(mid.var)
# edge spot variances (edge spots have only three in-grid neighbours)
# top
v4a <- v2[1, c(-1, -ncols)]
v4b <- v2[1, c(-(ncols - 1), -ncols)]
v4c <- v2[2, c(-1, -ncols)]
v4d <- v2[1, c(-1, -2)]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[1, 2:(ncols - 1)] <- sqrt(edge)
# bottom
v4a <- v2[nrows, c(-1, -ncols)]
v4b <- v2[nrows, c(-(ncols - 1), -ncols)]
v4c <- v2[nrows - 1, c(-1, -ncols)]
v4d <- v2[nrows, c(-1, -2)]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[nrows, 2:(ncols - 1)] <- sqrt(edge)
# left
v4a <- v2[c(-1, -nrows), 1]
v4b <- v2[c(-(nrows - 1), -nrows), 1]
v4c <- v2[c(-1, -nrows), 2]
v4d <- v2[c(-1, -2), 1]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[2:(nrows - 1), 1] <- sqrt(edge)
# right
v4a <- v2[c(-1, -nrows), ncols]
v4b <- v2[c(-(nrows - 1), -nrows), ncols]
v4c <- v2[c(-1, -nrows), ncols - 1]
v4d <- v2[c(-1, -2), ncols]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[2:(nrows - 1), ncols] <- sqrt(edge)
# corners (two in-grid neighbours each; rows of v4x are the four corners)
v4x <- cbind(c(v2[1, 1], v2[1, ncols], v2[nrows, 1], v2[nrows,
ncols]), c(v2[1, 2], v2[1, ncols - 1], v2[nrows, 2],
v2[nrows, ncols - 1]), c(v2[2, 1], v2[2, ncols], v2[nrows -
1, 1], v2[nrows - 1, ncols]), c(v2[2, 2], v2[2, ncols -
1], v2[nrows - 1, 2], v2[nrows - 1, ncols - 1]))
corner <- apply(v4x, 1, FUN = var)
VAR[1, 1] <- sqrt(corner[1])
VAR[1, ncols] <- sqrt(corner[2])
VAR[nrows, 1] <- sqrt(corner[3])
VAR[nrows, ncols] <- sqrt(corner[4])
as.vector(VAR)
}
.expectedBayesianAdjustedFG <- function(fg, bg, sfg, sbg)
{
# Posterior expectation of the true signal given observed foreground fg and
# background bg with standard errors sfg and sbg: the numerator integral
# over a +/- 4 SD window (truncated at zero) divided by the normalising
# constant from .denominatorBayesianAdjustedFG().
stot <- sqrt(sfg^2 + sbg^2)
lower <- ifelse((fg - bg - 4 * stot) < 0, 0, fg - bg - 4 * stot)
# If even the upper limit would be negative, fall back to a fixed window.
upper <- ifelse((fg - bg + 4 * stot) < 0, 1000, fg - bg + 4 * stot)
num <- integrate(.numeratorBayesianAdjustedFG, lower, upper,
fg = fg, bg = bg, sfg = sfg, sbg = sbg, subdivisions = 10000)$value
num / .denominatorBayesianAdjustedFG(fg, bg, sfg, sbg)
}
.numeratorBayesianAdjustedFG <- function(ut, fg, bg, sfg, sbg)
{
# Integrand of the posterior-mean numerator: ut times the joint density,
# combined on the log scale to avoid underflow.  Vectorised in ut so it
# can be passed directly to integrate().
s2 <- sfg^2 + sbg^2
log.dens <- dnorm((fg - ut - bg) / sqrt(s2), log = TRUE)
log.prob <- pnorm(((fg - ut) * sbg^2 + bg * sfg^2) / (sbg * sfg * sqrt(s2)),
log.p = TRUE)
ut * exp(log.dens + log.prob)
}
.denominatorBayesianAdjustedFG <- function(fg, bg, sfg, sbg)
{
# Normalising constant of the posterior: the background-foreground
# convolution integrated over a +/- 4 SD window around the observed
# background, truncated at zero, then rescaled.
lower <- ifelse((bg - 4 * sbg) < 0, 0, bg - 4 * sbg)
upper <- bg + 4 * sbg
conv <- integrate(.normalConvolution, lower, upper,
fg = fg, bg = bg, sfg = sfg, sbg = sbg, subdivisions = 10000)$value
sqrt(sfg^2 + sbg^2) / sbg * conv
}
.normalConvolution <- function(v, fg, bg, sfg, sbg)
{
# Upper-tail probability of the foreground beyond v times the background
# density at v, combined on the log scale to avoid underflow; vectorised
# in v for use with integrate().
log.tail <- pnorm((fg - v) / sfg, log.p = TRUE)
log.dens <- dnorm((bg - v) / sbg, log = TRUE)
exp(log.tail + log.dens)
}
| /R/background-kooperberg.R | no_license | hdeberg/limma | R | false | false | 6,960 | r | ## KOOPERBERG.R
# KOOPERBERG BACKGROUND ADJUSTMENT FOR GENEPIX DATA
kooperberg <- function (RG, a = TRUE, layout=RG$printer, verbose=TRUE)
# Kooperberg model-based Bayesian background correction for GenePix data.
# RG: RGList from read.maimages() with the GenePix quality columns listed
#   below in RG$other; a: estimate the variance scale factors (TRUE) or use 1;
# layout: array layout (defaults to RG$printer); verbose: report progress.
# Returns the RGList with backgrounds absorbed into adjusted foregrounds.
# Matt Ritchie
# Charles Kooperberg contributed 'a' estimation functions (.getas, .varaux1, .varaux2)
# Last modified 31 October 2005
{
if(!is(RG,"RGList")) stop("RG must be an RGList object")
if(is.null(layout))
stop("\nNeed to specify array layout")
# All GenePix quality columns required by the model must be present;
# [[ ]] uses exact name matching (the old $"name" chain matched partially).
required <- c("F635 SD", "B635 SD", "F532 SD", "B532 SD",
"B532 Mean", "B635 Mean", "F Pixels", "B Pixels")
missing.col <- vapply(required, function(nm) is.null(RG$other[[nm]]), logical(1))
if (any(missing.col))
stop("\nData missing from RG$other: re-run read.maimages with\n other=c(\"F635 SD\",\"B635 SD\",\"F532 SD\",\"B532 SD\",\"B532 Mean\",\"B635 Mean\",\"F Pixels\",\"B Pixels\")")
nslides <- dim(RG)[2]
# seq_len() is safe for a zero-column object, unlike 1:nslides.
# (The previously computed 'ngenes' was never used and has been removed.)
for (i in seq_len(nslides)) {
temp <- .bayesianAdjustedFG(RG, i, a)
RG$R[, i] <- temp$R
RG$G[, i] <- temp$G
if(verbose)
cat("Corrected array", i, "\n")
}
# Backgrounds are absorbed into the model-adjusted foregrounds.
RG$Rb <- RG$Gb <- NULL
RG
}
.bayesianAdjustedFG <- function (RG, k, a = TRUE)
# Matt Ritchie
# 18 June 2003. Last modified 22 May 2006.
{
ngenes <- dim(RG)[1] # get number of probes
Y <- rep(0, ngenes)
RGmodel <- new("RGList", list(R = Y, G = Y, Rb=NULL, Gb=NULL))
if (a) {
aparams <- .getas(RG, k)
}
else {
aparams <- c(1, 1)
}
Rsfg = aparams[2] * RG$other$"F635 SD"[,k]/sqrt(RG$other$"F Pixels"[,k])
Rsbg = aparams[2] * RG$other$"B635 SD"[,k]/sqrt(RG$other$"B Pixels"[,k])
Gsfg = aparams[1] * RG$other$"F532 SD"[,k]/sqrt(RG$other$"F Pixels"[,k])
Gsbg = aparams[1] * RG$other$"B532 SD"[,k]/sqrt(RG$other$"B Pixels"[,k])
for (i in 1:ngenes) {
if (RG$R[i,k] > 0 & Rsbg[i] > 0) {
RGmodel$R[i] <- .expectedBayesianAdjustedFG(fg = RG$R[i,k],
bg = RG$Rb[i,k], sfg = Rsfg[i], sbg = Rsbg[i])
}
else {
RGmodel$R[i] <- RG$R[i,k]
}
if (RG$G[i,k] > 0 & Gsbg[i] > 0) {
RGmodel$G[i] <- .expectedBayesianAdjustedFG(fg = RG$G[i,k],
bg = RG$Gb[i,k], sfg = Gsfg[i], sbg = Gsbg[i])
}
else {
RGmodel$G[i] <- RG$G[i,k]
}
}
RGmodel$R[RGmodel$R > 2^16] <- NA
RGmodel$G[RGmodel$G > 2^16] <- NA
RGmodel
}
.getas <- function (RG, j)
{
b1 <- .varaux1(RG$other$"B532 Mean"[,j], RG$printer)
b2 <- .varaux1(RG$other$"B635 Mean"[,j], RG$printer)
c1 <- RG$other$"B532 SD"[,j]/sqrt(RG$other$"B Pixels"[,j])
c2 <- RG$other$"B635 SD"[,j]/sqrt(RG$other$"B Pixels"[,j])
m1 <- lm(b1 ~ c1 - 1, weights = 1/(c1 + 1))
m2 <- lm(b2 ~ c2 - 1, weights = 1/(c2 + 1))
c(m1$coefficients, m2$coefficients)
}
# Calculate empirical standard deviation for each spot (based on average of spot and 4 neighbours)
.varaux1 <- function (bg, layout)
{
numblocks <- layout$ngrid.c * layout$ngrid.r
block <- rep(1:numblocks, each=layout$nspot.r*layout$nspot.c)
uu <- .varaux2(bg, block, 1, layout$nspot.c, layout$nspot.r)
if (numblocks > 1) {
for (i in 2:numblocks) {
uu <- c(uu, .varaux2(bg, block, i, layout$nspot.c, layout$nspot.r))
}
}
uu
}
# Average the standard deviations
.varaux2 <- function (bg, block, i, ncols, nrows)
{
v1 <- bg[block == i]
v2 <- matrix(v1, ncol = ncols)
# mid grid spot variances
v4a <- v2[c(-1, -nrows), c(-1, -ncols)]
v4b <- v2[c(-1, -2), c(-1, -ncols)]
v4c <- v2[c(-1, -nrows), c(-1, -2)]
v4d <- v2[c(-(nrows - 1), -nrows), c(-1, -ncols)]
v4e <- v2[c(-1, -nrows), c(-(ncols - 1), -ncols)]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d), as.vector(v4e))
VAR <- matrix(0, ncol = ncols, nrow = nrows)
mid.var <- apply(v4x, 1, FUN = var)
VAR[2:(nrows - 1), 2:(ncols - 1)] <- sqrt(mid.var)
# edge spot variances
# top
v4a <- v2[1, c(-1, -ncols)]
v4b <- v2[1, c(-(ncols - 1), -ncols)]
v4c <- v2[2, c(-1, -ncols)]
v4d <- v2[1, c(-1, -2)]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[1, 2:(ncols - 1)] <- sqrt(edge)
# bottom
v4a <- v2[nrows, c(-1, -ncols)]
v4b <- v2[nrows, c(-(ncols - 1), -ncols)]
v4c <- v2[nrows - 1, c(-1, -ncols)]
v4d <- v2[nrows, c(-1, -2)]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[nrows, 2:(ncols - 1)] <- sqrt(edge)
# left
v4a <- v2[c(-1, -nrows), 1]
v4b <- v2[c(-(nrows - 1), -nrows), 1]
v4c <- v2[c(-1, -nrows), 2]
v4d <- v2[c(-1, -2), 1]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[2:(nrows - 1), 1] <- sqrt(edge)
# right
v4a <- v2[c(-1, -nrows), ncols]
v4b <- v2[c(-(nrows - 1), -nrows), ncols]
v4c <- v2[c(-1, -nrows), ncols - 1]
v4d <- v2[c(-1, -2), ncols]
v4x <- cbind(as.vector(v4a), as.vector(v4b), as.vector(v4c),
as.vector(v4d))
edge <- apply(v4x, 1, FUN = var)
VAR[2:(nrows - 1), ncols] <- sqrt(edge)
# corners
v4x <- cbind(c(v2[1, 1], v2[1, ncols], v2[nrows, 1], v2[nrows,
ncols]), c(v2[1, 2], v2[1, ncols - 1], v2[nrows, 2],
v2[nrows, ncols - 1]), c(v2[2, 1], v2[2, ncols], v2[nrows -
1, 1], v2[nrows - 1, ncols]), c(v2[2, 2], v2[2, ncols -
1], v2[nrows - 1, 2], v2[nrows - 1, ncols - 1]))
corner <- apply(v4x, 1, FUN = var)
VAR[1, 1] <- sqrt(corner[1])
VAR[1, ncols] <- sqrt(corner[2])
VAR[nrows, 1] <- sqrt(corner[3])
VAR[nrows, ncols] <- sqrt(corner[4])
as.vector(VAR)
}
.expectedBayesianAdjustedFG <- function(fg, bg, sfg, sbg)
{
integrate(.numeratorBayesianAdjustedFG, ifelse((fg-bg-4*sqrt(sbg^2+sfg^2))<0, 0, fg-bg-4*sqrt(sbg^2+sfg^2)),
ifelse((fg-bg+4*sqrt(sfg^2+sbg^2))<0, 1000, fg-bg+4*sqrt(sfg^2+sbg^2)) , fg=fg, bg=bg, sfg=sfg, sbg=sbg, subdivisions=10000)$value/.denominatorBayesianAdjustedFG(fg, bg, sfg, sbg)
}
.numeratorBayesianAdjustedFG <- function(ut, fg, bg, sfg, sbg)
ut*exp(dnorm((fg-ut-bg)/sqrt(sfg^2+sbg^2), log=TRUE)+pnorm(((fg-ut)*sbg^2+bg*sfg^2)/(sbg*sfg*sqrt(sfg^2+sbg^2)), log.p=TRUE))
.denominatorBayesianAdjustedFG <- function(fg, bg, sfg, sbg)
{
sqrt(sfg^2+sbg^2) / sbg * integrate(.normalConvolution,
ifelse((bg-4*sbg)<0, 0, bg-4*sbg),
bg+4*sbg, fg=fg, bg=bg, sfg=sfg,
sbg=sbg, subdivisions=10000)$value
}
.normalConvolution <- function(v, fg, bg, sfg, sbg)
exp(pnorm((fg-v)/sfg, log.p=TRUE)+dnorm((bg-v)/sbg, log=TRUE))
|
# The six basic symbol types provided by symbols(): circles, squares,
# rectangles, stars, thermometers and boxplots, drawn in six columns of
# four symbols each, with randomly generated sizes.
set.seed(123)  # make the runif() draws below reproducible
par(mar = c(6, 1, 0.1, 0.1), las = 2)
# Empty canvas; each symbol family is then added onto it column by column.
plot(1:6, rep(1, 6), type = "n", axes = FALSE,
ann = FALSE, xlim = c(0.8, 7.2), ylim = c(0, 5),
panel.first = grid())
x = seq(1, 2, length = 4)  # x positions of the four symbols in a column
# inches = FALSE keeps all symbol sizes in user coordinates.
symbols(x, 1:4, circles = runif(4, 0.1, 0.6),
add = TRUE, bg = heat.colors(4), inches = FALSE)
symbols(x + 1, 1:4, squares = runif(4, 0.1,0.6), add = TRUE,
bg = terrain.colors(4), inches = FALSE)
symbols(x + 2, 1:4, rect = matrix(runif(8,0.1, 0.6), 4), add = TRUE,
bg = rainbow(4), inches = FALSE)
symbols(x + 3, 1:4, stars = matrix(runif(20, 0.1, 0.6), 4), add = TRUE,
bg = topo.colors(4), inches = FALSE)
symbols(x + 4, 1:4, therm = matrix(runif(12, 0.1, 0.7), 4), add = TRUE, inches = FALSE)
symbols(x + 5, 1:4, boxplot = matrix(runif(20, 0.1, 0.7), 4), add = TRUE, inches = FALSE)
# Label each column with its symbol type.
axis(1, 1:6, c("circles", "squares", "rectangles",
"stars", "thermometers", "boxplots"), cex.axis = 0.85)
| /inst/examples/symbols_all.R | no_license | yihui/MSG | R | false | false | 966 | r | # 符号图提供的六种基本符号
set.seed(123)
par(mar = c(6, 1, 0.1, 0.1), las = 2)
plot(1:6, rep(1, 6), type = "n", axes = FALSE,
ann = FALSE, xlim = c(0.8, 7.2), ylim = c(0, 5),
panel.first = grid())
x = seq(1, 2, length = 4)
symbols(x, 1:4, circles = runif(4, 0.1, 0.6),
add = TRUE, bg = heat.colors(4), inches = FALSE)
symbols(x + 1, 1:4, squares = runif(4, 0.1,0.6), add = TRUE,
bg = terrain.colors(4), inches = FALSE)
symbols(x + 2, 1:4, rect = matrix(runif(8,0.1, 0.6), 4), add = TRUE,
bg = rainbow(4), inches = FALSE)
symbols(x + 3, 1:4, stars = matrix(runif(20, 0.1, 0.6), 4), add = TRUE,
bg = topo.colors(4), inches = FALSE)
symbols(x + 4, 1:4, therm = matrix(runif(12, 0.1, 0.7), 4), add = TRUE, inches = FALSE)
symbols(x + 5, 1:4, boxplot = matrix(runif(20, 0.1, 0.7), 4), add = TRUE, inches = FALSE)
axis(1, 1:6, c("circles", "squares", "rectangles",
"stars", "thermometers", "boxplots"), cex.axis = 0.85)
|
################################################################################
# LNR variance linear in mean parameterization
################################################################################
start.model <- function(dati,pnams,Bounds) {
# Starting values for the LNR model with variance linear in the mean.
# dati, pnams: data and parameter names forwarded to sfun() (defined
# elsewhere); Bounds is part of the common model interface but unused here.
# NOTE(review): assumes sfun() returns a list with numeric components
# m (means) and v (variances) -- confirm against sfun's definition.
p<- sfun(dati,pnams,pc=.01,aprior=.5,tertune=.5)
p$a <- min(p$v)/2 # mean ~ variance intercept
tmp <- cbind.data.frame(y=p$v-p$a,x=exp(p$m))
# Regression through the origin; the slope is floored at a small positive
# value so the implied variance stays positive.
p$b <- max(c(coef(lm(y~x-1,tmp)),.0001)) # mean ~ variance slope
p$e <- -p$m # log evidence rate
p$c <- 0 # log criterion
p
}
# Called by sfun to map to the parameterisation specified by model.rc.
# No transformation is needed for this model, so the parameter list is
# passed through unchanged.
M2P <- function(p) p
# Identity mapping from sampled to fitting parameters; pmap and D belong to
# the common model interface but are unused by this model.
P2Mfit <- function(p, pmap, D) p
P2Mnatural <- function(p, pmap, D) {
# Map to the natural parameters used by the density, CDF and ML routines:
# the mean is criterion minus evidence rate, and the variance is linear in
# the exponentiated mean (intercept a, slope b).  pmap and D are part of
# the common interface and unused here.
out <- p
out$m <- out$c - out$e
out$v <- out$a + out$b * exp(out$m)
out
}
| /ModelComparisons/SPENCER/rc/MP-LNR1.R | no_license | sccastro/R_data | R | false | false | 872 | r | ################################################################################
# LNR variance linear in mean parameterization
################################################################################
start.model <- function(dati,pnams,Bounds) {
p<- sfun(dati,pnams,pc=.01,aprior=.5,tertune=.5)
p$a <- min(p$v)/2 # mean ~ variance intercept
tmp <- cbind.data.frame(y=p$v-p$a,x=exp(p$m))
p$b <- max(c(coef(lm(y~x-1,tmp)),.0001)) # mean ~ variance slope
p$e <- -p$m # log evidence rate
p$c <- 0 # log criterion
p
}
M2P <- function(p) {
# called by sfun to change to parameters specified by model.rc
# not required in this case
p
}
P2Mfit <- function(p,pmap,D) {
p
}
P2Mnatural <- function(p,pmap,D) {
# changes to parameters used by density, CDF and ML etc.
p$m <- p$c-p$e
p$v <- p$a + p$b*exp(p$m)
p
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PullTrueProj.R
\name{PullTrueProj}
\alias{PullTrueProj}
\title{Pull True Projections
Not used anywhere... yet.}
\usage{
PullTrueProj(CTLNameList, out, MSEdir)
}
\arguments{
\item{CTLNameList}{Vector of CTL file names}
\item{out}{List output from \code{\link{ReadCTLfile}}}
\item{MSEdir}{Directory containing CTL files}
}
\value{
Plots comparing age-structured estimating models
}
\description{
Pull True Projections
Not used anywhere... yet.
}
\examples{
\dontrun{
MSEdir <- "~/GeneralMSE/Examples/Cod_5_AgeStructure"
OMNames <- c("Cod_AgeStructure_CTL","Cod_Age_Mvary_CTL","Cod_Age_Mvary_estM_CTL")
out <- ReadCTLfile(OMNames[1])
AgeStructureComp(out=out,
CTLNameList=OMNames,
MSEdir=MSEdir,
plotNames=c("Base","Fixed M","Estimate M"))
}
}
| /man/PullTrueProj.Rd | permissive | tengguangliang/GeMS | R | false | true | 873 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PullTrueProj.R
\name{PullTrueProj}
\alias{PullTrueProj}
\title{Pull True Projections
Not used anywhere... yet.}
\usage{
PullTrueProj(CTLNameList, out, MSEdir)
}
\arguments{
\item{CTLNameList}{Vector of CTL file names}
\item{out}{List output from \code{\link{ReadCTLfile}}}
\item{MSEdir}{Directory containing CTL files}
}
\value{
Plots comparing age-structured estimating models
}
\description{
Pull True Projections
Not used anywhere... yet.
}
\examples{
\dontrun{
MSEdir <- "~/GeneralMSE/Examples/Cod_5_AgeStructure"
OMNames <- c("Cod_AgeStructure_CTL","Cod_Age_Mvary_CTL","Cod_Age_Mvary_estM_CTL")
out <- ReadCTLfile(OMNames[1])
AgeStructureComp(out=out,
CTLNameList=OMNames,
MSEdir=MSEdir,
plotNames=c("Base","Fixed M","Estimate M"))
}
}
|
## workhorse function for compiling (collections of) exercises
exams <- function(file, n = 1, nsamp = NULL, dir = NULL, template = "plain",
inputs = NULL, header = list(Date = Sys.Date()), name = NULL,
quiet = TRUE, edir = NULL, tdir = NULL, control = NULL)
# Weave, sample and LaTeX-compile n exam PDFs from Sweave exercise files.
#   file: exercise names, possibly a list of vectors (one exercise is drawn
#     per element); n: number of exams; nsamp: draws per element of 'file';
#   dir: output directory (required when more than one file is produced);
#   template: LaTeX template(s); inputs: extra files copied to the build
#     directory; header: LaTeX header commands (values may be functions of
#     the exam index); name: output base name(s); edir/tdir: exercise and
#     temporary directories; control: display options for solutions.
# Returns, invisibly, an "exams_metainfo" list with one entry per exam.
{
## convenience function
strip_path <- function(file)
sapply(strsplit(file, .Platform$file.sep), tail, 1)
## manage directories:
## - for producing several files an output directory is required
if((n > 1 | length(template) > 1) & is.null(dir)) stop("Please specify an output 'dir'.")
if(!is.null(dir) && !file.exists(dir) && !dir.create(dir))
stop(gettextf("Cannot create output directory '%s'.", dir))
## - further: dir (output), dir_orig (original), dir_temp (temp), dir_pkg (package)
if(!is.null(dir)) dir <- file_path_as_absolute(dir)
dir_orig <- getwd()
on.exit(setwd(dir_orig))
dir_temp <- if(is.null(tdir)) tempfile() else tdir
if(!file.exists(dir_temp) && !dir.create(dir_temp))
stop(gettextf("Cannot create temporary work directory '%s'.", dir_temp))
dir_pkg <- find.package("exams")
## number of available exercises in each element of 'file'
## and number of selected samples per element
nfile <- length(file)
if(is.null(nsamp)) nsamp <- 1
if(length(nsamp) < nfile) nsamp <- rep(nsamp, length.out = nfile)
navail <- sapply(file, length)
if(any(navail < nsamp)) {
ix <- which(navail < nsamp)
warning(paste("Only", navail[ix], "exercise(s) available in element", ix,
"of the 'file' argument. Sampling with replacement will be used in order to obtain",
nsamp[ix], "replications."))
}
## file pre-processing:
## - transform to vector (remember grouping IDs)
## - add paths (generate "foo", "foo.Rnw", "foo.tex", and "path/to/foo.Rnw")
## - check existence (use local files if they exist, otherwise take from package)
## - setup sampling (draw random configuration)
file_id <- rep(seq_along(file), navail)
file_raw <- unlist(file)
file_Rnw <- ifelse(
tolower(substr(file_raw, nchar(file_raw)-3, nchar(file_raw))) != ".rnw",
paste(file_raw, ".Rnw", sep = ""), file_raw)
file_base <- file_path_sans_ext(file_Rnw)
file_tex <- paste(file_base, ".tex", sep = "")
file_path <- if(is.null(edir)) file_Rnw else file.path(edir, file_Rnw)
file_path <- ifelse(file.exists(file_path),
file_path, file.path(dir_pkg, "exercises", file_path))
if(!all(file.exists(file_path))) stop(paste("The following files cannot be found: ",
paste(file_raw[!file.exists(file_path)], collapse = ", "), ".", sep = ""))
## sample_id() draws one random exercise configuration: nsamp[i] draws
## from each element of 'file' (with replacement when too few available).
sample_id <- function() unlist(lapply(unique(file_id), function(i) {
wi <- file_id == i
if(sum(wi) > 1)
sample(which(wi), nsamp[i], replace = navail[i] < nsamp[i])
else
rep(which(wi), length.out = nsamp[i])
}))
## similarly: template pre-processing
template_raw <- template
template_tex <- template_path <- ifelse(
tolower(substr(template, nchar(template)-3, nchar(template))) != ".tex",
paste(template, ".tex", sep = ""), template)
template_base <- file_path_sans_ext(template_tex)
template_path <- ifelse(file.exists(template_tex),
template_tex, file.path(dir_pkg, "tex", template_tex))
if(!all(file.exists(template_path))) stop(paste("The following files cannot be found: ",
paste(template_raw[!file.exists(template_path)], collapse = ", "), ".", sep = ""))
## check for using old templates
if(file.path(dir_pkg, "tex", "exam2.tex") %in% template_path) {
template_path[template_path == file.path(dir_pkg, "tex", "exam.tex")] <- file.path(dir_pkg, "tex", "oexam.tex")
warning(paste(strwrap(paste(
"The template exam2.tex has been adapted to exams2pdf() and is not fully compatible",
"with exams() anymore. Template oexam.tex used instead."
), exdent = 2), collapse = "\n"))
}
if(file.path(dir_pkg, "tex", "solution.tex") %in% template_path) {
template_path[template_path == file.path(dir_pkg, "tex", "solution.tex")] <- file.path(dir_pkg, "tex", "osolution.tex")
warning(paste(strwrap(paste(
"The template solution.tex has been adapted to exams2pdf() and is not fully compatible",
"with exams() anymore. Template osolution.tex used instead."
), exdent = 2), collapse = "\n"))
}
## read template
template <- lapply(template_path, readLines)
## which input types in template?
## input_types() extracts the X from every \exinput{X} line of a template.
input_types <- function(x) {
x <- x[grep("\\exinput", x, fixed = TRUE)]
if(length(x) < 1) stop("templates must specify at least one \\exinput{}")
as.vector(sapply(strsplit(sapply(strsplit(x,
paste("\\exinput{", sep = ""), fixed = TRUE), tail, 1), "}"), head, 1))
}
template_it <- lapply(template, input_types)
template_has_header <- sapply(template_it, function(x) "header" %in% x)
template_has_questionnaire <- sapply(template_it, function(x) "questionnaire" %in% x)
template_has_exercises <- sapply(template_it, function(x) "exercises" %in% x)
## output name processing
if(is.null(name)) name <- strip_path(template_base)
## zero-padded output file names, e.g. "exam01.tex" for n >= 10
make_full_name <- function(name, id, type = "")
paste(name, gsub(" ", "0", format(c(n, id)))[-1], ifelse(type == "", "", "."), type, sep = "")
## convenience function for reading metainfo from compiled exercise
## (parses the \extype/\exsolution/\exname/\extol commands from the .tex)
read_metainfo <- function(file) {
x <- readLines(file)
get_command <- function(command) {
cline <- x[grep(command, x, fixed = TRUE)]
if(length(cline) < 1) NULL else gsub("{", "", strsplit(strsplit(cline[1],
paste(command, "{", sep = ""), fixed = TRUE)[[1]][2], "}")[[1]], fixed = TRUE)
}
type <- match.arg(get_command("\\extype"), c("schoice", "mchoice", "num", "string"))
sol <- get_command("\\exsolution")
nam <- get_command("\\exname")
tol <- get_command("\\extol")
tol <- rep(if(is.null(tol)) 0 else as.numeric(tol), length.out = 2)
sol <- switch(type,
"schoice" = string2mchoice(sol, single = TRUE),
"mchoice" = string2mchoice(sol),
"num" = as.numeric(sol),
"string" = sol
)
slength <- length(sol)
## human-readable solution string (with tolerance interval for "num")
string <- switch(type,
"schoice" = paste(nam, ": ", mchoice2print(sol), sep = ""),
"mchoice" = paste(nam, ": ", mchoice2print(sol), sep = ""),
"num" = if(max(tol) <= 0) {
paste(nam, ": ", sol, sep = "")
} else {
if(slength == 1) {
paste(nam, ": ", sol, " (", sol - tol[1], "--", sol + tol[2], ")", sep = "")
} else {
paste(nam, ": [", sol[1], ", ", sol[2], "] ([", sol[1] - tol[1], "--", sol[1] + tol[2], ", ",
sol[2] - tol[1], "--", sol[2] + tol[2], "])", sep = "")
}
},
"string" = paste(nam, ": ", paste(sol, collapse = "\n"), sep = "")
)
list(type = type,
length = slength,
solution = sol,
tolerance = tol,
string = string)
}
## merge user 'control' display options with the defaults
control.default <- list(mchoice.print = list(True = letters[1:5], False = rep("", 5)),
mchoice.symbol = c(True = "X", False = " "))
if (is.null(control)) control <- control.default
else if (is.list(control)) {
control <- c(control, control.default[!c("mchoice.print", "mchoice.symbol") %in% names(control)])
if (!all(sapply(control, function(x) identical(c("False", "True"), sort(names(x))))))
stop("'control' not correctly specified")
control$mchoice.print <- lapply(control$mchoice.print, function(x) rep(x, 5))
} else stop("'control' must be NULL or a list")
## convenience functions for writing LaTeX
mchoice2quest <- function(x) paste(" \\item \\exmchoice{",
paste(ifelse(x, control$mchoice.symbol[["True"]], control$mchoice.symbol[["False"]]), collapse = "}{"), "}", sep = "")
num2quest <- function(x) {
rval <- paste(" \\item \\exnum{",
paste(strsplit(format(c(100000.000, x), nsmall = 3, scientific = FALSE)[-1], "")[[1]][-7],
collapse = "}{"), "}", sep = "")
if(length(x) > 1) rval <- paste(rval, " \\\\\n \\exnum{",
paste(strsplit(format(c(100000.000, x), nsmall = 3, scientific = FALSE)[-1], "")[[2]][-7],
collapse = "}{"), "}", sep = "")
rval
}
string2quest <- function(x) paste(" \\item \\exstring{", x, "}", sep = "")
mchoice2print <- function(x) paste(ifelse(x, control$mchoice.print[["True"]], control$mchoice.print[["False"]]), collapse = "")
## take everything to temp dir
file.copy(file_path, dir_temp)
## including further inputs (if any)
if(!is.null(inputs)) {
inputs_path <- ifelse(file.exists(inputs), inputs, file.path(edir, inputs))
if(!all(file.exists(inputs_path))) stop(paste("The following inputs cannot be found: ",
paste(inputs[!file.exists(inputs_path)], collapse = ", "), ".", sep = ""))
file.copy(inputs_path, dir_temp)
}
setwd(dir_temp)
on.exit(unlink(dir_temp), add = TRUE)
## call Sweave and LaTeX, copy and collect results
metainfo <- list()
for(i in 1:n) {
## select exercise files, run Sweave, collect results
id <- sample_id()
for(j in id) Sweave(file_Rnw[j], quiet = quiet) ## FIXME: need envir argument
metainfo1 <- list()
for(j in seq_along(id)) metainfo1[[j]] <- read_metainfo(file_tex[id[j]])
names(metainfo1) <- file_base[id]
metainfo[[i]] <- metainfo1
## assign names
names(metainfo)[i] <- make_full_name(name[1], i)
out_tex <- make_full_name(name, i, type = "tex")
out_pdf <- make_full_name(name, i, type = "pdf")
## compile output files for all templates
for(j in seq_along(template)) {
tmpl <- template[[j]]
## input header: replace \exinput{header} with the user-supplied commands
if(template_has_header[j]) {
wi <- grep("\\exinput{header}", tmpl, fixed = TRUE)
tmpl[wi] <- if(length(header) < 1) "" else paste("\\", names(header), "{",
sapply(header, function(x) if(is.function(x)) x(i) else as.character(x)), "}",
collapse = "\n", sep = "")
}
## input questionnaire: one enumerate item per exercise solution
if(template_has_questionnaire[j]) {
typ1 <- sapply(metainfo1, function(x) x[["type"]])
sol1 <- lapply(metainfo1, function(x) x[["solution"]])
wi <- grep("\\exinput{questionnaire}", tmpl, fixed = TRUE)
tmpl[wi] <- paste(c(
"\\begin{enumerate}",
sapply(seq_along(typ1), function(i) switch(typ1[i],
schoice = mchoice2quest(sol1[[i]]),
mchoice = mchoice2quest(sol1[[i]]),
num = num2quest(sol1[[i]]),
string = string2quest(sol1[[i]]))),
"\\end{enumerate}", ""), collapse = "\n")
}
## input exercise tex
if(template_has_exercises[j]) {
wi <- grep("\\exinput{exercises}", tmpl, fixed = TRUE)
tmpl[wi] <- paste("\\input{", file_tex[id], "}", sep = "", collapse = "\n")
}
## create and compile output tex
writeLines(tmpl, out_tex[j])
texi2dvi(out_tex[j], pdf = TRUE, clean = TRUE, quiet = quiet)
}
## copy to output directory (or show in the system PDF viewer)
if(!is.null(dir)) {
file.copy(out_pdf, dir, overwrite = TRUE)
} else {
if(.Platform$OS.type == "windows") shell.exec(file.path(dir_temp, out_pdf))
else system(paste(shQuote(getOption("pdfviewer")), shQuote(out_pdf)), wait = FALSE)
}
}
## collect and store meta information
class(metainfo) <- "exams_metainfo"
if(!is.null(dir)) {
save(metainfo, file = file.path(dir, "metainfo.rda"))
## metainfo_df <- as.data.frame(t(sapply(metainfo,
## function(x) as.vector(sapply(x, function(y) y$string)))))
## colnames(metainfo_df) <- paste("exercise", gsub(" ", "0", format(1:ncol(metainfo_df))), sep = "")
## write.table(metainfo_df, file = file.path(dir, "metainfo.csv"), sep = ",")
}
## return meta information invisibly
invisible(metainfo)
}
## print exams_metainfo objects
print.exams_metainfo <- function(x, which = NULL, ...) {
# Print the solution strings of selected exams.
# x: "exams_metainfo" list (one element per exam, each a list of exercise
#   metainfo lists with a $string entry); which: NULL (all exams), a
#   numeric index vector, or a character vector of exam names.
# Returns x invisibly, so the object can be piped/reused after printing.
if (is.null(which)) {
which <- names(x)
} else if (is.numeric(which)) {
which <- names(x)[which]
}
n <- length(x[[1]])
for (i in which) {
cat("\n", i, "\n", sep = "")
# seq_len() avoids iterating over c(1, 0) when an exam has no exercises
# (the old 1:n); format(c(n, j))[-1] zero-pads j to the width of n.
for (j in seq_len(n)) {
cat(" ", format(c(n, j))[-1], ". ", x[[i]][[j]]$string, "\n", sep = "")
}
}
cat("\n")
invisible(x)
}
| /R/exams.R | no_license | MalloryJfeldman/exams | R | false | false | 12,064 | r | ## workhorse function for compiling (collections of) exercises
exams <- function(file, n = 1, nsamp = NULL, dir = NULL, template = "plain",
inputs = NULL, header = list(Date = Sys.Date()), name = NULL,
quiet = TRUE, edir = NULL, tdir = NULL, control = NULL)
{
## convenience function
strip_path <- function(file)
sapply(strsplit(file, .Platform$file.sep), tail, 1)
## manage directories:
## - for producing several files an output directory is required
if((n > 1 | length(template) > 1) & is.null(dir)) stop("Please specify an output 'dir'.")
if(!is.null(dir) && !file.exists(dir) && !dir.create(dir))
stop(gettextf("Cannot create output directory '%s'.", dir))
## - further: dir (output), dir_orig (original), dir_temp (temp), dir_pkg (package)
if(!is.null(dir)) dir <- file_path_as_absolute(dir)
dir_orig <- getwd()
on.exit(setwd(dir_orig))
dir_temp <- if(is.null(tdir)) tempfile() else tdir
if(!file.exists(dir_temp) && !dir.create(dir_temp))
stop(gettextf("Cannot create temporary work directory '%s'.", dir_temp))
dir_pkg <- find.package("exams")
## number of available exercises in each element of 'file'
## and number of selected samples per element
nfile <- length(file)
if(is.null(nsamp)) nsamp <- 1
if(length(nsamp) < nfile) nsamp <- rep(nsamp, length.out = nfile)
navail <- sapply(file, length)
if(any(navail < nsamp)) {
ix <- which(navail < nsamp)
warning(paste("Only", navail[ix], "exercise(s) available in element", ix,
"of the 'file' argument. Sampling with replacement will be used in order to obtain",
nsamp[ix], "replications."))
}
## file pre-processing:
## - transform to vector (remember grouping IDs)
## - add paths (generate "foo", "foo.Rnw", "foo.tex", and "path/to/foo.Rnw")
## - check existence (use local files if they exist, otherwise take from package)
## - setup sampling (draw random configuration)
file_id <- rep(seq_along(file), navail)
file_raw <- unlist(file)
file_Rnw <- ifelse(
tolower(substr(file_raw, nchar(file_raw)-3, nchar(file_raw))) != ".rnw",
paste(file_raw, ".Rnw", sep = ""), file_raw)
file_base <- file_path_sans_ext(file_Rnw)
file_tex <- paste(file_base, ".tex", sep = "")
file_path <- if(is.null(edir)) file_Rnw else file.path(edir, file_Rnw)
file_path <- ifelse(file.exists(file_path),
file_path, file.path(dir_pkg, "exercises", file_path))
if(!all(file.exists(file_path))) stop(paste("The following files cannot be found: ",
paste(file_raw[!file.exists(file_path)], collapse = ", "), ".", sep = ""))
sample_id <- function() unlist(lapply(unique(file_id), function(i) {
wi <- file_id == i
if(sum(wi) > 1)
sample(which(wi), nsamp[i], replace = navail[i] < nsamp[i])
else
rep(which(wi), length.out = nsamp[i])
}))
## similarly: template pre-processing
template_raw <- template
template_tex <- template_path <- ifelse(
tolower(substr(template, nchar(template)-3, nchar(template))) != ".tex",
paste(template, ".tex", sep = ""), template)
template_base <- file_path_sans_ext(template_tex)
template_path <- ifelse(file.exists(template_tex),
template_tex, file.path(dir_pkg, "tex", template_tex))
if(!all(file.exists(template_path))) stop(paste("The following files cannot be found: ",
paste(template_raw[!file.exists(template_path)], collapse = ", "), ".", sep = ""))
## check for using old templates
if(file.path(dir_pkg, "tex", "exam2.tex") %in% template_path) {
template_path[template_path == file.path(dir_pkg, "tex", "exam.tex")] <- file.path(dir_pkg, "tex", "oexam.tex")
warning(paste(strwrap(paste(
"The template exam2.tex has been adapted to exams2pdf() and is not fully compatible",
"with exams() anymore. Template oexam.tex used instead."
), exdent = 2), collapse = "\n"))
}
if(file.path(dir_pkg, "tex", "solution.tex") %in% template_path) {
template_path[template_path == file.path(dir_pkg, "tex", "solution.tex")] <- file.path(dir_pkg, "tex", "osolution.tex")
warning(paste(strwrap(paste(
"The template solution.tex has been adapted to exams2pdf() and is not fully compatible",
"with exams() anymore. Template osolution.tex used instead."
), exdent = 2), collapse = "\n"))
}
## read template
template <- lapply(template_path, readLines)
## which input types in template?
input_types <- function(x) {
x <- x[grep("\\exinput", x, fixed = TRUE)]
if(length(x) < 1) stop("templates must specify at least one \\exinput{}")
as.vector(sapply(strsplit(sapply(strsplit(x,
paste("\\exinput{", sep = ""), fixed = TRUE), tail, 1), "}"), head, 1))
}
template_it <- lapply(template, input_types)
template_has_header <- sapply(template_it, function(x) "header" %in% x)
template_has_questionnaire <- sapply(template_it, function(x) "questionnaire" %in% x)
template_has_exercises <- sapply(template_it, function(x) "exercises" %in% x)
## output name processing
if(is.null(name)) name <- strip_path(template_base)
make_full_name <- function(name, id, type = "")
paste(name, gsub(" ", "0", format(c(n, id)))[-1], ifelse(type == "", "", "."), type, sep = "")
## convenience function for reading metainfo from compiled exercise
## Parse the meta-information commands (\extype, \exsolution, \exname,
## \extol) out of a compiled exercise .tex file and return them as a list
## with a pre-formatted display string. Relies on string2mchoice() and
## mchoice2print(), defined elsewhere in this file.
read_metainfo <- function(file) {
x <- readLines(file)
## extract the brace-delimited argument(s) of a LaTeX command, or NULL
## if the command does not occur in the file
get_command <- function(command) {
cline <- x[grep(command, x, fixed = TRUE)]
if(length(cline) < 1) NULL else gsub("{", "", strsplit(strsplit(cline[1],
paste(command, "{", sep = ""), fixed = TRUE)[[1]][2], "}")[[1]], fixed = TRUE)
}
## exercise type must be one of the four supported kinds
type <- match.arg(get_command("\\extype"), c("schoice", "mchoice", "num", "string"))
sol <- get_command("\\exsolution")
nam <- get_command("\\exname")
tol <- get_command("\\extol")
## tolerance: default 0, always recycled to c(lower, upper)
tol <- rep(if(is.null(tol)) 0 else as.numeric(tol), length.out = 2)
## decode the solution according to the exercise type
sol <- switch(type,
"schoice" = string2mchoice(sol, single = TRUE),
"mchoice" = string2mchoice(sol),
"num" = as.numeric(sol),
"string" = sol
)
slength <- length(sol)
## human-readable summary; numeric answers show their tolerance
## interval(s) when a positive tolerance is set
string <- switch(type,
"schoice" = paste(nam, ": ", mchoice2print(sol), sep = ""),
"mchoice" = paste(nam, ": ", mchoice2print(sol), sep = ""),
"num" = if(max(tol) <= 0) {
paste(nam, ": ", sol, sep = "")
} else {
if(slength == 1) {
paste(nam, ": ", sol, " (", sol - tol[1], "--", sol + tol[2], ")", sep = "")
} else {
paste(nam, ": [", sol[1], ", ", sol[2], "] ([", sol[1] - tol[1], "--", sol[1] + tol[2], ", ",
sol[2] - tol[1], "--", sol[2] + tol[2], "])", sep = "")
}
},
"string" = paste(nam, ": ", paste(sol, collapse = "\n"), sep = "")
)
list(type = type,
length = slength,
solution = sol,
tolerance = tol,
string = string)
}
## Display defaults: letters a-e for printed multiple-choice answers,
## "X"/" " for the questionnaire check boxes.
control.default <- list(mchoice.print = list(True = letters[1:5], False = rep("", 5)),
mchoice.symbol = c(True = "X", False = " "))
## Merge user-supplied control entries with the defaults; every entry
## must be named with exactly "True" and "False" components.
if (is.null(control)) control <- control.default
else if (is.list(control)) {
control <- c(control, control.default[!c("mchoice.print", "mchoice.symbol") %in% names(control)])
if (!all(sapply(control, function(x) identical(c("False", "True"), sort(names(x))))))
stop("'control' not correctly specified")
control$mchoice.print <- lapply(control$mchoice.print, function(x) rep(x, 5))
} else stop("'control' must be NULL or a list")
## convenience functions for writing LaTeX
## questionnaire line for a (multiple-)choice answer: one box per option
mchoice2quest <- function(x) paste(" \\item \\exmchoice{",
paste(ifelse(x, control$mchoice.symbol[["True"]], control$mchoice.symbol[["False"]]), collapse = "}{"), "}", sep = "")
## questionnaire line for a numeric answer: format against the template
## width "100000.000" and drop the 7th character (the decimal point) so
## each remaining digit gets its own \exnum{} box
num2quest <- function(x) {
rval <- paste(" \\item \\exnum{",
paste(strsplit(format(c(100000.000, x), nsmall = 3, scientific = FALSE)[-1], "")[[1]][-7],
collapse = "}{"), "}", sep = "")
if(length(x) > 1) rval <- paste(rval, " \\\\\n \\exnum{",
paste(strsplit(format(c(100000.000, x), nsmall = 3, scientific = FALSE)[-1], "")[[2]][-7],
collapse = "}{"), "}", sep = "")
rval
}
## questionnaire line for a free-text answer
string2quest <- function(x) paste(" \\item \\exstring{", x, "}", sep = "")
## printable representation of a choice vector (e.g. "ace")
mchoice2print <- function(x) paste(ifelse(x, control$mchoice.print[["True"]], control$mchoice.print[["False"]]), collapse = "")
## take everything to temp dir
file.copy(file_path, dir_temp)
## including further inputs (if any)
if(!is.null(inputs)) {
inputs_path <- ifelse(file.exists(inputs), inputs, file.path(edir, inputs))
if(!all(file.exists(inputs_path))) stop(paste("The following inputs cannot be found: ",
paste(inputs[!file.exists(inputs_path)], collapse = ", "), ".", sep = ""))
file.copy(inputs_path, dir_temp)
}
setwd(dir_temp)
on.exit(unlink(dir_temp), add = TRUE)
## call Sweave and LaTeX, copy and collect results
metainfo <- list()
for(i in 1:n) {
## select exercise files, run Sweave, collect results
id <- sample_id()
for(j in id) Sweave(file_Rnw[j], quiet = quiet) ## FIXME: need envir argument
metainfo1 <- list()
for(j in seq_along(id)) metainfo1[[j]] <- read_metainfo(file_tex[id[j]])
names(metainfo1) <- file_base[id]
metainfo[[i]] <- metainfo1
## assign names
names(metainfo)[i] <- make_full_name(name[1], i)
out_tex <- make_full_name(name, i, type = "tex")
out_pdf <- make_full_name(name, i, type = "pdf")
## compile output files for all templates
for(j in seq_along(template)) {
tmpl <- template[[j]]
## input header
if(template_has_header[j]) {
wi <- grep("\\exinput{header}", tmpl, fixed = TRUE)
tmpl[wi] <- if(length(header) < 1) "" else paste("\\", names(header), "{",
sapply(header, function(x) if(is.function(x)) x(i) else as.character(x)), "}",
collapse = "\n", sep = "")
}
## input questionnaire
if(template_has_questionnaire[j]) {
typ1 <- sapply(metainfo1, function(x) x[["type"]])
sol1 <- lapply(metainfo1, function(x) x[["solution"]])
wi <- grep("\\exinput{questionnaire}", tmpl, fixed = TRUE)
tmpl[wi] <- paste(c(
"\\begin{enumerate}",
sapply(seq_along(typ1), function(i) switch(typ1[i],
schoice = mchoice2quest(sol1[[i]]),
mchoice = mchoice2quest(sol1[[i]]),
num = num2quest(sol1[[i]]),
string = string2quest(sol1[[i]]))),
"\\end{enumerate}", ""), collapse = "\n")
}
## input exercise tex
if(template_has_exercises[j]) {
wi <- grep("\\exinput{exercises}", tmpl, fixed = TRUE)
tmpl[wi] <- paste("\\input{", file_tex[id], "}", sep = "", collapse = "\n")
}
## create and compile output tex
writeLines(tmpl, out_tex[j])
texi2dvi(out_tex[j], pdf = TRUE, clean = TRUE, quiet = quiet)
}
## copy to output directory (or show)
if(!is.null(dir)) {
file.copy(out_pdf, dir, overwrite = TRUE)
} else {
if(.Platform$OS.type == "windows") shell.exec(file.path(dir_temp, out_pdf))
else system(paste(shQuote(getOption("pdfviewer")), shQuote(out_pdf)), wait = FALSE)
}
}
## collect and store meta information
class(metainfo) <- "exams_metainfo"
if(!is.null(dir)) {
save(metainfo, file = file.path(dir, "metainfo.rda"))
## metainfo_df <- as.data.frame(t(sapply(metainfo,
## function(x) as.vector(sapply(x, function(y) y$string)))))
## colnames(metainfo_df) <- paste("exercise", gsub(" ", "0", format(1:ncol(metainfo_df))), sep = "")
## write.table(metainfo_df, file = file.path(dir, "metainfo.csv"), sep = ",")
}
## return meta information invisibly
invisible(metainfo)
}
## print exams_metainfo objects
## Print method for "exams_metainfo" objects: for each selected exam,
## list the human-readable solution string of every exercise.
## 'which' selects exams by name (character) or position (numeric);
## NULL (default) prints all exams. Returns the object invisibly.
print.exams_metainfo <- function(x, which = NULL, ...) {
  if(is.null(which)) {
    shown <- names(x)
  } else if(is.numeric(which)) {
    shown <- names(x)[which]
  } else {
    shown <- which
  }
  n_ex <- length(x[[1]])
  for(exam in shown) {
    cat("\n", exam, "\n", sep = "")
    for(k in 1:n_ex) {
      ## format() against n_ex pads the counter so numbers line up
      cat(" ", format(c(n_ex, k))[-1], ". ", x[[exam]][[k]]$string, "\n", sep = "")
    }
  }
  cat("\n")
  invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{TSS_mm10}
\alias{TSS_mm10}
\title{TSS locations}
\format{A named vector of lengths with one item per chromosome}
\source{
BSGenome package
}
\description{
A dataset containing transcription start site (TSS) locations for the
mm10 mouse genome assembly
}
| /man/TSS_mm10.Rd | no_license | tdanehy/GenomicDistributions | R | false | true | 326 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{TSS_mm10}
\alias{TSS_mm10}
\title{TSS locations}
\format{A named vectors of lengths with one item per chromosome}
\source{
BSGenome package
}
\description{
A dataset containing chromosome sizes for common reference genome assemblies
}
|
# %% ---------------------------------------------------------------
# This script computes the ranking corresponding to each round
# %% ---------------------------------------------------------------
# Global variables
# Input directory holding one "RondaNN" sub-directory per round.
DIR_IN <- "data-input/"
# %% ---------------------------------------------------------------
# Load packages
library(dplyr)
# %% ---------------------------------------------------------------
# Source functions
# Source every helper in functions/ (provides get_players() and
# init_empty_df(), used below).
str_functions <- list.files("functions")
for (string in str_functions) {
source(paste0("functions/",
string))
}
# %% ---------------------------------------------------------------
# Init data frame
# One row per player, one column per round; 30 columns is an upper
# bound on the number of rounds (NOTE(review): confirm 30 is enough).
str_players <- get_players()
int_rows <- length(str_players)
int_columns <- 30
df_results <- init_empty_df(int_rows, int_columns, str_players)
# %% ---------------------------------------------------------------
# Collect the per-round input directories ("Ronda01", "Ronda02", ...)
# as full paths (with trailing slash), sorted so that rounds are
# processed in chronological order.
round_dirs <- list.files(DIR_IN, pattern = "Ronda[0-9]")
str_dirs <- paste0(DIR_IN, sort(round_dirs), "/")
# %% ---------------------------------------------------------------
# feeding df_results: parse every PGN file in every round directory and
# store each player's score (1 / 0.5 / 0) under the round's column.
# Loop over directories
# debugging
string <- str_dirs[3]
for (string in str_dirs) {
  # round label, e.g. "Ronda03", recovered from the directory path
  str_round <- strsplit(x = string, split = "/")[[1]][2]
  # Loop over files
  str_files <- list.files(string)
  if (length(str_files) > 7) {
    stop("There are more than 7 files in ", string, ". Stop.")
  }
  # debugging
  string_file <- str_files[1]
  for (string_file in str_files) {
    # file names follow the pattern "<white>_vs_<black>.pgn"
    str_players_game <- string_file %>%
      strsplit(split = "_vs_") %>%
      unlist()
    # fixed = TRUE so the "." is not treated as a regex wildcard
    str_players_game <- gsub(pattern = ".pgn",
                             replacement = "",
                             x = str_players_game,
                             fixed = TRUE)
    str_player_1 <- str_players_game[1]
    str_player_2 <- str_players_game[2]
    string_pgn <- readLines(paste0(string,
                                   string_file))
    # Sanity check: the [White "..."] tag must match the file name
    int_white <- grep(paste0("\\[White \"", str_player_1, "\"\\]"),
                      string_pgn)
    if (length(int_white) != 1) {
      stop("We cannot find ", str_player_1, " as white player in ",
           paste0(string, string_file))
    }
    # Sanity check: the [Black "..."] tag must match the file name
    int_black <- grep(paste0("\\[Black \"", str_player_2, "\"\\]"),
                      string_pgn)
    if (length(int_black) != 1) {
      stop("We cannot find ", str_player_2, " as black player in ",
           paste0(string, string_file))
    }
    # Finding result: exactly one [Result "..."] header is expected
    int_result <- grep(pattern = "^\\[Result \"", x = string_pgn)
    if (length(int_result) != 1) {
      stop("Expected exactly one line with result pattern in ",
           paste0(string, string_file))
    }
    # strip the surrounding [Result "..."] markup
    string_result <- gsub(pattern = "^\\[Result \"",
                          replacement = "",
                          x = string_pgn[int_result])
    string_result <- gsub(pattern = "\"\\]$",
                          replacement = "",
                          x = string_result)
    if (string_result == "1/2-1/2") {
      float_res_1 <- 0.5
      float_res_2 <- 0.5
    } else if (string_result == "1-0") {
      float_res_1 <- 1
      float_res_2 <- 0
    } else if (string_result == "0-1") {
      float_res_1 <- 0
      float_res_2 <- 1
    } else {
      stop("The result in ", paste0(string, string_file),
           " is not 1/2-1/2, 1-0, or 0-1. Stop.")
    }
    # write both players' scores into the round's column
    df_results[row.names(df_results) == str_player_1,
               str_round] <- float_res_1
    df_results[row.names(df_results) == str_player_2,
               str_round] <- float_res_2
  }
}
# %% ---------------------------------------------------------------
# create and feed df_ranking: cumulative points per player after each round
df_ranking <- df_results
# Highest round number played so far, extracted from the lexicographic
# maximum of the directory names; substring(6, 7) picks the "NN" digits
# of "RondaNN" (assumes that exact naming scheme).
int_max_round <- str_dirs %>%
max() %>%
strsplit(split = "/") %>%
lapply(FUN = function(x) {x[[2]]}) %>%
unlist() %>%
substring(first = 6,
last = 7) %>%
as.integer()
# debugging
str_player <- str_players[1]
for (str_player in str_players) {
for (int_col in 1:int_max_round) {
df_ranking[str_player,int_col] <- sum(df_results[str_player,1:int_col])
}
}
# order players by their cumulative points after the last round
int_order <- order(df_ranking[,names(df_ranking)[int_max_round]],
decreasing = TRUE)
df_ranking <- df_ranking[int_order,]
# %% ---------------------------------------------------------------
# create data frame with number of games played by each player after
# n rounds
df_played <- init_empty_df(int_rows, int_columns, str_players)
# debugging
string <- str_dirs[1]
for (string in str_dirs) {
str_round <- strsplit(x = string, split = "/")[[1]][2]
str_files <- list.files(string)
# NOTE(review): only rounds with exactly 7 files are counted --
# presumably only completed rounds; confirm this is intended.
if (length(str_files) == 7) {
for (string_file in str_files) {
for (str_pl in str_players) {
if (grepl(str_pl, string_file)) {
df_played[str_pl, str_round] <- 1
}
}
}
}
}
# Turn the per-round 0/1 flags into cumulative counts: iterating from the
# last column backwards, each column is replaced by the row sum of all
# columns up to and including it (the originals are still intact when read).
str_col <- "Ronda05"
for (str_col in sort(names(df_played), decreasing = T)) {
df_col <- select(df_played, "Ronda01":str_col) %>% mutate(!!str_col := rowSums(.))
df_played[str_col] <- df_col[str_col]
}
# %% ---------------------------------------------------------------
# last ranking: points and games played after the most recent round
str_last_round <- strsplit(x = max(str_dirs), split = "/")[[1]][2]
df_last_ranking <- data.frame(players = row.names(df_ranking),
points = df_ranking[, str_last_round])
df_last_played <- data.frame(players = row.names(df_played),
played_games = df_played[, str_last_round])
df_last_ranking <- df_last_ranking %>%
left_join(df_last_played, by = "players")
# row.names(df_ranking)
# %% ---------------------------------------------------------------
# save output
write.csv(x = df_ranking,
file = paste0("data-output/ranking.csv"))
write.csv(x = df_played,
file = paste0("data-output/played_games.csv"))
write.csv(x = df_last_ranking,
file = paste0("data-output/ranking_", str_last_round, ".csv"))
| /main.R | no_license | VSablin/Zurich1953 | R | false | false | 6,554 | r | # %% ---------------------------------------------------------------
# This script computes the ranking corresponding to each round
# %% ---------------------------------------------------------------
# Global variables
DIR_IN <- "data-input/"
# %% ---------------------------------------------------------------
# Load packages
library(dplyr)
# %% ---------------------------------------------------------------
# Source functions
str_functions <- list.files("functions")
for (string in str_functions) {
source(paste0("functions/",
string))
}
# %% ---------------------------------------------------------------
# Init data frame
str_players <- get_players()
int_rows <- length(str_players)
int_columns <- 30
df_results <- init_empty_df(int_rows, int_columns, str_players)
# %% ---------------------------------------------------------------
# Getting files with "Ronda[0-9]"
str_dirs <- list.files(DIR_IN)
str_dirs <- grep(pattern = "Ronda[0-9]",
x = str_dirs,
value = TRUE)
# ordering (just in case)
str_dirs <- sort(str_dirs)
# full path
str_dirs <- paste0(DIR_IN,
str_dirs,
"/")
# %% ---------------------------------------------------------------
# feeding df_results
# Loop over directories
# debugging
string <- str_dirs[3]
for (string in str_dirs) {
str_round <- strsplit(x = string, split = "/")[[1]][2]
# Loop over files
str_files <- list.files(string)
if (length(str_files) > 7) {
stop("There are more than 7 files in ", string, ". Stop.")
}
# debugging
string_file <- str_files[1]
for (string_file in str_files) {
str_players_game <- string_file %>%
strsplit(split = "_vs_") %>%
unlist()
str_players_game <- gsub(pattern = ".pgn",
replacement = "",
x = str_players_game)
str_player_1 <- str_players_game[1]
str_player_2 <- str_players_game[2]
string_pgn <- readLines(paste0(string,
string_file))
# Sanity check: white
int_white <- grep(paste0("\\[White \"", str_player_1, "\"\\]"),
string_pgn)
if (length(int_white) != 1) {
stop("We canot find ", str_player_1, " as white player in ", paste0(string,
string_file))
}
# Sanity check: black
int_black <- grep(paste0("\\[Black \"", str_player_2, "\"\\]"),
string_pgn)
if (length(int_black) != 1) {
stop("We canot find ", str_player_2, " as black player in ", paste0(string,
string_file))
}
# Finding result
int_result <- grep(pattern = "^\\[Result \"", x = string_pgn)
if (length(int_result) != 1) {
stop("More than one line with result pattern in", paste0(string,
string_file))
}
string_result <- gsub(pattern = "^\\[Result \"",
replacement = "",
x = string_pgn[int_result])
string_result <- gsub(pattern = "\"\\]$",
replacement = "",
x = string_result)
if (string_result == "1/2-1/2") {
float_res_1 <- 0.5
float_res_2 <- 0.5
} else if (string_result == "1-0") {
float_res_1 <- 1
float_res_2 <- 0
} else if(string_result == "0-1") {
float_res_1 <- 0
float_res_2 <- 1
} else {
stop("The result in ", paste0(string ,"/", string_file), " is not 1/2-1/2, 1-0, or 0-1. Stop.")
}
df_results[row.names(df_results) == str_player_1,
str_round] <- float_res_1
df_results[row.names(df_results) == str_player_2,
str_round] <- float_res_2
}
}
# %% ---------------------------------------------------------------
# create and feed df_ranking
df_ranking <- df_results
int_max_round <- str_dirs %>%
max() %>%
strsplit(split = "/") %>%
lapply(FUN = function(x) {x[[2]]}) %>%
unlist() %>%
substring(first = 6,
last = 7) %>%
as.integer()
# debugging
str_player <- str_players[1]
for (str_player in str_players) {
for (int_col in 1:int_max_round) {
df_ranking[str_player,int_col] <- sum(df_results[str_player,1:int_col])
}
}
int_order <- order(df_ranking[,names(df_ranking)[int_max_round]],
decreasing = TRUE)
df_ranking <- df_ranking[int_order,]
# %% ---------------------------------------------------------------
# create data frame with number of games played by each player after
# n rounds
df_played <- init_empty_df(int_rows, int_columns, str_players)
# debugging
string <- str_dirs[1]
for (string in str_dirs) {
str_round <- strsplit(x = string, split = "/")[[1]][2]
str_files <- list.files(string)
if (length(str_files) == 7) {
for (string_file in str_files) {
for (str_pl in str_players) {
if (grepl(str_pl, string_file)) {
df_played[str_pl, str_round] <- 1
}
}
}
}
}
str_col <- "Ronda05"
for (str_col in sort(names(df_played), decreasing = T)) {
df_col <- select(df_played, "Ronda01":str_col) %>% mutate(!!str_col := rowSums(.))
df_played[str_col] <- df_col[str_col]
}
# %% ---------------------------------------------------------------
# last ranking
str_last_round <- strsplit(x = max(str_dirs), split = "/")[[1]][2]
df_last_ranking <- data.frame(players = row.names(df_ranking),
points = df_ranking[, str_last_round])
df_last_played <- data.frame(players = row.names(df_played),
played_games = df_played[, str_last_round])
df_last_ranking <- df_last_ranking %>%
left_join(df_last_played, by = "players")
# row.names(df_ranking)
# %% ---------------------------------------------------------------
# save output
write.csv(x = df_ranking,
file = paste0("data-output/ranking.csv"))
write.csv(x = df_played,
file = paste0("data-output/played_games.csv"))
write.csv(x = df_last_ranking,
file = paste0("data-output/ranking_", str_last_round, ".csv"))
|
library(shinydashboard)
library(tidyverse)
library(ggplot2)
library(dplyr)
library(usmap)
library(plotly)
library(DT)
library(shinyWidgets)
# load the pre-aggregated NBA datasets used by the dashboard
# (previous comment mentioned "prescription counts" -- copy-paste leftover)
nba_85_to_now_grouped <- readRDS('data/85_to_now_grouped.rds')
nba_14_to_present_merged <- readRDS('data/nba_14_to_present_merged.rds')
# derive the home time zone of each game from its Eastern-Time tip-off
nba_14_to_present_merged <- nba_14_to_present_merged %>%
mutate(
time_zone = case_when(
start_et == "7:00p" | start_et == "7:30p" ~ "Eastern",
start_et == "8:00p" | start_et == "8:30p" ~ "Central",
start_et == "9:00p" | start_et == "9:30p" ~ "Mountain",
start_et == "10:00p" | start_et == "10:30p" ~ "Pacific",
TRUE ~ "Other"
)
)
# UI choice vectors
# NOTE(review): teams_85 is not sorted while team_14 is -- confirm intended.
teams_85 <- unique(nba_85_to_now_grouped$team)
team_14 <- sort(unique(nba_14_to_present_merged$team))
# NOTE(review): both season option vectors are identical; one may be redundant.
season_options_tab1 <- unique(nba_14_to_present_merged$year)
season_options_tab2 <- unique(nba_14_to_present_merged$year)
years <- 1984:2019
| /nba-stats-shiny-dashboard/global.R | no_license | amarsee/nba-effects-of-rest-on-team-stats | R | false | false | 967 | r | library(shinydashboard)
library(tidyverse)
library(ggplot2)
library(dplyr)
library(usmap)
library(plotly)
library(DT)
library(shinyWidgets)
# load datset of prescription counts by state
nba_85_to_now_grouped <- readRDS('data/85_to_now_grouped.rds')
nba_14_to_present_merged <- readRDS('data/nba_14_to_present_merged.rds')
nba_14_to_present_merged <- nba_14_to_present_merged %>%
mutate(
time_zone = case_when(
start_et == "7:00p" | start_et == "7:30p" ~ "Eastern",
start_et == "8:00p" | start_et == "8:30p" ~ "Central",
start_et == "9:00p" | start_et == "9:30p" ~ "Mountain",
start_et == "10:00p" | start_et == "10:30p" ~ "Pacific",
TRUE ~ "Other"
)
)
teams_85 <- unique(nba_85_to_now_grouped$team)
team_14 <- sort(unique(nba_14_to_present_merged$team))
season_options_tab1 <- unique(nba_14_to_present_merged$year)
season_options_tab2 <- unique(nba_14_to_present_merged$year)
years <- 1984:2019
|
#' Replicate expression and return results in a list.
#'
#' Evalulate expression n times then combine results into a list
#'
#' This function runs an expression multiple times, and combines the
#' result into a list. If there are no results, then this function will return
#' a list of length 0 (\code{list()}). This function is equivalent to
#' \code{\link{replicate}}, but will always return results as a list.
#'
#'
#' @keywords manip
#' @param .n number of times to evaluate the expression
#' @param .expr expression to evaluate
#' @param .progress name of the progress bar to use, see \code{\link{create_progress_bar}}
#' @return list of results
#' @export
#' @references Hadley Wickham (2011). The Split-Apply-Combine Strategy for
#' Data Analysis. Journal of Statistical Software, 40(1), 1-29.
#' \url{http://www.jstatsoft.org/v40/i01/}.
#' @examples
#' mods <- rlply(100, lm(y ~ x, data=data.frame(x=rnorm(100), y=rnorm(100))))
#' hist(laply(mods, function(x) summary(x)$r.squared))
rlply <- function(.n, .expr, .progress = "none") {
## Wrap .expr, unevaluated, in a zero-argument function built in the
## caller's frame; .rlply_worker then evaluates it exactly .n times
## whether .expr turns out to be a function or a bare expression.
res <- .rlply_worker(.n, .progress,
eval.parent(substitute(function() .expr)))
res
}
## Shared engine for the r*ply family: evaluate the wrapped expression
## exactly .n times, optionally printing each result (.print) and/or
## discarding results (.discard, used by r_ply).
.rlply_worker <- function(.n, .progress, .expr_wrap, .print = FALSE,
.discard = FALSE) {
if (!is.vector(.n, "numeric") || length(.n) > 1L)
stop(".n must be an integer vector of length 1")
if (.n == 0L)
return (list())
progress <- create_progress_bar(.progress)
progress$init(.n)
on.exit(progress$term())
## wrap() decorates the per-iteration call with print() when requested
if (.print) {
wrap <- function(f) function() { print(f()) }
} else {
wrap <- identity
}
# The logic below is responsible for ascertaining that .expr is evaluated
# exactly .n times, whether it's a function or an expression. (See GitHub
# issue #158.) When the function .rlply_worker is called, the .expr_wrap
# argument is a function that returns the .expr argument passed to the calling
# r*ply function. The .wrapped_expr_to_fun function will convert the
# .expr_wrap argument to a list that contains a function and the result of the
# first evaluation, which is necessary because there seems to be no other way
# to find out if .expr is a function or an expression without evaluating it at
# least once. After that, only .n - 1 further evaluations are necessary.
#
# In addition, results are printed and/or discareded depending on the `wrap`
# function defined above.
fun <- .wrapped_expr_to_fun(.expr_wrap)
f <- wrap(fun$f)
if (.discard) {
## first result was already computed by .wrapped_expr_to_fun; only
## print it (if requested), then run the remaining .n - 1 evaluations
wrap(function() fun$val)()
progress$step()
for(i in seq.int(from = 2L, length.out = .n - 1L)) {
f()
progress$step()
}
invisible(NULL)
} else {
## preallocate, slot in the already-computed first result, then fill
## the rest; list(...) assignment keeps NULL results as elements
result <- vector("list", length = .n)
result[1L] <- list(wrap(function() fun$val)())
progress$step()
for(i in seq.int(from = 2L, length.out = .n - 1L)) {
result[i] <- list(f())
progress$step()
}
result
}
}
#' r*ply helper function
#'
#' Evaluate the wrapper once to discover whether the user passed a
#' function or a bare expression, and return both a callable for the
#' remaining evaluations and the value of this first evaluation.
#'
#' @param .expr_wrap zero-argument function wrapping the user's .expr
#' @return named list with two components. f -- function, val -- result of first
#'   evaluation
#' @noRd
.wrapped_expr_to_fun <- function(.expr_wrap) {
  # This call is necessarily the first evaluation of .expr: there is no
  # way to distinguish a function from an expression without evaluating.
  first_eval <- .expr_wrap()
  if (!is.function(first_eval)) {
    # .expr was a bare expression: reuse the wrapper for subsequent
    # evaluations and this result as the value of the first round.
    return(list(f = .expr_wrap, val = first_eval))
  }
  # .expr was a function: call it once so that exactly one evaluation
  # has happened in either case.
  list(f = first_eval, val = first_eval())
}
| /R/rlply.r | no_license | tentacles-from-outer-space/plyr | R | false | false | 3,855 | r | #' Replicate expression and return results in a list.
#'
#' Evalulate expression n times then combine results into a list
#'
#' This function runs an expression multiple times, and combines the
#' result into a list. If there are no results, then this function will return
#' a list of length 0 (\code{list()}). This function is equivalent to
#' \code{\link{replicate}}, but will always return results as a list.
#'
#'
#' @keywords manip
#' @param .n number of times to evaluate the expression
#' @param .expr expression to evaluate
#' @param .progress name of the progress bar to use, see \code{\link{create_progress_bar}}
#' @return list of results
#' @export
#' @references Hadley Wickham (2011). The Split-Apply-Combine Strategy for
#' Data Analysis. Journal of Statistical Software, 40(1), 1-29.
#' \url{http://www.jstatsoft.org/v40/i01/}.
#' @examples
#' mods <- rlply(100, lm(y ~ x, data=data.frame(x=rnorm(100), y=rnorm(100))))
#' hist(laply(mods, function(x) summary(x)$r.squared))
rlply <- function(.n, .expr, .progress = "none") {
res <- .rlply_worker(.n, .progress,
eval.parent(substitute(function() .expr)))
res
}
.rlply_worker <- function(.n, .progress, .expr_wrap, .print = FALSE,
.discard = FALSE) {
if (!is.vector(.n, "numeric") || length(.n) > 1L)
stop(".n must be an integer vector of length 1")
if (.n == 0L)
return (list())
progress <- create_progress_bar(.progress)
progress$init(.n)
on.exit(progress$term())
if (.print) {
wrap <- function(f) function() { print(f()) }
} else {
wrap <- identity
}
# The logic below is responsible for ascertaining that .expr is evaluated
# exactly .n times, whether it's a function or an expression. (See GitHub
# issue #158.) When the function .rlply_worker is called, the .expr_wrap
# argument is a function that returns the .expr argument passed to the calling
# r*ply function. The .wrapped_expr_to_fun function will convert the
# .expr_wrap argument to a list that contains a function and the result of the
# first evaluation, which is necessary because there seems to be no other way
# to find out if .expr is a function or an expression without evaluating it at
# least once. After that, only .n - 1 further evaluations are necessary.
#
# In addition, results are printed and/or discareded depending on the `wrap`
# function defined above.
fun <- .wrapped_expr_to_fun(.expr_wrap)
f <- wrap(fun$f)
if (.discard) {
wrap(function() fun$val)()
progress$step()
for(i in seq.int(from = 2L, length.out = .n - 1L)) {
f()
progress$step()
}
invisible(NULL)
} else {
result <- vector("list", length = .n)
result[1L] <- list(wrap(function() fun$val)())
progress$step()
for(i in seq.int(from = 2L, length.out = .n - 1L)) {
result[i] <- list(f())
progress$step()
}
result
}
}
#' r*ply helper function
#'
#' Call a function to check if the result is a function or an expression, to
#' support expressions as arguments to the r*ply family.
#'
#' @param .expr_wrap function to call
#' @return named list with two components. f -- function, val -- result of first
#' evaluation
#' @noRd
.wrapped_expr_to_fun <- function(.expr_wrap) {
# When .expr_wrap is evaluated, it will return either a function or an
# expression. In the first case, this function is assigned to the f
# component, and also called once explicitly to assign the val component. In
# the second case, this has been already the first evaluation of .expr -- the
# parameter wrapped by .expr_wrap; the results are reused for the val
# component, and the wrapped function is assigned to f.
res <- .expr_wrap()
if (is.function(res)) {
list(f = res, val = res())
} else {
list(f = .expr_wrap, val = res)
}
}
|
#starts
# Build a labelled two-class dataset from two CSV files and draw a
# random index for a 30% subset.
# NOTE(review): rep(1, 100) assumes each input file has exactly 100
# rows -- confirm against the data files.
stuff1<-read.table("stuff1.txt",header=TRUE,sep=",")
stuff1.label<-rep(1,100)
stuff1<-cbind(stuff1,stuff1.label)
names(stuff1)<-c("weight","height","label")
# second class gets label -1
stuff2<-read.table("stuff2.txt",header=TRUE,sep=",")
stuff2.label<-rep(-1,100)
stuff2<-cbind(stuff2,stuff2.label)
names(stuff2)<-c("weight","height","label")
# stack the two classes and prepend a constant bias column of 1s
stuff1.2<-rbind(stuff1,stuff2)
d.set<-data.frame(cbind(rep(1,200),stuff1.2))
names(d.set)<-c("bias","weight","height","label")
d.set
# sample 30% of the row indices without replacement
# NOTE(review): no set.seed(), so the split is not reproducible; also
# confirm whether 30% is meant to be the training or the test share.
train.index<-sample(nrow(d.set),nrow(d.set)*0.3)
train.index
| /handcode/split_train_and_test_sets.R | no_license | herrj1/R | R | false | false | 542 | r | #starts
stuff1<-read.table("stuff1.txt",header=TRUE,sep=",")
stuff1.label<-rep(1,100)
stuff1<-cbind(stuff1,stuff1.label)
names(stuff1)<-c("weight","height","label")
stuff2<-read.table("stuff2.txt",header=TRUE,sep=",")
stuff2.label<-rep(-1,100)
stuff2<-cbind(stuff2,stuff2.label)
names(stuff2)<-c("weight","height","label")
stuff1.2<-rbind(stuff1,stuff2)
d.set<-data.frame(cbind(rep(1,200),stuff1.2))
names(d.set)<-c("bias","weight","height","label")
d.set
train.index<-sample(nrow(d.set),nrow(d.set)*0.3)
train.index
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energy_util.R
\name{conditional.config.energy2}
\alias{conditional.config.energy2}
\title{Energy of a conditional configuration of states E(Xi | X/Xi).
Uses the feature formulation.}
\usage{
conditional.config.energy2(
theta.par = NULL,
config,
condition.element.number,
crf,
ff,
printQ = FALSE
)
}
\arguments{
\item{config}{A node configuration (state) vector}
\item{condition.element.number}{i of E(Xi | X/Xi)}
\item{ff}{The feature function}
\item{adj.node.list}{XXXX}
\item{edges.mat}{Matrix of connected node edges}
\item{two.lgp}{Log edge potentials (two-body energies)}
}
\value{
The function will XX
}
\description{
Assumes log-potentials are in gRbase format
}
\details{
The function will XXXX
}
| /man/conditional.config.energy2.Rd | no_license | npetraco/CRFutil | R | false | true | 820 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energy_util.R
\name{conditional.config.energy2}
\alias{conditional.config.energy2}
\title{Energy function for energy of a conditional configuration of states E(Xi | X/Xi).
Uses the feature formulation.}
\usage{
conditional.config.energy2(
theta.par = NULL,
config,
condition.element.number,
crf,
ff,
printQ = FALSE
)
}
\arguments{
\item{config}{A node configuration (state) vector}
\item{condition.element.number}{i of E(Xi | X/Xi)}
\item{ff}{The feature function}
\item{adj.node.list}{XXXX}
\item{edges.mat}{Matrix of connected node edges}
\item{two.lgp}{Log edge potentials (two-body energies)}
}
\value{
The function will XX
}
\description{
Assumes log-potentials are in gRbase format
}
\details{
The function will XXXX
}
|
# install packages
# NOTE(review): one-time setup; consider running these interactively
# instead of on every execution of the script.
install.packages("maps")
install.packages("ggplot2")  # was misspelled "gglot2", which fails to install
install.packages("ggmap")
# load ggplot2 and the other mapping / data-manipulation packages
library(ggmap)
library(ggplot2)
library(maptools)
library(datasets)
library(maps)
library(plyr)
library(reshape2)
#Set working directory
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("C:\\Users\\tyler.DESKTOP-7I7VF8T\\Desktop\\OneDrive\\17-18_Spring\\Data Science\\Data")
#TEST
olympic_data <- read.csv("athletes.csv")
country_codes <- read.csv("countries_codes_and_coordinates.csv")
countries <- read.csv("mydata2.csv")
# NOTE(review): sort() on a data.frame errors in current R -- these two
# calls look like interactive leftovers.
sort(olympic_data)
sort(countries)
table(olympic_data$nationality)
tab <- table(olympic_data$nationality, olympic_data$medalcount)
tab
# replace the athlete table by the sorted list of unique country codes
olympic_data <- sort(unique(olympic_data$nationality))
olympic_data <- data.frame(olympic_data)
# NOTE(review): clean_data is not defined anywhere in this script; this
# assignment will fail as written.
olympic_data$medals <- clean_data
olympic_data
countries
# attach coordinates and full country names by matching the 3-letter code
countries$lat <- country_codes$Latitude..average.[match(unlist(countries$Var1) , country_codes$Alpha.3.code)]
countries$long <- country_codes$Longitude..average.[match(unlist(countries$Var1) , country_codes$Alpha.3.code)]
countries$Var1 <- country_codes$Country[match(unlist(countries$Var1) , country_codes$Alpha.3.code)]
countries <- na.omit(countries)
# quick point map: one blue dot per country
mp <- NULL
mapworld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp <- ggplot() + mapworld
mp <- mp + geom_point(aes(x=countries$long, y=countries$lat) ,color="blue", size=1)
mp
colnames(countries) <- c("Index", "Country" ,"Count","Latitude","Longitude")
# Load map data as dataframe
map <- map_data("world")
# Look at the map data
head(map)
# Load dplyr
library(dplyr)
# Join countries and map data
# (polygon vertices per country, ordered for drawing)
countries2 <- countries %>%
left_join(map,
by = c("Country" = "region")) %>%
select(
Country,
Longitude = long,
Latitude = lat,
Group = group,
Order = order,
Count) %>%
arrange(Order) %>%
as.data.frame()
#Look at countries2
tail(countries2)
#write.csv(countries, file = "MyData2.csv")
# Create a choropleth
# (grey base map, country polygons filled by athlete count)
ggplot(
data = countries2) +
borders(
database = "world",
colour = "grey60",
fill = "grey90") +
geom_polygon(
aes(
x = Longitude,
y = Latitude,
group = Group,
fill = Count),
color = "grey60") +
scale_fill_gradient(
low = "white",
high = "red") +
ggtitle("Count of Athletes by Country") +
xlab("") +
ylab("") +
labs(color = "Athletes") +
theme(
panel.background = element_blank(),
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
| /Data-Science/Rio-Olympic-2016/map.R | no_license | Hackel15/Marquette-Projects | R | false | false | 2,715 | r | #install packages
install.packages("maps")
install.packages("gglot2")
install.packages("ggmap")
#load GGPlot2
library(ggmap)
library(ggplot2)
library(maptools)
library(datasets)
library(maps)
library(plyr)
library(reshape2)
#Set working directory
setwd("C:\\Users\\tyler.DESKTOP-7I7VF8T\\Desktop\\OneDrive\\17-18_Spring\\Data Science\\Data")
#TEST
olympic_data <- read.csv("athletes.csv")
country_codes <- read.csv("countries_codes_and_coordinates.csv")
countries <- read.csv("mydata2.csv")
sort(olympic_data)
sort(countries)
table(olympic_data$nationality)
tab <- table(olympic_data$nationality, olympic_data$medalcount)
tab
olympic_data <- sort(unique(olympic_data$nationality))
olympic_data <- data.frame(olympic_data)
olympic_data$medals <- clean_data
olympic_data
countries
countries$lat <- country_codes$Latitude..average.[match(unlist(countries$Var1) , country_codes$Alpha.3.code)]
countries$long <- country_codes$Longitude..average.[match(unlist(countries$Var1) , country_codes$Alpha.3.code)]
countries$Var1 <- country_codes$Country[match(unlist(countries$Var1) , country_codes$Alpha.3.code)]
countries <- na.omit(countries)
mp <- NULL
mapworld <- borders("world", colour="gray50", fill="gray50") # create a layer of borders
mp <- ggplot() + mapworld
mp <- mp + geom_point(aes(x=countries$long, y=countries$lat) ,color="blue", size=1)
mp
# The point map above used the raw `lat`/`long` names; now give the columns
# presentation-ready names for the join below.
colnames(countries) <- c("Index", "Country" ,"Count","Latitude","Longitude")
# Load world polygon outlines as a dataframe.
map <- map_data("world")
# Look at the map data
head(map)
# Load dplyr
library(dplyr)
# Join counts onto the polygon outlines, keep only the columns the choropleth
# needs, and sort by vertex order so the polygons draw correctly.
countries2 <- countries %>%
  left_join(map, by = c("Country" = "region")) %>%
  select(Country,
         Longitude = long,
         Latitude = lat,
         Group = group,
         Order = order,
         Count) %>%
  arrange(Order) %>%
  as.data.frame()
# Look at countries2
tail(countries2)
#write.csv(countries, file = "MyData2.csv")
# Create a choropleth of athlete counts per country.
ggplot(data = countries2) +
  borders(database = "world", colour = "grey60", fill = "grey90") +
  geom_polygon(aes(x = Longitude, y = Latitude, group = Group, fill = Count),
               color = "grey60") +
  scale_fill_gradient(low = "white", high = "red") +
  ggtitle("Count of Athletes by Country") +
  xlab("") +
  ylab("") +
  # BUG FIX: the legend title belongs to the `fill` aesthetic (set in
  # geom_polygon), not `colour`; labs(color = ...) had no visible effect.
  labs(fill = "Athletes") +
  theme(panel.background = element_blank(),
        axis.title.x = element_blank(),
        axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank())
|
# #Generating weights from prior
# #Split into jobs in order to improve speed of the inference
# library(data.table)
# library(abind)
# source("/well/mcvean/mtutert/thesis_code/thesis_code/coalescent_coverage/helper_functions.R")
# file = snakemake@params$replicates
# chunk = snakemake@params$chunk
# population_pairs = snakemake@params$paired_values
# print(population_pairs)
#
# #Get populations (GWAS & Reference)
# split_pops = strsplit(population_pairs, "_")
# gwas_pop = split_pops[[1]][1]
# ref_pop = split_pops[[1]][3]
# fst = split_pops[[1]][5]
#
# #Import the reference & gwas panels in according to the population_pairs string
# print(ref_pop)
# print(gwas_pop)
# ref = as.matrix(fread(sprintf("msprime_data/population_split/%s_replicate_%s.csv", ref_pop, file), header = T))
# gwas = as.matrix(fread(sprintf("msprime_data/population_split/%s_replicate_%s.csv", gwas_pop, file), header = T))
# print(dim(gwas))
# #Perform filtering (removing non-segregating and low freq SNPs)
# res = msprime_gwas_sumstats(gwas_haplotypes = gwas, reference_haplotypes = ref)
# gwas = res[[1]]
# ref = res[[2]]
#
# #Write out GWAS & Ref (matched) tables
# #Note that this DOESN'T have to be done on a per Fst basis (since wont change data structure)
# write.table(gwas,sprintf("msprime_data/OOA/matched_panels/%s_GWAS_matched_to_%s_replicate_%s.csv",gwas_pop,ref_pop,file),quote = F,col.names = T, row.names = F)
# write.table(ref,sprintf("msprime_data/OOA/matched_panels/%s_Ref_matched_to_%s_replicate_%s.csv",ref_pop,gwas_pop,file),quote = F,col.names = T, row.names = F)
# #Back out the correct Fst given which population we are looking at
# nhaps_ref = nrow(ref)
# nhaps_gwas = nrow(gwas)
# nsnps = ncol(ref)
# effective_fst = as.numeric(fst)
#
#
# #Loop across to get the draws I need
# qc_matrix_count = 0
# nSamples = 500
# tol = 1
# while(qc_matrix_count<nSamples) {
# print(qc_matrix_count)
# #Draw from gamma_quantiled_weights nhaps times
# gamma_draw = rgamma(n = nhaps_ref, shape = 1/( nhaps_ref * ( effective_fst / (1-effective_fst))), scale = ( nhaps_ref * (effective_fst/(1-effective_fst))))
# #Extend into matrix
# weight_matrix = matrix(rep(gamma_draw,nsnps), ncol = nsnps)
# #Normalize matrix
# norm_weight_matrix = weight_matrix/colSums(weight_matrix)[col(weight_matrix)]
#
# ####### Remove Ascertainment Bias
# #Ask if the weight matrix we are generating will break our filtering step
# tol = 1e-100
# if (all(colSums(ref*norm_weight_matrix)) > tol & all(colSums(ref*norm_weight_matrix)) < 1-tol) {
# qc_matrix_count = qc_matrix_count + 1
# #cbind the matrix results for AF if we have not generated a good weight matrix yet
# if (qc_matrix_count == 1) {
# print(qc_matrix_count)
# AF_Inferred_Results = colSums(ref*norm_weight_matrix)
# cov = cov.wt(ref,norm_weight_matrix[,1],cor = TRUE, method = "ML")
# LD_Results = as.array(cov$cor)
# }
# else{
# AF_Inferred_Results = cbind(AF_Inferred_Results,colSums(ref*norm_weight_matrix))
# cov = cov.wt(ref,norm_weight_matrix[,1],cor = TRUE, method = "ML")
# LD_Results = abind(LD_Results,cov$cor, along = 3)
# }
# }
# }
#
# #Save object in format Fst_#_Replicate_#_Chunk_#
# saveRDS(object = LD_Results, file = sprintf("results/OOA/%s_GWAS_%s_Ref_%s_LD_Replicate_%s_Chunk_%s.RData",gwas_pop,ref_pop,fst,file,chunk), version = 2)
# saveRDS(object = AF_Inferred_Results, file = sprintf("results/OOA/%s_GWAS_%s_Ref_%s_AF_Replicate_%s_Chunk_%s.RData",gwas_pop,ref_pop,fst,file,chunk), version = 2)
# #
#Generating weights from prior
#Split into jobs in order to improve speed of the inference
library(data.table)
source("/well/mcvean/mtutert/thesis_code/thesis_code/coalescent_coverage/helper_functions.R")
# Snakemake-supplied job parameters: replicate id, chunk id, and a combined
# "<divergence_time>_<fst>" string (one job per divergence/Fst pair).
file = snakemake@params$replicates
chunk = snakemake@params$chunk
divergence_time_fst_parameter = snakemake@params$paired_values #Do this across all divergence/Fst values (grid of graphs in the end)
# Parse the combined parameter once (was previously split twice with
# identical strsplit calls).
param_parts = strsplit(divergence_time_fst_parameter, "_")[[1]]
divergence_time = param_parts[1]
fst_parameter = param_parts[2]
#Import the reference & gwas panels based on the divergence time and the replicate number (done in parallel through snakemake)
ref = as.matrix(fread(sprintf("msprime_data/population_split/Ref_panel_replicate_%s_split_%s.csv", file, divergence_time), header = T))
gwas = as.matrix(fread(sprintf("msprime_data/population_split/GWAS_panel_replicate_%s_split_%s.csv", file, divergence_time), header = T))
#Perform filtering (removing non-segregating and low freq SNPs)
res = msprime_gwas_sumstats(gwas_haplotypes = gwas, reference_haplotypes = ref)
gwas = res[[1]]
ref = res[[2]]
nSamples = 500 # number of prior weight draws generated by this job
#Write out GWAS & REF (matched) tables
write.table(gwas,sprintf("msprime_data/population_split/matched_panels/GWAS_panel_replicate_%s_split_%s.csv",file,divergence_time),quote = F,col.names = T, row.names = F)
write.table(ref,sprintf("msprime_data/population_split/matched_panels/Ref_panel_replicate_%s_split_%s.csv",file,divergence_time),quote = F,col.names = T, row.names = F)
#Back out the correct Fst given divergence time
nhaps_ref = nrow(ref)
nhaps_gwas = nrow(gwas)
nsnps = ncol(ref)
effective_fst = as.numeric(fst_parameter) #Note that these values have already been backed out of the graph
#Preallocate matrix & array to store the inferred AF and LD results
AF_Inferred_Results = matrix(data = NA, nrow = nsnps, ncol = nSamples)
LD_Results = array(data = NA, dim = c(nsnps, nsnps, nSamples))
#Loop across to get the draws I need
for (i in 1:nSamples) {
  print(i)
  #Draw one gamma-distributed weight per reference haplotype
  gamma_draw = rgamma(n = nhaps_ref, shape = 1/( nhaps_ref * ( effective_fst / (1-effective_fst))), scale = ( nhaps_ref * (effective_fst/(1-effective_fst))))
  #Replicate the haplotype weights across all SNP columns
  weight_matrix = matrix(rep(gamma_draw,nsnps), ncol = nsnps)
  #Normalize each column so the weights sum to one
  norm_weight_matrix = weight_matrix/colSums(weight_matrix)[col(weight_matrix)]
  # Weighted allele frequencies and weighted correlation (LD) matrix for
  # this draw; all columns share the same weights, so column 1 is used.
  AF_Inferred_Results[,i] = colSums(ref*norm_weight_matrix)
  cov = cov.wt(ref,norm_weight_matrix[,1],cor = TRUE, method = "ML")
  LD_Results[,,i] = cov$cor
}
#Save object in format Fst_#_Replicate_#_Chunk_#
saveRDS(object = LD_Results, file = sprintf("results/pop_split/%s_split_LD_Replicate_%s_Chunk_%s.RData", divergence_time_fst_parameter, file, chunk), version = 2)
saveRDS(object = AF_Inferred_Results, file = sprintf("results/pop_split/%s_split_AF_Replicate_%s_Chunk_%s.RData",divergence_time_fst_parameter, file, chunk), version = 2)
print("Done Drawing Weights")
| /coalescent_coverage/prior_draws/draw_weights.R | no_license | marcustutert/thesis_code | R | false | false | 6,904 | r | # #Generating weights from prior
# #Split into jobs in order to improve speed of the inference
# library(data.table)
# library(abind)
# source("/well/mcvean/mtutert/thesis_code/thesis_code/coalescent_coverage/helper_functions.R")
# file = snakemake@params$replicates
# chunk = snakemake@params$chunk
# population_pairs = snakemake@params$paired_values
# print(population_pairs)
#
# #Get populations (GWAS & Reference)
# split_pops = strsplit(population_pairs, "_")
# gwas_pop = split_pops[[1]][1]
# ref_pop = split_pops[[1]][3]
# fst = split_pops[[1]][5]
#
# #Import the reference & gwas panels in according to the population_pairs string
# print(ref_pop)
# print(gwas_pop)
# ref = as.matrix(fread(sprintf("msprime_data/population_split/%s_replicate_%s.csv", ref_pop, file), header = T))
# gwas = as.matrix(fread(sprintf("msprime_data/population_split/%s_replicate_%s.csv", gwas_pop, file), header = T))
# print(dim(gwas))
# #Perform filtering (removing non-segregating and low freq SNPs)
# res = msprime_gwas_sumstats(gwas_haplotypes = gwas, reference_haplotypes = ref)
# gwas = res[[1]]
# ref = res[[2]]
#
# #Write out GWAS & Ref (matched) tables
# #Note that this DOESN'T have to be done on a per Fst basis (since wont change data structure)
# write.table(gwas,sprintf("msprime_data/OOA/matched_panels/%s_GWAS_matched_to_%s_replicate_%s.csv",gwas_pop,ref_pop,file),quote = F,col.names = T, row.names = F)
# write.table(ref,sprintf("msprime_data/OOA/matched_panels/%s_Ref_matched_to_%s_replicate_%s.csv",ref_pop,gwas_pop,file),quote = F,col.names = T, row.names = F)
# #Back out the correct Fst given which population we are looking at
# nhaps_ref = nrow(ref)
# nhaps_gwas = nrow(gwas)
# nsnps = ncol(ref)
# effective_fst = as.numeric(fst)
#
#
# #Loop across to get the draws I need
# qc_matrix_count = 0
# nSamples = 500
# tol = 1
# while(qc_matrix_count<nSamples) {
# print(qc_matrix_count)
# #Draw from gamma_quantiled_weights nhaps times
# gamma_draw = rgamma(n = nhaps_ref, shape = 1/( nhaps_ref * ( effective_fst / (1-effective_fst))), scale = ( nhaps_ref * (effective_fst/(1-effective_fst))))
# #Extend into matrix
# weight_matrix = matrix(rep(gamma_draw,nsnps), ncol = nsnps)
# #Normalize matrix
# norm_weight_matrix = weight_matrix/colSums(weight_matrix)[col(weight_matrix)]
#
# ####### Remove Ascertainment Bias
# #Ask if the weight matrix we are generating will break our filtering step
# tol = 1e-100
# if (all(colSums(ref*norm_weight_matrix)) > tol & all(colSums(ref*norm_weight_matrix)) < 1-tol) {
# qc_matrix_count = qc_matrix_count + 1
# #cbind the matrix results for AF if we have not generated a good weight matrix yet
# if (qc_matrix_count == 1) {
# print(qc_matrix_count)
# AF_Inferred_Results = colSums(ref*norm_weight_matrix)
# cov = cov.wt(ref,norm_weight_matrix[,1],cor = TRUE, method = "ML")
# LD_Results = as.array(cov$cor)
# }
# else{
# AF_Inferred_Results = cbind(AF_Inferred_Results,colSums(ref*norm_weight_matrix))
# cov = cov.wt(ref,norm_weight_matrix[,1],cor = TRUE, method = "ML")
# LD_Results = abind(LD_Results,cov$cor, along = 3)
# }
# }
# }
#
# #Save object in format Fst_#_Replicate_#_Chunk_#
# saveRDS(object = LD_Results, file = sprintf("results/OOA/%s_GWAS_%s_Ref_%s_LD_Replicate_%s_Chunk_%s.RData",gwas_pop,ref_pop,fst,file,chunk), version = 2)
# saveRDS(object = AF_Inferred_Results, file = sprintf("results/OOA/%s_GWAS_%s_Ref_%s_AF_Replicate_%s_Chunk_%s.RData",gwas_pop,ref_pop,fst,file,chunk), version = 2)
# #
#Generating weights from prior
#Split into jobs in order to improve speed of the inference
# (This segment is a byte-duplicate of the active part of draw_weights.R.)
library(data.table)
source("/well/mcvean/mtutert/thesis_code/thesis_code/coalescent_coverage/helper_functions.R")
# Snakemake-supplied job parameters: replicate id, chunk id, and a combined
# "<divergence_time>_<fst>" string (one job per divergence/Fst pair).
file = snakemake@params$replicates
chunk = snakemake@params$chunk
divergence_time_fst_parameter = snakemake@params$paired_values #Do this across all divergence/Fst values (grid of graphs in the end)
divergence_time = strsplit(divergence_time_fst_parameter, "_")[[1]][1]
fst_parameter = strsplit(divergence_time_fst_parameter, "_")[[1]][2]
#Import the reference & gwas panels based on the divergence time and the replicate number (done in parallel through snakemake)
ref = as.matrix(fread(sprintf("msprime_data/population_split/Ref_panel_replicate_%s_split_%s.csv", file, divergence_time), header = T))
gwas = as.matrix(fread(sprintf("msprime_data/population_split/GWAS_panel_replicate_%s_split_%s.csv", file, divergence_time), header = T))
#Perform filtering (removing non-segregating and low freq SNPs)
res = msprime_gwas_sumstats(gwas_haplotypes = gwas, reference_haplotypes = ref)
gwas = res[[1]]
ref = res[[2]]
nSamples = 500 # number of prior weight draws generated by this job
#Write out GWAS & REF (matched) tables
write.table(gwas,sprintf("msprime_data/population_split/matched_panels/GWAS_panel_replicate_%s_split_%s.csv",file,divergence_time),quote = F,col.names = T, row.names = F)
write.table(ref,sprintf("msprime_data/population_split/matched_panels/Ref_panel_replicate_%s_split_%s.csv",file,divergence_time),quote = F,col.names = T, row.names = F)
#Back out the correct Fst given divergence time
nhaps_ref = nrow(ref)
nhaps_gwas = nrow(gwas)
nsnps = ncol(ref)
effective_fst = as.numeric(fst_parameter) #Note that these values have already been backed out of the graph
#Create matrix & array to store AF and LD results (preallocated)
AF_Inferred_Results = matrix(data = NA, nrow = nsnps, ncol = nSamples)
LD_Results = array(data = NA, dim = c(nsnps, nsnps, nSamples))
#Loop across to get the draws I need
for (i in 1:nSamples) {
print(i)
#Draw from gamma_quantiled_weights nhaps times (one weight per haplotype)
gamma_draw = rgamma(n = nhaps_ref, shape = 1/( nhaps_ref * ( effective_fst / (1-effective_fst))), scale = ( nhaps_ref * (effective_fst/(1-effective_fst))))
#Extend into matrix (same haplotype weights replicated across SNP columns)
weight_matrix = matrix(rep(gamma_draw,nsnps), ncol = nsnps)
#Normalize matrix so each column of weights sums to one
norm_weight_matrix = weight_matrix/colSums(weight_matrix)[col(weight_matrix)]
# Weighted allele frequencies and weighted correlation (LD) matrix; all
# columns share the same weights, so column 1 is passed to cov.wt().
AF_Inferred_Results[,i] = colSums(ref*norm_weight_matrix)
cov = cov.wt(ref,norm_weight_matrix[,1],cor = TRUE, method = "ML")
LD_Results[,,i] = cov$cor
}
#Save object in format Fst_#_Replicate_#_Chunk_#
saveRDS(object = LD_Results, file = sprintf("results/pop_split/%s_split_LD_Replicate_%s_Chunk_%s.RData", divergence_time_fst_parameter, file, chunk), version = 2)
saveRDS(object = AF_Inferred_Results, file = sprintf("results/pop_split/%s_split_AF_Replicate_%s_Chunk_%s.RData",divergence_time_fst_parameter, file, chunk), version = 2)
print("Done Drawing Weights")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combine_mgNetworks.R
\name{combine_mgNetworks}
\alias{combine_mgNetworks}
\title{Combine Mangal networks.}
\usage{
combine_mgNetworks(...)
}
\arguments{
\item{...}{objects of class \code{mgNetworksCollection} or \code{mgNetwork}, or a list of objects of these classes.}
}
\value{
An object of class \code{mgNetworksCollection}
}
\description{
Combine \code{mgNetworksCollection} and \code{mgNetwork} objects into a
\code{mgNetworksCollection} object.
}
\examples{
mg_19 <- get_collection(search_networks(list(dataset_id = 19)))
mg_lagoon <- get_collection(search_datasets(query='lagoon\%'))
combine_mgNetworks(mg_19, mg_lagoon)
}
| /man/combine_mgNetworks.Rd | no_license | KevCaz/rmangal | R | false | true | 711 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combine_mgNetworks.R
\name{combine_mgNetworks}
\alias{combine_mgNetworks}
\title{Combine Mangal networks.}
\usage{
combine_mgNetworks(...)
}
\arguments{
\item{...}{objects of class \code{mgNetworksCollection} or \code{mgNetwork} or a list #' of objects of these classes.}
}
\value{
An object of class \code{mgNetworksCollection}
}
\description{
Combine \code{mgNetworksCollection} and \code{mgNetwork} objects into a
\code{mgNetworksCollection} object.
}
\examples{
mg_19 <- get_collection(search_networks(list(dataset_id = 19)))
mg_lagoon <- get_collection(search_datasets(query='lagoon\%'))
combine_mgNetworks(mg_19, mg_lagoon)
}
|
library(shiny)
# Project-local helpers: get_edges(), get_nodes(), communityDetection(),
# by.community(), and the visNetwork imports.
source("functionsGraph.R")
# UI: three-tab app for exploring family networks (the original boilerplate
# comment about "drawing a histogram" did not describe this app).
ui <- fluidPage(
# Top-level navigation bar, one tab per visualisation
navbarPage (
"Network Graphs",
# Tab 1: interactive visNetwork point graph for the selected family
tabPanel("Point Network",
selectInput("simpleNet", "Choose Family", c("Kapoor", "Anand")),
visNetworkOutput("network1")),
# Tab 2: static community-detection plot
tabPanel("Community Detection Graph",
selectInput("comDet", "Choose Family", c("Kapoor", "Anand")),
plotOutput("plot", height = "800px")),
# Tab 3: table of members per detected community
tabPanel("Community Members",
sidebarLayout(
sidebarPanel(
selectInput("cm", "Choose Family", c("Kapoor", "Anand"))
),
mainPanel(
tableOutput("comMem")
)
)
)
)
)
# Server: renders the interactive network, the community-detection plot, and
# the community membership table for the family chosen in each tab.
server <- function(input, output) {
  # Interactive visNetwork graph with nearest-neighbour highlighting.
  output$network1 <- renderVisNetwork({
    visNetwork(edges = get_edges(input$simpleNet), nodes = get_nodes(input$simpleNet), height = "1000px") %>%
      visIgraphLayout(layout = "layout_nicely") %>%
      visNodes(size = 10) %>%
      # Idiom fix: TRUE/FALSE instead of the reassignable T/F shortcuts.
      visOptions(highlightNearest = list(enabled = TRUE, hover = TRUE),
                 nodesIdSelection = TRUE)
  })
  output$plot <- renderPlot({
    # PERF FIX: communityDetection() was called twice per render; compute the
    # (communities, layout) pair once and reuse it.
    cd <- communityDetection(input$comDet)
    plot(cd[[1]], layout = cd[[2]])
  })
  # Table of members grouped by detected community.
  output$comMem <- renderTable({
    by.community(input$cm)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /graph_app.R | no_license | aamodini/imdbBollywood | R | false | false | 1,521 | r | library(shiny)
# Project-local helpers: get_edges(), get_nodes(), communityDetection(),
# by.community(), and the visNetwork imports. (library(shiny) is loaded on
# the preceding line of this file.)
source("functionsGraph.R")
# UI: three-tab app for exploring family networks (the original boilerplate
# comment about "drawing a histogram" did not describe this app).
ui <- fluidPage(
# Top-level navigation bar, one tab per visualisation
navbarPage (
"Network Graphs",
# Tab 1: interactive visNetwork point graph for the selected family
tabPanel("Point Network",
selectInput("simpleNet", "Choose Family", c("Kapoor", "Anand")),
visNetworkOutput("network1")),
# Tab 2: static community-detection plot
tabPanel("Community Detection Graph",
selectInput("comDet", "Choose Family", c("Kapoor", "Anand")),
plotOutput("plot", height = "800px")),
# Tab 3: table of members per detected community
tabPanel("Community Members",
sidebarLayout(
sidebarPanel(
selectInput("cm", "Choose Family", c("Kapoor", "Anand"))
),
mainPanel(
tableOutput("comMem")
)
)
)
)
)
# Server: renders the interactive network, the community-detection plot, and
# the community membership table for the family chosen in each tab.
server <- function(input, output) {
  # Interactive visNetwork graph with nearest-neighbour highlighting.
  output$network1 <- renderVisNetwork({
    visNetwork(edges = get_edges(input$simpleNet), nodes = get_nodes(input$simpleNet), height = "1000px") %>%
      visIgraphLayout(layout = "layout_nicely") %>%
      visNodes(size = 10) %>%
      # Idiom fix: TRUE/FALSE instead of the reassignable T/F shortcuts.
      visOptions(highlightNearest = list(enabled = TRUE, hover = TRUE),
                 nodesIdSelection = TRUE)
  })
  output$plot <- renderPlot({
    # PERF FIX: communityDetection() was called twice per render; compute the
    # (communities, layout) pair once and reuse it.
    cd <- communityDetection(input$comDet)
    plot(cd[[1]], layout = cd[[2]])
  })
  # Table of members grouped by detected community.
  output$comMem <- renderTable({
    by.community(input$cm)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Plot 2: line chart of Global Active Power for 2007-02-01..2007-02-02,
# written to plot2.png (480x480).
hpc = read.delim('household_power_consumption.txt',sep = ';')
# Parse day/month/year date strings into Date objects.
hpc$Date = as.Date(as.character(hpc$Date),'%d/%m/%Y')
# Coerce the power column to numeric via character (the raw column is not
# numeric — presumably it contains '?' missing-value markers; verify).
hpc$Global_active_power = as.numeric(as.character(hpc$Global_active_power))
# Keep only the two target days (character bounds are coerced to Date).
hpc = subset(hpc, hpc$Date >= '2007-02-01' & hpc$Date <= '2007-02-02')
# Combine date and time into POSIXct timestamps for the x axis.
DT = as.POSIXct(paste(hpc$Date,hpc$Time), format = '%Y-%m-%d %H:%M:%S')
png(filename = 'plot2.png', width = 480, height = 480)
plot(hpc$Global_active_power~DT , type = 'l', xlab = '', ylab = 'Global Active Power (kilowatts)')
dev.off()
| /Exploratory Data Analysis/plot2.R | no_license | cc1101027/ExData_Plotting1 | R | false | false | 500 | r | hpc = read.delim('household_power_consumption.txt',sep = ';')
# Parse day/month/year date strings into Date objects (`hpc` is read from
# household_power_consumption.txt on the preceding line of this file).
hpc$Date <- as.Date(as.character(hpc$Date), '%d/%m/%Y')
# Coerce the power readings to numeric via character.
hpc$Global_active_power <- as.numeric(as.character(hpc$Global_active_power))
# Restrict to the two target days; character bounds coerce to Date.
hpc <- subset(hpc, hpc$Date >= '2007-02-01' & hpc$Date <= '2007-02-02')
# Build POSIXct timestamps for the x axis from the date and time columns.
timestamps <- as.POSIXct(paste(hpc$Date, hpc$Time), format = '%Y-%m-%d %H:%M:%S')
DT <- timestamps
# Render the line chart straight into plot2.png (480x480) and close the device.
png(filename = 'plot2.png', width = 480, height = 480)
plot(hpc$Global_active_power ~ DT, type = 'l', xlab = '',
     ylab = 'Global Active Power (kilowatts)')
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strip_model.R
\name{strip_model}
\alias{strip_model}
\title{Strip Model to Reduce Memory Footprint}
\usage{
strip_model(model)
}
\arguments{
\item{model}{Object of class \code{glm} or \code{lm}}
}
\value{
Object of class \code{glm} or \code{lm}, but with some elements removed
}
\description{
Strips elements from a \code{glm} or \code{lm} object to reduce size.
Allows for printing/showing, but \code{summary} must be run before stripping
}
\examples{
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
object.size(lm.D9)
object.size(strip_model(lm.D9))
}
| /man/strip_model.Rd | no_license | muschellij2/msseg | R | false | true | 802 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strip_model.R
\name{strip_model}
\alias{strip_model}
\title{Strip Model to Reduce Memory Footprint}
\usage{
strip_model(model)
}
\arguments{
\item{model}{Object of class \code{glm} or \code{lm}}
}
\value{
Object of class \code{glm} or \code{lm}, but with some elements removed
}
\description{
Strips elements from a \code{glm} or \code{lm} object to reduce size.
Allows for printing/showing, but \code{summary} must be run before stripping
}
\examples{
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
object.size(lm.D9)
object.size(strip_model(lm.D9))
}
|
library(moments)
# Descriptive statistics and bug-count correlations for software metrics.
# Interactive: the user picks the CSV; columns 4..9 hold the six metrics and
# `bug` holds the defect counts.
data = read.csv(choose.files())
metrics = data[, 4:9]
sta = data.frame(index = seq(1, 10))
for (i in 1:6) {
  temp = metrics[, i]
  # One column of stats per metric: five-number summary, mean, skewness,
  # excess kurtosis, and the two rank/linear correlations with bug count.
  # BUG FIX: the "- 3" excess adjustment belongs to kurtosis (excess
  # kurtosis = kurtosis - 3), not to skewness; the original subtracted 3
  # from skewness and reported raw kurtosis.
  sta[, i + 1] = c(fivenum(temp), mean(temp), skewness(temp), kurtosis(temp) - 3,
                   cor(temp, data$bug, method = 'pearson'),
                   cor(temp, data$bug, method = 'spearman'))
  # Print significance tests for both correlation coefficients.
  print(cor.test(temp, data$bug, method = 'pearson'))
  print(cor.test(temp, data$bug, method = 'spearman'))
}
# Drop the placeholder index column and label rows/columns for export.
sta = sta[, 2:7]
names(sta) = names(metrics)
row.names(sta) = c('Min','1st Qu.','Median','3rd Qu.','Max','Mean','Skewness','Kurtosis','Pearson','Spearman')
write.csv(sta, 'static.csv')
| /ex3_151278035_王洋/统计分析.R | no_license | NJUocean/Software-Metric | R | false | false | 559 | r | library(moments)
# Descriptive statistics and bug-count correlations for software metrics.
# (library(moments) is loaded on the preceding line of this file.)
# Interactive: the user picks the CSV; columns 4..9 hold the six metrics and
# `bug` holds the defect counts.
data = read.csv(choose.files())
metrics = data[, 4:9]
sta = data.frame(index = seq(1, 10))
for (i in 1:6) {
  temp = metrics[, i]
  # One column of stats per metric: five-number summary, mean, skewness,
  # excess kurtosis, and the two correlations with bug count.
  # BUG FIX: the "- 3" excess adjustment belongs to kurtosis (excess
  # kurtosis = kurtosis - 3), not to skewness; the original subtracted 3
  # from skewness and reported raw kurtosis.
  sta[, i + 1] = c(fivenum(temp), mean(temp), skewness(temp), kurtosis(temp) - 3,
                   cor(temp, data$bug, method = 'pearson'),
                   cor(temp, data$bug, method = 'spearman'))
  # Print significance tests for both correlation coefficients.
  print(cor.test(temp, data$bug, method = 'pearson'))
  print(cor.test(temp, data$bug, method = 'spearman'))
}
# Drop the placeholder index column and label rows/columns for export.
sta = sta[, 2:7]
names(sta) = names(metrics)
row.names(sta) = c('Min','1st Qu.','Median','3rd Qu.','Max','Mean','Skewness','Kurtosis','Pearson','Spearman')
write.csv(sta, 'static.csv')
|
# Calculate quantiles
# Hydrologic Response Units = HRUs
# Computes two percentile tables from cached total-water-storage data:
# one per-HRU and one pooled across all HRUs (for CONUS-wide comparison).
library(data.table)
library(dplyr)
totS_data_yrs <- readRDS("WBEEP/cache/totS_yrs.rds")
# A resulting value of 20% means that this value of stored water
# is greater than 20% of the daily stored water values for that HRU
# Calculate quantiles based on the last 5 years of total water storage for each individual HRU.
totS_percentiles_hru <- totS_data_yrs %>%
group_by(HRU) %>%
summarize(Q00 = quantile(totS, probs = 0.0),
Q10 = quantile(totS, probs = 0.1),
Q20 = quantile(totS, probs = 0.2),
Q30 = quantile(totS, probs = 0.3),
Q40 = quantile(totS, probs = 0.4),
Q50 = quantile(totS, probs = 0.5),
Q60 = quantile(totS, probs = 0.6),
Q70 = quantile(totS, probs = 0.7),
Q80 = quantile(totS, probs = 0.8),
Q90 = quantile(totS, probs = 0.9),
Q100 = quantile(totS, probs = 1.0))
# Reshape and format quantiles labels into decimal numbers
# (e.g. column "Q20" becomes stat_name = "Q20", stat_type = 0.2).
# NOTE(review): gather() is superseded by tidyr::pivot_longer().
totS_percentiles_hru_fix <- totS_percentiles_hru %>%
tidyr::gather(stat_name, stat_value, -HRU) %>%
mutate(stat_value = as.numeric(stat_value),
stat_type = as.numeric(gsub("Q", "", stat_name))/100) %>%
select(HRU, stat_name, stat_type, stat_value)
# Save quantiles
saveRDS(totS_percentiles_hru_fix, "WBEEP/cache/nhru_totS_percentiles_hru_yrs.rds")
# Group all HRUs and determine percentiles
# That way, people can look at the map and compare across regions
# A resulting value of 20% means that this value of stored water
# is greater than 20% of the daily stored water values for CONUS
totS_percentiles <- totS_data_yrs %>%
summarize(Q00 = quantile(totS, probs = 0.0),
Q10 = quantile(totS, probs = 0.1),
Q20 = quantile(totS, probs = 0.2),
Q30 = quantile(totS, probs = 0.3),
Q40 = quantile(totS, probs = 0.4),
Q50 = quantile(totS, probs = 0.5),
Q60 = quantile(totS, probs = 0.6),
Q70 = quantile(totS, probs = 0.7),
Q80 = quantile(totS, probs = 0.8),
Q90 = quantile(totS, probs = 0.9),
Q100 = quantile(totS, probs = 1.0))
# Reshape and format quantiles labels into decimal numbers
totS_percentiles_fix <- totS_percentiles %>%
tidyr::gather(stat_name, stat_value) %>% # warning here is about the "named" vector results from `quantile`
mutate(stat_value = as.numeric(stat_value),
stat_type = as.numeric(gsub("Q", "", stat_name))/100) %>%
select(stat_name, stat_type, stat_value)
# Save quantiles
saveRDS(totS_percentiles_fix, "WBEEP/cache/nhru_totS_percentiles_yrs.rds")
| /WBEEP/rscripts/create_totalstorage_example_data/4_process_totS_yrs_percentiles.R | permissive | usgs-makerspace/makerspace-sandbox | R | false | false | 2,653 | r | # Calculate quantiles
# Hydrologic Response Units = HRUs
# (Byte-duplicate of the percentile script above.) Computes two percentile
# tables from cached total-water-storage data: one per-HRU and one pooled
# across all HRUs (for CONUS-wide comparison).
library(data.table)
library(dplyr)
totS_data_yrs <- readRDS("WBEEP/cache/totS_yrs.rds")
# A resulting value of 20% means that this value of stored water
# is greater than 20% of the daily stored water values for that HRU
# Calculate quantiles based on the last 5 years of total water storage for each individual HRU.
totS_percentiles_hru <- totS_data_yrs %>%
group_by(HRU) %>%
summarize(Q00 = quantile(totS, probs = 0.0),
Q10 = quantile(totS, probs = 0.1),
Q20 = quantile(totS, probs = 0.2),
Q30 = quantile(totS, probs = 0.3),
Q40 = quantile(totS, probs = 0.4),
Q50 = quantile(totS, probs = 0.5),
Q60 = quantile(totS, probs = 0.6),
Q70 = quantile(totS, probs = 0.7),
Q80 = quantile(totS, probs = 0.8),
Q90 = quantile(totS, probs = 0.9),
Q100 = quantile(totS, probs = 1.0))
# Reshape and format quantiles labels into decimal numbers
# (e.g. column "Q20" becomes stat_name = "Q20", stat_type = 0.2).
# NOTE(review): gather() is superseded by tidyr::pivot_longer().
totS_percentiles_hru_fix <- totS_percentiles_hru %>%
tidyr::gather(stat_name, stat_value, -HRU) %>%
mutate(stat_value = as.numeric(stat_value),
stat_type = as.numeric(gsub("Q", "", stat_name))/100) %>%
select(HRU, stat_name, stat_type, stat_value)
# Save quantiles
saveRDS(totS_percentiles_hru_fix, "WBEEP/cache/nhru_totS_percentiles_hru_yrs.rds")
# Group all HRUs and determine percentiles
# That way, people can look at the map and compare across regions
# A resulting value of 20% means that this value of stored water
# is greater than 20% of the daily stored water values for CONUS
totS_percentiles <- totS_data_yrs %>%
summarize(Q00 = quantile(totS, probs = 0.0),
Q10 = quantile(totS, probs = 0.1),
Q20 = quantile(totS, probs = 0.2),
Q30 = quantile(totS, probs = 0.3),
Q40 = quantile(totS, probs = 0.4),
Q50 = quantile(totS, probs = 0.5),
Q60 = quantile(totS, probs = 0.6),
Q70 = quantile(totS, probs = 0.7),
Q80 = quantile(totS, probs = 0.8),
Q90 = quantile(totS, probs = 0.9),
Q100 = quantile(totS, probs = 1.0))
# Reshape and format quantiles labels into decimal numbers
totS_percentiles_fix <- totS_percentiles %>%
tidyr::gather(stat_name, stat_value) %>% # warning here is about the "named" vector results from `quantile`
mutate(stat_value = as.numeric(stat_value),
stat_type = as.numeric(gsub("Q", "", stat_name))/100) %>%
select(stat_name, stat_type, stat_value)
# Save quantiles
saveRDS(totS_percentiles_fix, "WBEEP/cache/nhru_totS_percentiles_yrs.rds")
\name{martin}
\alias{martin}
\docType{data}
\title{
House martin count data.}
\description{
House martin nest count data. Data are from Chapter 5 of Kery and Schaub (2012), Bayesian Population Analysis Using WinBUGS - A hierarchical perspective.
}
\usage{data("martin")}
\format{
A data frame with 20 observations on the following 2 variables.
\describe{
\item{\code{counts}}{a numeric vector}
\item{\code{year}}{a numeric vector}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(martin)
## maybe str(martin) ; plot(martin) ...
}
\keyword{datasets}
| /man/martin.Rd | no_license | chrissuthy/MT5767 | R | false | false | 767 | rd | \name{martin}
\alias{martin}
\docType{data}
\title{
House martin count data.}
\description{
House martin nest count data. Data are from Chapter 5 of Kery and Schaub (2012), Bayesian Population Analysis Using WinBUGS - A hierarchical perspective.
}
\usage{data("martin")}
\format{
A data frame with 20 observations on the following 2 variables.
\describe{
\item{\code{counts}}{a numeric vector}
\item{\code{year}}{a numeric vector}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(martin)
## maybe str(martin) ; plot(martin) ...
}
\keyword{datasets}
|
# Overlays a normalized plate-reader fluorescence trace (pasted via the
# clipboard) on a gel-filtration UV chromatogram, then optionally saves a PDF.
# Windows-only: uses winDialog(), clipboard reads, and explorer.exe.
library(Hmisc)
fname=file.choose()
stopifnot(file.exists(fname))
# Plot/file title = chromatogram file name without its extension.
titlename=strsplit(basename(fname),"\\.")[[1]][1];
#alternatively you can use
#titlename=unlist(strsplit(basename(fname),"[.]"))[1]
# Chromatogram export: two header lines, then tab-separated columns
# (1-2 = UV trace, 3-4 = conductivity).
dat=read.csv(fname,sep="\t",skip=2)
cond=dat[3:4]
cond=cond[complete.cases(cond),]
names(cond)=c("x","cond")
uv=dat[1:2]
uv=uv[complete.cases(uv),]
names(uv)=c("x","uv")
uv$uv=uv$uv/1000 #fplc unit was mAU
#uv$uv=uv$uv/max(uv$uv) #normalize
# Base plot: UV absorbance (AU) vs elution volume.
plot(uv$x,uv$uv,type="l",col="blue",lwd=2,ylim=c(0,1.2),xlim=c(0,25),xlab="Elution (ml)",ylab="Abs./Fluor.")
minor.tick(nx=5)
#library(xlsx)
#fname=file.choose()
#stopifnot(file.exists(fname))
#file <- loadWorkbook(fname)
#df1 <- readColumns(getSheets(file)[[1]], startColumn = 3, endColumn = 5, startRow = 5, endRow = 8, header = T)
# Prompt the user to copy the 96-well plate reading to the clipboard.
winDialog("ok", paste0("Copy the plate reading for",titlename,"to clipboard!"))
# str1=readClipboard(format = 1, raw = FALSE)
# str2=gsub("OVRFLW","Inf",str1)
# writeClipboard(str2, format = 1)
f=read.csv('clipboard',sep="\t",head=F)
#this line converts serpentine order to row by row.
# f[c(2,4,6,8), 1:12]=f[c(2,4,6,8), 12:1]
f=c(t(f)) #convert back to one column
# Normalize: wells 91-95 serve as the baseline (blank); "*1" is a no-op
# scale factor kept for easy adjustment.
f=(f-mean(f[91:95]))/(max(f)-mean(f[91:95]))*1 #normalize
#read from the chromatogram, the fraction of A1 should be 6.65 to 6.9ml.
#The tubing from UV detector to the fraction connector = 287ul.
# Elution volume per well: 0.25 ml fractions, offset by tubing dead volume.
df=data.frame(elution=0.25*(0:95)+6.25-0.287,F=f)
names(df)=c("Elution","Fluorescence")
# Overlay the fluorescence trace as a step line on the existing UV plot.
lines(df$Elution,df$Fluorescence,type="s",col='red',lwd=2)
legend("topleft",legend=c("Abs@280nm","fluor@520nm"),bty="n",lwd=2,col=c("blue","red"))
title(titlename)
write.csv(df,paste0(titlename,"_fluor.dat"),row.names = F)
# Optionally re-render the combined figure into a PDF chosen by the user
# (the closing brace of this `if` is on the next line of the file).
pdfname=choose.files(paste0(titlename,"_fluor.pdf"),caption = "Save to pdf?",filters = "*.pdf")
if (length(pdfname)==1) {
pdf(pdfname)
plot(uv$x,uv$uv,type="l",col="blue",lwd=2,ylim=c(0,1.3),xlim=c(0,25),xlab="Elution (ml)",ylab="Abs./Fluor.")
minor.tick(nx=5)
lines(df$Elution,df$Fluorescence,type="s",col='red',lwd=2)
legend("topleft",legend=c("Abs@280nm","Fluor.em@520"),bty="n",lwd=2,col=c("blue","red"))
title(titlename)
dev.off()
system(paste("explorer",pdfname),wait=F)
} | /GF-uv-fluor.R | no_license | YijiaXiong/scripts | R | false | false | 2,213 | r | library(Hmisc)
# (Byte-duplicate of the GF-uv-fluor script above; library(Hmisc) is loaded
# on the preceding line of this file.) Overlays a normalized plate-reader
# fluorescence trace on a gel-filtration UV chromatogram. Windows-only.
fname=file.choose()
stopifnot(file.exists(fname))
# Plot/file title = chromatogram file name without its extension.
titlename=strsplit(basename(fname),"\\.")[[1]][1];
#alternatively you can use
#titlename=unlist(strsplit(basename(fname),"[.]"))[1]
# Chromatogram export: two header lines, then tab-separated columns
# (1-2 = UV trace, 3-4 = conductivity).
dat=read.csv(fname,sep="\t",skip=2)
cond=dat[3:4]
cond=cond[complete.cases(cond),]
names(cond)=c("x","cond")
uv=dat[1:2]
uv=uv[complete.cases(uv),]
names(uv)=c("x","uv")
uv$uv=uv$uv/1000 #fplc unit was mAU
#uv$uv=uv$uv/max(uv$uv) #normalize
# Base plot: UV absorbance (AU) vs elution volume.
plot(uv$x,uv$uv,type="l",col="blue",lwd=2,ylim=c(0,1.2),xlim=c(0,25),xlab="Elution (ml)",ylab="Abs./Fluor.")
minor.tick(nx=5)
#library(xlsx)
#fname=file.choose()
#stopifnot(file.exists(fname))
#file <- loadWorkbook(fname)
#df1 <- readColumns(getSheets(file)[[1]], startColumn = 3, endColumn = 5, startRow = 5, endRow = 8, header = T)
# Prompt the user to copy the 96-well plate reading to the clipboard.
winDialog("ok", paste0("Copy the plate reading for",titlename,"to clipboard!"))
# str1=readClipboard(format = 1, raw = FALSE)
# str2=gsub("OVRFLW","Inf",str1)
# writeClipboard(str2, format = 1)
f=read.csv('clipboard',sep="\t",head=F)
#this line converts serpentine order to row by row.
# f[c(2,4,6,8), 1:12]=f[c(2,4,6,8), 12:1]
f=c(t(f)) #convert back to one column
# Normalize: wells 91-95 serve as the baseline (blank); "*1" is a no-op
# scale factor kept for easy adjustment.
f=(f-mean(f[91:95]))/(max(f)-mean(f[91:95]))*1 #normalize
#read from the chromatogram, the fraction of A1 should be 6.65 to 6.9ml.
#The tubing from UV detector to the fraction connector = 287ul.
# Elution volume per well: 0.25 ml fractions, offset by tubing dead volume.
df=data.frame(elution=0.25*(0:95)+6.25-0.287,F=f)
names(df)=c("Elution","Fluorescence")
# Overlay the fluorescence trace as a step line on the existing UV plot.
lines(df$Elution,df$Fluorescence,type="s",col='red',lwd=2)
legend("topleft",legend=c("Abs@280nm","fluor@520nm"),bty="n",lwd=2,col=c("blue","red"))
title(titlename)
write.csv(df,paste0(titlename,"_fluor.dat"),row.names = F)
# Optionally re-render the combined figure into a PDF chosen by the user.
pdfname=choose.files(paste0(titlename,"_fluor.pdf"),caption = "Save to pdf?",filters = "*.pdf")
if (length(pdfname)==1) {
pdf(pdfname)
plot(uv$x,uv$uv,type="l",col="blue",lwd=2,ylim=c(0,1.3),xlim=c(0,25),xlab="Elution (ml)",ylab="Abs./Fluor.")
minor.tick(nx=5)
lines(df$Elution,df$Fluorescence,type="s",col='red',lwd=2)
legend("topleft",legend=c("Abs@280nm","Fluor.em@520"),bty="n",lwd=2,col=c("blue","red"))
title(titlename)
dev.off()
system(paste("explorer",pdfname),wait=F)
}
library(ggplot2)
set.seed(26061993)
# Exercise a)
# Simulate a length-250 two-state hidden Markov chain x_t in {0,1}:
# the chain keeps its current state with probability p and flips otherwise.
# Observations are y_t = x_t + Gaussian noise with standard deviation tau.
tau = 0.4
p = 0.9
# Random initial state; remaining entries are filled by the loop below
x = c(sample(c(0,1),1),rep(NA,249))
y = c(x[1] + rnorm(1,0,tau), rep(NA,249))
for (i in 2:250){
if (runif(1)<p){
x[i] = x[i-1]
}
else{x[i] = 1-x[i-1]}
y[i] = x[i] + rnorm(1,0,tau)
}
# Plot the latent state path (line) and the noisy observations (points)
df = data.frame(x = x, y = y)
ggplot(df, aes(x = 1:250, y = x)) + geom_line() + labs(x = "i", y = "x", title = "Generated sample for x") + theme_classic(base_size = 19)
ggplot(df, aes(x = 1:250, y = y)) + geom_point() + labs(x = "i", y = "y", title = "Generated sample for y") + theme_classic(base_size = 19)
# Exercise b)
# Log-likelihood of the observed series `y` (read from the enclosing
# environment, length 250) under the two-state HMM of exercise a):
# x_t in {0,1}, P(x_t = x_{t-1}) = p, and y_t | x_t ~ N(x_t, tau^2).
# Uses the normalized forward recursion; the normalizing constants c_t
# satisfy log L(y) = -sum(log(c_t)).
#
# parameters: numeric vector c(p, tau)
# Returns: scalar log-likelihood
likelihood <- function(parameters){
  p = parameters[1]
  tau = parameters[2]
  # First initiate normality constants and forward probabilities
  # (uniform prior 0.5/0.5 over the initial state)
  norm_const = c(1/(dnorm(y[1],0,tau)*0.5+dnorm(y[1],1,tau)*0.5), rep(NA,249))
  forward_prob = matrix(0,nrow=250,ncol = 2)
  forward_prob[1,1] = norm_const[1]*0.5*dnorm(y[1],0,tau)
  forward_prob[1,2] = norm_const[1]*0.5*dnorm(y[1],1,tau)
  # Columns: joint filtered probabilities of (x_{t-1}, x_t) in the order
  # (0,0), (0,1), (1,0), (1,1)
  transition = matrix(NA, nrow = 250, ncol = 4)
  # Iterate to find constants and probabilities
  for (t in 2:250){
    # Emission densities for the two states, computed once per step
    # instead of eight times as in the original
    d0 = dnorm(y[t],0,tau)
    d1 = dnorm(y[t],1,tau)
    norm_const[t] = 1/(d0*p*forward_prob[t-1,1] + (1-p)*d0*forward_prob[t-1,2] + d1*(1-p)*forward_prob[t-1,1] + d1*p*forward_prob[t-1,2])
    transition[t,1] = p*d0*forward_prob[t-1,1]*norm_const[t]
    transition[t,2] = (1-p)*d1*forward_prob[t-1,1]*norm_const[t]
    transition[t,3] = (1-p)*d0*forward_prob[t-1,2]*norm_const[t]
    transition[t,4] = p*d1*forward_prob[t-1,2]*norm_const[t]
    forward_prob[t,1] = transition[t,1] + transition[t,3]
    forward_prob[t,2] = transition[t,2] + transition[t,4]
  }
  # FIX: log(1/prod(norm_const)) under/overflows once the product of 250
  # factors leaves double range; -sum(log(norm_const)) is the same value
  # computed stably.
  return(-sum(log(norm_const)))
}
# Negated log-likelihood, so minimizers such as optim() can be used to
# maximize likelihood(parameters).
neg_likelihood <- function(parameters) -likelihood(parameters)
# Exercise b), continued: evaluate the log-likelihood on a 50x50 grid over
# (p, tau) and locate the maximum numerically.
grid_points = 50
# Grid coordinates: p varies fastest (whole sequence repeated),
# tau varies slowest (each value repeated grid_points times)
p_values = rep(seq(0, 1, length.out = grid_points),grid_points)
tau_values = rep(seq(0.1, 1, length.out = grid_points),each = grid_points)
test = matrix(c(p_values, tau_values), ncol = 2, byrow = F)
# One log-likelihood evaluation per grid row, reshaped to a p-by-tau matrix
likelihood_values = matrix(apply(test,1,likelihood), ncol= grid_points)
x_axis = unique(p_values)
y_axis = unique(tau_values)
plot_data <- list(x_axis, y_axis, likelihood_values)
names(plot_data) <- c("x","y","z")
# Quick base-graphics contour for inspection
contour(plot_data, drawlabels = F)
# Maximize the likelihood (optim minimizes, hence neg_likelihood),
# starting from (p, tau) = (0.5, 0.5)
optimal = optim(c(0.5,0.5), neg_likelihood)
dfll = data.frame(x = p_values, y = tau_values, llik = as.vector(likelihood_values))
dfll_dot = data.frame(x = optimal$par[1], y = optimal$par[2])
# ggplot version of the contour plot with the computed optimum overlaid
ggplot() + geom_contour(data = dfll, aes(x = x, y = y, z = llik, col = "Loglikelihood"), show.legend = T) +
geom_point(data = dfll_dot, aes(x = x, y = y,col = "Computed optimal"), size = 4, alpha = 0.7) + labs(x = "p", y = "tau", title = "Loglikelihood of y, as function of p and tau") + theme_classic()
# Exercise c)
# Forward-backward smoothing at the estimated parameters, followed by
# backward simulation of a latent path.
p_est = optimal$par[1]
tau_est = optimal$par[2]
# First initiate normality constants and forward probabilities
# (same forward recursion as in likelihood(), rerun at (p_est, tau_est)
# because the filtered quantities are needed below)
norm_const = c(1/(dnorm(y[1],0,tau_est)*0.5+dnorm(y[1],1,tau_est)*0.5), rep(NA,249))
forward_prob = matrix(0,nrow=250,ncol = 2)
forward_prob[1,1] = norm_const[1]*0.5*dnorm(y[1],0,tau_est)
forward_prob[1,2] = norm_const[1]*0.5*dnorm(y[1],1,tau_est)
# Columns hold the filtered joint probabilities of (x_{t-1}, x_t) in the
# order (0,0), (0,1), (1,0), (1,1)
transition = matrix(NA, nrow = 250, ncol = 4)
# Iterate to find constants and probabilities
for (t in 2:250){
norm_const[t] = 1/(dnorm(y[t],0,tau_est)*p_est*forward_prob[t-1,1] + (1-p_est)*dnorm(y[t],0,tau_est)*forward_prob[t-1,2] + dnorm(y[t],1,tau_est)*(1-p_est)*forward_prob[t-1,1] + dnorm(y[t],1,tau_est)*p_est*forward_prob[t-1,2])
transition[t,1] = p_est*dnorm(y[t],0,tau_est)*forward_prob[t-1,1]*norm_const[t]
transition[t,2] = (1-p_est)*dnorm(y[t],1,tau_est)*forward_prob[t-1,1]*norm_const[t]
transition[t,3] = (1-p_est)*dnorm(y[t],0,tau_est)*forward_prob[t-1,2]*norm_const[t]
transition[t,4] = p_est*dnorm(y[t],1,tau_est)*forward_prob[t-1,2]*norm_const[t]
forward_prob[t,1] = transition[t,1] + transition[t,3]
forward_prob[t,2] = transition[t,2] + transition[t,4]
}
# Initiate
# Backward pass: smoothed marginals, seeded with the final filtered
# distribution (filtering = smoothing at t = 250)
backward_prob = matrix(0,nrow = 250, ncol = 2)
backward_prob[250,1] = forward_prob[250,1]
backward_prob[250,2] = forward_prob[250,2]
back_transition = matrix(NA, ncol = 4, nrow = 250)
# propagation[t, ] holds the conditional probabilities of x_t given
# x_{t-1}, used for the backward simulation further down
propagation = matrix(NA, ncol = 4, nrow = 250)
for (t in 250:2){
back_transition[t,1] = transition[t,1]/forward_prob[t,1]*backward_prob[t,1]
back_transition[t,2] = transition[t,2]/forward_prob[t,2]*backward_prob[t,2]
back_transition[t,3] = transition[t,3]/forward_prob[t,1]*backward_prob[t,1]
back_transition[t,4] = transition[t,4]/forward_prob[t,2]*backward_prob[t,2]
backward_prob[t-1,1] = back_transition[t,1] + back_transition[t,2]
backward_prob[t-1,2] = back_transition[t,3] + back_transition[t,4]
propagation[t,1] = back_transition[t,1]/backward_prob[t-1,1]
propagation[t,2] = back_transition[t,2]/backward_prob[t-1,1]
propagation[t,3] = back_transition[t,3]/backward_prob[t-1,2]
propagation[t,4] = back_transition[t,4]/backward_prob[t-1,2]
}
# Smoothed marginal P(x_i = 1 | y) over the whole series
df2 = data.frame(prob_0 = backward_prob[,1], prob_1 = backward_prob[,2])
ggplot(df2, aes(x = 1:250, y = prob_1)) + geom_line() + labs(x = "i", y = "prob x_i = 1", title = "Computed marginal probabilities for x_i = 1") + theme_classic(base_size = 19)
# Draw one latent path: sample x_1 from its smoothed marginal, then each
# x_t from propagation[] conditional on the previously sampled x_{t-1}.
# NOTE(review): the column index 2*(1+x_est[t-1]) selects columns 2 or 4 --
# verify this matches the intended (prev state, next state) column layout.
x_est = rep(NA, 250)
x_est[1] = ifelse(runif(1)<backward_prob[1,2], 1, 0)
for (t in 2:250){
x_est[t] = ifelse(runif(1) < propagation[t,as.integer(2*(1+x_est[t-1]))], 1, 0)
}
df3 = data.frame(x = x_est)
ggplot(df3, aes(x = 1:250, y = x)) + geom_line() + labs(x = "i", y = "x", title = "Simulated values for x based on FB-algorithm") + theme_classic(base_size = 19)
# Exercise d)
# Compare the HMM-based MAP prediction with a naive pointwise classifier
# that thresholds each observation at 0.5 (independence assumption)
df4 = data.frame(markov = round(backward_prob[,2]), indep = ifelse(y<0.5, 0, 1))
ggplot(df4, aes(x = 1:250)) + geom_line(aes(y = markov)) + labs(x = "i", y = "x", title = "Predicted values for x based on Markov property") + theme_classic(base_size = 19)
ggplot(df4, aes(x = 1:250)) + geom_line(aes(y = indep)) + labs(x = "i",y = "x", title = "Predicted values for x based on independence ") + theme_classic(base_size = 19)
# Posterior P(x_i = 1) under both models (independence: Bayes with equal
# priors, using the true tau rather than tau_est)
df2 = data.frame(prob_0 = backward_prob[,1], prob_1 = backward_prob[,2], prob_1_indep = dnorm(y,1,tau)/(dnorm(y,1,tau)+dnorm(y,0,tau)))
ggplot(df2, aes(x = 1:250)) + geom_line(size = 1.2, aes(y = prob_1, col = "Markov")) + geom_line(size = 0.1, aes(y = prob_1_indep, col = "Independent")) + labs(x = "i", y = "prob x_i = 1", title = "Probability of x_i = 1 for Markov and independence assumptions") + theme_classic()
| /Project 2 - code.R | no_license | henrisli/MA8001-Project-2 | R | false | false | 6,414 | r | library(ggplot2)
set.seed(26061993)
# Exercise a)
tau = 0.4
p = 0.9
x = c(sample(c(0,1),1),rep(NA,249))
y = c(x[1] + rnorm(1,0,tau), rep(NA,249))
for (i in 2:250){
if (runif(1)<p){
x[i] = x[i-1]
}
else{x[i] = 1-x[i-1]}
y[i] = x[i] + rnorm(1,0,tau)
}
df = data.frame(x = x, y = y)
ggplot(df, aes(x = 1:250, y = x)) + geom_line() + labs(x = "i", y = "x", title = "Generated sample for x") + theme_classic(base_size = 19)
ggplot(df, aes(x = 1:250, y = y)) + geom_point() + labs(x = "i", y = "y", title = "Generated sample for y") + theme_classic(base_size = 19)
# Exercise b)
likelihood <- function(parameters){
p = parameters[1]
tau = parameters[2]
# First initiate normality constants and forward probabilities
norm_const = c(1/(dnorm(y[1],0,tau)*0.5+dnorm(y[1],1,tau)*0.5), rep(NA,249))
forward_prob = matrix(0,nrow=250,ncol = 2)
forward_prob[1,1] = norm_const[1]*0.5*dnorm(y[1],0,tau)
forward_prob[1,2] = norm_const[1]*0.5*dnorm(y[1],1,tau)
transition = matrix(NA, nrow = 250, ncol = 4)
# Iterate to find constants and probabilities
for (t in 2:250){
norm_const[t] = 1/(dnorm(y[t],0,tau)*p*forward_prob[t-1,1] + (1-p)*dnorm(y[t],0,tau)*forward_prob[t-1,2] + dnorm(y[t],1,tau)*(1-p)*forward_prob[t-1,1] + dnorm(y[t],1,tau)*p*forward_prob[t-1,2])
transition[t,1] = p*dnorm(y[t],0,tau)*forward_prob[t-1,1]*norm_const[t]
transition[t,2] = (1-p)*dnorm(y[t],1,tau)*forward_prob[t-1,1]*norm_const[t]
transition[t,3] = (1-p)*dnorm(y[t],0,tau)*forward_prob[t-1,2]*norm_const[t]
transition[t,4] = p*dnorm(y[t],1,tau)*forward_prob[t-1,2]*norm_const[t]
forward_prob[t,1] = transition[t,1] + transition[t,3]
forward_prob[t,2] = transition[t,2] + transition[t,4]
}
return(log(1/prod(norm_const)))
}
neg_likelihood <- function(parameters){
return(-likelihood(parameters))
}
grid_points = 50
p_values = rep(seq(0, 1, length.out = grid_points),grid_points)
tau_values = rep(seq(0.1, 1, length.out = grid_points),each = grid_points)
test = matrix(c(p_values, tau_values), ncol = 2, byrow = F)
likelihood_values = matrix(apply(test,1,likelihood), ncol= grid_points)
x_axis = unique(p_values)
y_axis = unique(tau_values)
plot_data <- list(x_axis, y_axis, likelihood_values)
names(plot_data) <- c("x","y","z")
contour(plot_data, drawlabels = F)
optimal = optim(c(0.5,0.5), neg_likelihood)
dfll = data.frame(x = p_values, y = tau_values, llik = as.vector(likelihood_values))
dfll_dot = data.frame(x = optimal$par[1], y = optimal$par[2])
ggplot() + geom_contour(data = dfll, aes(x = x, y = y, z = llik, col = "Loglikelihood"), show.legend = T) +
geom_point(data = dfll_dot, aes(x = x, y = y,col = "Computed optimal"), size = 4, alpha = 0.7) + labs(x = "p", y = "tau", title = "Loglikelihood of y, as function of p and tau") + theme_classic()
# Exercise c)
p_est = optimal$par[1]
tau_est = optimal$par[2]
# First initiate normality constants and forward probabilities
norm_const = c(1/(dnorm(y[1],0,tau_est)*0.5+dnorm(y[1],1,tau_est)*0.5), rep(NA,249))
forward_prob = matrix(0,nrow=250,ncol = 2)
forward_prob[1,1] = norm_const[1]*0.5*dnorm(y[1],0,tau_est)
forward_prob[1,2] = norm_const[1]*0.5*dnorm(y[1],1,tau_est)
transition = matrix(NA, nrow = 250, ncol = 4)
# Iterate to find constants and probabilities
for (t in 2:250){
norm_const[t] = 1/(dnorm(y[t],0,tau_est)*p_est*forward_prob[t-1,1] + (1-p_est)*dnorm(y[t],0,tau_est)*forward_prob[t-1,2] + dnorm(y[t],1,tau_est)*(1-p_est)*forward_prob[t-1,1] + dnorm(y[t],1,tau_est)*p_est*forward_prob[t-1,2])
transition[t,1] = p_est*dnorm(y[t],0,tau_est)*forward_prob[t-1,1]*norm_const[t]
transition[t,2] = (1-p_est)*dnorm(y[t],1,tau_est)*forward_prob[t-1,1]*norm_const[t]
transition[t,3] = (1-p_est)*dnorm(y[t],0,tau_est)*forward_prob[t-1,2]*norm_const[t]
transition[t,4] = p_est*dnorm(y[t],1,tau_est)*forward_prob[t-1,2]*norm_const[t]
forward_prob[t,1] = transition[t,1] + transition[t,3]
forward_prob[t,2] = transition[t,2] + transition[t,4]
}
# Initiate
backward_prob = matrix(0,nrow = 250, ncol = 2)
backward_prob[250,1] = forward_prob[250,1]
backward_prob[250,2] = forward_prob[250,2]
back_transition = matrix(NA, ncol = 4, nrow = 250)
propagation = matrix(NA, ncol = 4, nrow = 250)
for (t in 250:2){
back_transition[t,1] = transition[t,1]/forward_prob[t,1]*backward_prob[t,1]
back_transition[t,2] = transition[t,2]/forward_prob[t,2]*backward_prob[t,2]
back_transition[t,3] = transition[t,3]/forward_prob[t,1]*backward_prob[t,1]
back_transition[t,4] = transition[t,4]/forward_prob[t,2]*backward_prob[t,2]
backward_prob[t-1,1] = back_transition[t,1] + back_transition[t,2]
backward_prob[t-1,2] = back_transition[t,3] + back_transition[t,4]
propagation[t,1] = back_transition[t,1]/backward_prob[t-1,1]
propagation[t,2] = back_transition[t,2]/backward_prob[t-1,1]
propagation[t,3] = back_transition[t,3]/backward_prob[t-1,2]
propagation[t,4] = back_transition[t,4]/backward_prob[t-1,2]
}
df2 = data.frame(prob_0 = backward_prob[,1], prob_1 = backward_prob[,2])
ggplot(df2, aes(x = 1:250, y = prob_1)) + geom_line() + labs(x = "i", y = "prob x_i = 1", title = "Computed marginal probabilities for x_i = 1") + theme_classic(base_size = 19)
x_est = rep(NA, 250)
x_est[1] = ifelse(runif(1)<backward_prob[1,2], 1, 0)
for (t in 2:250){
x_est[t] = ifelse(runif(1) < propagation[t,as.integer(2*(1+x_est[t-1]))], 1, 0)
}
df3 = data.frame(x = x_est)
ggplot(df3, aes(x = 1:250, y = x)) + geom_line() + labs(x = "i", y = "x", title = "Simulated values for x based on FB-algorithm") + theme_classic(base_size = 19)
# Exercise d)
df4 = data.frame(markov = round(backward_prob[,2]), indep = ifelse(y<0.5, 0, 1))
ggplot(df4, aes(x = 1:250)) + geom_line(aes(y = markov)) + labs(x = "i", y = "x", title = "Predicted values for x based on Markov property") + theme_classic(base_size = 19)
ggplot(df4, aes(x = 1:250)) + geom_line(aes(y = indep)) + labs(x = "i",y = "x", title = "Predicted values for x based on independence ") + theme_classic(base_size = 19)
df2 = data.frame(prob_0 = backward_prob[,1], prob_1 = backward_prob[,2], prob_1_indep = dnorm(y,1,tau)/(dnorm(y,1,tau)+dnorm(y,0,tau)))
ggplot(df2, aes(x = 1:250)) + geom_line(size = 1.2, aes(y = prob_1, col = "Markov")) + geom_line(size = 0.1, aes(y = prob_1_indep, col = "Independent")) + labs(x = "i", y = "prob x_i = 1", title = "Probability of x_i = 1 for Markov and independence assumptions") + theme_classic()
|
###################################################
### chunk number 1: eval=FALSE
###################################################
## desktop=getwd()
## options(repos="http://cran.us.r-project.org")
## install.packages("plotrix",destdir=desktop,lib=desktop)
## library(plotrix,lib=desktop)
## install.packages("gplots",destdir=desktop,lib=desktop)
## install.packages("gtools",destdir=desktop,lib=desktop)
## install.packages("gdata",destdir=desktop,lib=desktop)
## library(gtools,lib=desktop)
## library(gdata,lib=desktop)
## library(gplots,lib=desktop)
###################################################
### chunk number 2:
###################################################
# Parse clock-time strings; times() presumably comes from the chron
# package installed in the commented-out chunk above -- TODO confirm
timevec1 = c("11:00:00","11:25:30","15:30:20")
times1 = times(timevec1)
###################################################
### chunk number 3:
###################################################
# Toy longitudinal data set: 3 individuals observed on 3, 4 and 5 days
set.seed(1001)
mydata = data.frame(indiv=rep(1:3,c(3,4,5)),
sex=factor(c(rep("F",7),rep("M",5))),
day=c(1:3,1:4,1:5),dist=runif(12))
###################################################
### chunk number 4:
###################################################
# Long-to-wide reshape: one row per individual, one dist.<day> column per day
r1 = reshape(mydata,direction="wide",idvar="indiv",timevar="day",
v.names="dist"); r1
###################################################
### chunk number 5:
###################################################
# Count individuals by sex in the wide data
table(r1$sex)
###################################################
### chunk number 6:
###################################################
# Split-apply-combine: take the first observation per individual
splitdata = split.data.frame(mydata,mydata$indiv)
firstlines = lapply(splitdata,function(x)x[1,])
recombined = do.call("rbind",firstlines)
| /website/miscR.R | no_license | michaelfrancenelson/Bolker_2008_walkthrough | R | false | false | 1,653 | r | ###################################################
### chunk number 1: eval=FALSE
###################################################
## desktop=getwd()
## options(repos="http://cran.us.r-project.org")
## install.packages("plotrix",destdir=desktop,lib=desktop)
## library(plotrix,lib=desktop)
## install.packages("gplots",destdir=desktop,lib=desktop)
## install.packages("gtools",destdir=desktop,lib=desktop)
## install.packages("gdata",destdir=desktop,lib=desktop)
## library(gtools,lib=desktop)
## library(gdata,lib=desktop)
## library(gplots,lib=desktop)
###################################################
### chunk number 2:
###################################################
timevec1 = c("11:00:00","11:25:30","15:30:20")
times1 = times(timevec1)
###################################################
### chunk number 3:
###################################################
set.seed(1001)
mydata = data.frame(indiv=rep(1:3,c(3,4,5)),
sex=factor(c(rep("F",7),rep("M",5))),
day=c(1:3,1:4,1:5),dist=runif(12))
###################################################
### chunk number 4:
###################################################
r1 = reshape(mydata,direction="wide",idvar="indiv",timevar="day",
v.names="dist"); r1
###################################################
### chunk number 5:
###################################################
table(r1$sex)
###################################################
### chunk number 6:
###################################################
splitdata = split.data.frame(mydata,mydata$indiv)
firstlines = lapply(splitdata,function(x)x[1,])
recombined = do.call("rbind",firstlines)
|
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
## Build a cache-aware matrix wrapper: a list of closures that share the
## matrix `m` and a lazily cached inverse `i` through their enclosing
## environment. Methods: set()/get() for the matrix itself,
## setInverse()/getInverse() for the cached inverse. Replacing the matrix
## via set() resets the cached inverse to NULL.
makeCacheMatrix <- function( m = matrix() ) {
    ## Cached inverse; NULL until setInverse() is called
    i <- NULL
    list(
        set = function( matrix ) {
            ## A new matrix invalidates any previously cached inverse
            m <<- matrix
            i <<- NULL
        },
        get = function() m,
        setInverse = function(inverse) i <<- inverse,
        getInverse = function() i
    )
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The inverse is computed at most once: later calls return the cached
## value (until the underlying matrix is replaced via x$set()).
##
## x   : object returned by makeCacheMatrix(); its matrix is assumed square
##       and invertible
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    m <- x$getInverse()
    ## Just return the inverse if its already set
    if( !is.null(m) ) {
        message("getting cached data")
        return(m)
    }
    ## Get the matrix from our object
    data <- x$get()
    ## FIX: the original computed solve(data) %*% data, which is the
    ## identity matrix, not the inverse. solve(data) alone is the inverse;
    ## ... is forwarded so callers can pass solve() options.
    m <- solve(data, ...)
    ## Set the inverse to the object
    x$setInverse(m)
    ## Return the matrix
    m
}
| /cachematrix.R | no_license | dumiduattanayake/ProgrammingAssignment2 | R | false | false | 1,548 | r | ## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function( m = matrix() ) {
## Initialize the inverse property
i <- NULL
## Method to set the matrix
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
## Method the get the matrix
get <- function() {
## Return the matrix
m
}
## Method to set the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Method to get the inverse of the matrix
getInverse <- function() {
## Return the inverse property
i
}
## Return a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The inverse is computed at most once: later calls return the cached
## value (until the underlying matrix is replaced via x$set()).
##
## x   : object returned by makeCacheMatrix(); its matrix is assumed square
##       and invertible
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    m <- x$getInverse()
    ## Just return the inverse if its already set
    if( !is.null(m) ) {
        message("getting cached data")
        return(m)
    }
    ## Get the matrix from our object
    data <- x$get()
    ## FIX: the original computed solve(data) %*% data, which is the
    ## identity matrix, not the inverse. solve(data) alone is the inverse;
    ## ... is forwarded so callers can pass solve() options.
    m <- solve(data, ...)
    ## Set the inverse to the object
    x$setInverse(m)
    ## Return the matrix
    m
}
|
# Demo: simulate data for a generalized functional partially linear
# varying-coefficient model, select tuning parameters, then estimate,
# predict and bootstrap with the GFPLVCM package.
library(GFPLVCM) ### dependencies: MASS, fda, stats
n=100 ## sample size
pois_par=10 ## parameter for the Poission distribution to generate the observation times
order_1=4; order_2=4 ## order of the B-splines in the s and u directions
breaks=c(0,.2, .4, .6, .8, 1) ## knots of the B-splines
###########################################################
u_time=200; ## observation times for the functional variable in the u direction
len_k=50 ; ## number of basis functions to generate the functional parameter
gamma_real=0.3; ## true value of gamma
B = 4 ## signal of the functional parameter
pre_n <- 200 #### sample size of prediction
boot_R <- 1000 ### times for bootstrap
grid <- u_time
lambda_range <- seq(n^(-2), n^(-0.4), length=10) ## range of the tuning parameter lambda
h_range <- seq(n^(-1), n^(-0.2), length=10) ## range of the tuning parameter bandwidth
data <- Gen_data(n, pois_par, u_time, gamma_real, len_k, B) ### generate the data
## Joint selection of lambda and the bandwidth over the grids above
para <- tuning_parameter_selection( lambda_range, h_range, data$y, data$x, data$z, order_1, order_2, breaks, pois_par, grid)
## NOTE(review): para$final_lambda is passed twice below -- presumably the
## same penalty is used in both spline directions; confirm against the
## package's function signature.
final_beta <- parameter_estimate_prediction(data$y, data$x, data$z, para$final_lambda, para$final_lambda, para$final_bd, order_1, order_2, breaks, pois_par,len_k, gamma_real, pre_n, B)
boot <- parameter_estimate_boot(data$y, data$x, data$z, para$final_lambda, para$final_lambda, para$final_bd, order_1, order_2, breaks, boot_R, final_beta$gamma_est, final_beta$b_est)
## do estimation, prediction and testing
| /Simulation_demo.R | no_license | BIG-S2/GFPLVCM | R | false | false | 1,552 | r | library(GFPLVCM) ### dependencies: MASS, fda, stats
n=100 ## sample size
pois_par=10 ## parameter for the Poission distribution to generate the observation times
order_1=4; order_2=4 ## order of the B-splines in the s and u directions
breaks=c(0,.2, .4, .6, .8, 1) ## knots of the B-splines
###########################################################
u_time=200; ## observation times for the functional variable in the u direction
len_k=50 ; ## number of basis functions to generate the functional parameter
gamma_real=0.3; ## true value of gamma
B = 4 ## signal of the functional parameter
pre_n <- 200 #### sample size of prediction
boot_R <- 1000 ### times for bootstrap
grid <- u_time
lambda_range <- seq(n^(-2), n^(-0.4), length=10) ## range of the tuning parameter lambda
h_range <- seq(n^(-1), n^(-0.2), length=10) ## range of the tuning parameter bandwidth
data <- Gen_data(n, pois_par, u_time, gamma_real, len_k, B) ### generate the data
para <- tuning_parameter_selection( lambda_range, h_range, data$y, data$x, data$z, order_1, order_2, breaks, pois_par, grid)
final_beta <- parameter_estimate_prediction(data$y, data$x, data$z, para$final_lambda, para$final_lambda, para$final_bd, order_1, order_2, breaks, pois_par,len_k, gamma_real, pre_n, B)
boot <- parameter_estimate_boot(data$y, data$x, data$z, para$final_lambda, para$final_lambda, para$final_bd, order_1, order_2, breaks, boot_R, final_beta$gamma_est, final_beta$b_est)
## do estimation, prediction and testing
|
# params_mlogit_list() ---------------------------------------------------------
#' Parameters of a list of multinomial logit models
#'
#' Create a list containing the parameters of multiple fitted multinomial logit models.
#' Can be used to parameterize state transitions in a discrete time transition model
#' by passing to the `params` field of a [`CohortDtstmTrans`] object.
#' @param ... Objects of class [`params_mlogit`], which can be named.
#'
#' @return An object of class `params_mlogit_list`, which is a list containing
#' [`params_mlogit`] objects.
#' @examples
#' # Consider a sick-sicker model
#'
#' params <- params_mlogit_list(
#' ## Transitions from sick state (sick -> sicker, sick -> death)
#' sick = params_mlogit(
#' coefs = list(
#' sicker = data.frame(
#' intercept = c(-0.33, -.2),
#' treat = c(log(.75), log(.8))
#' ),
#' death = data.frame(
#' intercept = c(-1, -1.2),
#' treat = c(log(.6), log(.65))
#' )
#' )
#' ),
#'
#' ## Transitions from sicker state (sicker -> death)
#' sicker = params_mlogit(
#' coefs = list(
#' death = data.frame(
#' intercept = c(-1.5, -1.4),
#' treat = c(log(.5), log(.55))
#' )
#' )
#' )
#' )
#' summary(params)
#' params
#'
#' @seealso [summary.params_mlogit_list()], [params_mlogit()], [`CohortDtstmTrans`]
#' @export
# Bundle one params_mlogit object per non-absorbing health state into a
# validated "params_mlogit_list" container.
params_mlogit_list <- function(...){
  check_params_list(
    new_params_list(..., inner_class = "params_mlogit",
                    new_class = "params_mlogit_list")
  )
}
# summary.params_surv_list() ---------------------------------------------------
#' @rdname summary.params
#' @export
summary.params_mlogit_list <- function(object, probs = c(.025, .975), ...) {
  # Delegate to the shared list summarizer; the "from" ID column records
  # which starting health state each set of coefficients belongs to
  summary_params_list(object, probs, idcol = "from", ...)
}
# print.params_mlogit_list() ---------------------------------------------------
#' @export
# Print method: coefficient summary followed by the model's dimensions
# (samples, starting states, transitions per state). Returns x invisibly,
# per the convention for print methods.
print.params_mlogit_list <- function(x, ...) {
  cat("A \"params_mlogit_list\" object\n\n")
  cat("Summary of coefficients:\n")
  print(summary(x))
  cat("\n")
  cat(paste0("Number of parameter samples: ", x[[1]]$n_samples))
  cat("\n")
  cat(paste0("Number of starting (non-absorbing) states: ", length(x)))
  cat("\n")
  # vapply() (rather than sapply()) guarantees a numeric vector even for
  # degenerate inputs; dim(.)[3] is the number of transitions out of each
  # starting state (output is identical: cat() drops names).
  cat("Number of transitions by starting state:",
      vapply(x, function(z) dim(z$coef)[3], numeric(1)))
  invisible(x)
}
# create_params.multinom_list() ------------------------------------------------
#' @export
#' @rdname create_params
create_params.multinom_list <- function(object, n = 1000, uncertainty = c("normal", "none"), ...){
return(create_params_list(object, n = n, uncertainty = uncertainty,
inner_class = "params_mlogit", new_class = "params_mlogit_list",
...))
} | /R/params_mlogit_list.R | no_license | jeff-m-sullivan/hesim | R | false | false | 2,787 | r | # params_mlogit_list() ---------------------------------------------------------
#' Parameters of a list of multinomial logit models
#'
#' Create a list containing the parameters of multiple fitted multinomial logit models.
#' Can be used to parameterize state transitions in a discrete time transition model
#' by passing to the `params` field of a [`CohortDtstmTrans`] object.
#' @param ... Objects of class [`params_mlogit`], which can be named.
#'
#' @return An object of class `params_mlogit_list`, which is a list containing
#' [`params_mlogit`] objects.
#' @examples
#' # Consider a sick-sicker model
#'
#' params <- params_mlogit_list(
#' ## Transitions from sick state (sick -> sicker, sick -> death)
#' sick = params_mlogit(
#' coefs = list(
#' sicker = data.frame(
#' intercept = c(-0.33, -.2),
#' treat = c(log(.75), log(.8))
#' ),
#' death = data.frame(
#' intercept = c(-1, -1.2),
#' treat = c(log(.6), log(.65))
#' )
#' )
#' ),
#'
#' ## Transitions from sicker state (sicker -> death)
#' sicker = params_mlogit(
#' coefs = list(
#' death = data.frame(
#' intercept = c(-1.5, -1.4),
#' treat = c(log(.5), log(.55))
#' )
#' )
#' )
#' )
#' summary(params)
#' params
#'
#' @seealso [summary.params_mlogit_list()], [params_mlogit()], [`CohortDtstmTrans`]
#' @export
params_mlogit_list <- function(...){
p <- new_params_list(..., inner_class = "params_mlogit",
new_class = "params_mlogit_list")
check_params_list(p)
}
# summary.params_surv_list() ---------------------------------------------------
#' @rdname summary.params
#' @export
summary.params_mlogit_list <- function(object, probs = c(.025, .975), ...) {
summary_params_list(object, probs, idcol = "from", ...)
}
# print.params_mlogit_list() ---------------------------------------------------
#' @export
print.params_mlogit_list <- function(x, ...) {
cat("A \"params_mlogit_list\" object\n\n")
cat("Summary of coefficients:\n")
print(summary(x))
cat("\n")
cat(paste0("Number of parameter samples: ", x[[1]]$n_samples))
cat("\n")
cat(paste0("Number of starting (non-absorbing) states: ", length(x)))
cat("\n")
cat("Number of transitions by starting state:",
sapply(x, function(z) dim(z$coef)[3]))
invisible(x)
}
# create_params.multinom_list() ------------------------------------------------
#' @export
#' @rdname create_params
# Draw parameter samples from each fitted multinomial logit model in the
# list and collect them into a params_mlogit_list container.
create_params.multinom_list <- function(object, n = 1000, uncertainty = c("normal", "none"), ...){
  create_params_list(object, n = n, uncertainty = uncertainty,
                     inner_class = "params_mlogit",
                     new_class = "params_mlogit_list", ...)
}
# Read the UCI household power consumption data; "?" marks missing values,
# so Global_active_power parses as numeric rather than character
data<-read.table("household_power_consumption.txt", sep=";", header=T, na.strings = "?")
# Keep only the two target days (dates are stored as d/m/Y strings):
small.data <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine Date and Time into POSIXlt timestamps for the x-axis:
date.set <- strptime(paste(small.data$Date, small.data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Render the time series to a 480x480 PNG:
png("plot2.png", width=480, height=480)
plot(date.set,
as.numeric(small.data$Global_active_power) ,
type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | choboroku/ExData_Plotting1 | R | false | false | 583 | r |
# Reading data and put them into an array:
data<-read.table("household_power_consumption.txt", sep=";", header=T, na.strings = "?")
# Select a portion of the data that satisfies the dates below:
small.data <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# A useful object for dates:
date.set <- strptime(paste(small.data$Date, small.data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Plot the data:
png("plot2.png", width=480, height=480)
plot(date.set,
as.numeric(small.data$Global_active_power) ,
type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
rm(list=ls())
# on linux: sudo apt-get install r-cran-rjava to get Rcpp, JDK working (needed for these libraries)
library(ggplot2)
library(igraph)
library(openxlsx)
library(stringr)
# modify your path as appropriate
# note that this a slow/inefficient function - CSV or similar format would be better
data <- read.xlsx("/home/rbshaffer/Downloads/globalterrorismdb_0616dist.xlsx")
# visualize attacks from top N groups
n <- 25
# Rank group names by number of attacks (rows) attributed to each
ordered <- unique(data$gname)
prevalence <- sapply(ordered, function(x){sum(data$gname == x)})
ordered <- ordered[order(prevalence, decreasing = T)]
# Take ranks 2..(n+1): rank 1 is presumably the "Unknown" catch-all -- TODO confirm
edgelist <- as.matrix(data[data$gname %in% ordered[2:(n+1)],c('gname', 'country_txt')])
edgelist <- edgelist[!edgelist[,1] %in% c('Unaffiliated Individual(s)'),]
# shorten some names
# For names containing parentheses, keep only the parenthesized acronym
to_shorten <- grep('\\(', edgelist[,1])
edgelist[to_shorten, 1] <- str_match(edgelist[to_shorten, 1], '\\(([-A-Za-z0-9 ]+)\\)')[,2]
# collapse to a weighted graph of unique entries (could imagine separating by year, decade, etc)
weighted_edges <- unique(edgelist)
igraph_obj <- graph_from_edgelist(weighted_edges, directed=F)
# add a few attributes
# Bipartite type: TRUE for groups (column 1 of the edgelist), FALSE for countries
V(igraph_obj)$type <- V(igraph_obj)$name %in% edgelist[,1]
# Edge weight = number of attacks by that group in that country
E(igraph_obj)$weight <- apply(weighted_edges, 1, function(x){sum(edgelist == x)})
# collapsing the two-mode terrorism network to two one-mode networks
# (incidence matrix rows are countries, columns are groups, so t(M) %*% M
# is group-by-group and M %*% t(M) is country-by-country)
bp_terror_adj <- t(as_incidence_matrix(igraph_obj)) %*% (as_incidence_matrix(igraph_obj))
bp_country_adj <- (as_incidence_matrix(igraph_obj)) %*% t(as_incidence_matrix(igraph_obj))
# generate a version with the diagonals zeroed out (for visual purposes) and one without
bp_terror_nodiag <- bp_terror_adj
bp_country_nodiag <- bp_country_adj
diag(bp_terror_nodiag) <- 0
diag(bp_country_nodiag) <- 0
bp_terror <- graph_from_adjacency_matrix(bp_terror_adj, weighted=T)
bp_country <- graph_from_adjacency_matrix(bp_country_adj, weighted=T)
bp_terror_nd <- graph_from_adjacency_matrix(bp_terror_nodiag, weighted=T)
bp_country_nd <- graph_from_adjacency_matrix(bp_country_nodiag, weighted=T)
# normalize weights for visual purposes (no-diagonal graph only)
edge_weight <- 10
E(bp_terror_nd)$weight <- edge_weight*E(bp_terror_nd)$weight/max(E(bp_terror_nd)$weight)
E(bp_country_nd)$weight <- edge_weight*E(bp_country_nd)$weight/max(E(bp_country_nd)$weight)
# Draw both one-mode projections with force-directed layouts
plot(bp_terror_nd,
vertex.size=5,
vertex.frame.color='white',
layout=layout_with_fr(bp_terror),
margin=-.1,
edge.width=E(bp_terror)$weight,
edge.arrow.size=0)
plot(bp_country_nd,
vertex.size=5,
vertex.frame.color='white',
layout=layout_with_fr(bp_country),
margin=-.1,
edge.width=E(bp_country)$weight,
edge.arrow.size=0)
ggplot(NULL) + geom_bar(aes(x=names(eigen_centrality(bp_terror)$vector), y=eigen_centrality(bp_terror)$vector), stat='identity') +
xlab('Centrality') + ylab('Group') + theme_minimal() + theme(axis.text.x=element_text(angle=45, hjust=1))
ggplot(NULL) + geom_bar(aes(x=names(eigen_centrality(bp_country)$vector), y=eigen_centrality(bp_country)$vector), stat='identity') +
xlab('Centrality') + ylab('Group') + theme_minimal() + theme(axis.text.x=element_text(angle=45, hjust=1))
| /resources/terrorism_example/02_bipartiteprojection_centrality.R | permissive | rbshaffer/narg | R | false | false | 3,135 | r | rm(list=ls())
# on linux: sudo apt-get install r-cran-rjava to get Rcpp, JDK working (needed for these libraries)
library(igraph)
library(stringr)
library(openxlsx)
# Build a bipartite (group x country) attack network from the Global Terrorism
# Database, project it onto one-mode group and country networks, plot both,
# and plot eigenvector centralities for each projection.
# modify your path as appropriate
# note that this is a slow/inefficient function - CSV or similar format would be better
data <- read.xlsx("/home/rbshaffer/Downloads/globalterrorismdb_0616dist.xlsx")
# visualize attacks from top n groups; the single most frequent gname is
# skipped by the 2:(n+1) window below (in the GTD that label is "Unknown")
n <- 25
group_names <- unique(data$gname)
prevalence <- vapply(group_names, function(g) sum(data$gname == g), numeric(1))
group_names <- group_names[order(prevalence, decreasing = TRUE)]
edgelist <- as.matrix(data[data$gname %in% group_names[2:(n + 1)], c('gname', 'country_txt')])
edgelist <- edgelist[!edgelist[, 1] %in% c('Unaffiliated Individual(s)'), ]
# shorten some names: keep only the abbreviation inside parentheses
to_shorten <- grep('\\(', edgelist[, 1])
edgelist[to_shorten, 1] <- str_match(edgelist[to_shorten, 1], '\\(([-A-Za-z0-9 ]+)\\)')[, 2]
# collapse to a weighted graph of unique entries (could imagine separating by year, decade, etc)
weighted_edges <- unique(edgelist)
igraph_obj <- graph_from_edgelist(weighted_edges, directed = FALSE)
# add a few attributes; type is TRUE for group vertices, FALSE for countries
V(igraph_obj)$type <- V(igraph_obj)$name %in% edgelist[, 1]
# weight = number of attacks for each (group, country) pair
# (fixed: sum(edgelist == x) compared the matrix against a recycled length-2
# vector column-major, which does not count matching rows)
E(igraph_obj)$weight <- apply(weighted_edges, 1, function(x) {
  sum(edgelist[, 1] == x[1] & edgelist[, 2] == x[2])
})
# collapsing the two-mode terrorism network to two one-mode networks
bp_terror_adj <- t(as_incidence_matrix(igraph_obj)) %*% (as_incidence_matrix(igraph_obj))
bp_country_adj <- (as_incidence_matrix(igraph_obj)) %*% t(as_incidence_matrix(igraph_obj))
# generate a version with the diagonals zeroed out (for visual purposes) and one without
bp_terror_nodiag <- bp_terror_adj
bp_country_nodiag <- bp_country_adj
diag(bp_terror_nodiag) <- 0
diag(bp_country_nodiag) <- 0
bp_terror <- graph_from_adjacency_matrix(bp_terror_adj, weighted = TRUE)
bp_country <- graph_from_adjacency_matrix(bp_country_adj, weighted = TRUE)
bp_terror_nd <- graph_from_adjacency_matrix(bp_terror_nodiag, weighted = TRUE)
bp_country_nd <- graph_from_adjacency_matrix(bp_country_nodiag, weighted = TRUE)
# normalize weights for visual purposes (no-diagonal graph only)
edge_weight <- 10
E(bp_terror_nd)$weight <- edge_weight * E(bp_terror_nd)$weight / max(E(bp_terror_nd)$weight)
E(bp_country_nd)$weight <- edge_weight * E(bp_country_nd)$weight / max(E(bp_country_nd)$weight)
plot(bp_terror_nd,
     vertex.size = 5,
     vertex.frame.color = 'white',
     layout = layout_with_fr(bp_terror),
     margin = -.1,
     edge.width = E(bp_terror)$weight,
     edge.arrow.size = 0)
plot(bp_country_nd,
     vertex.size = 5,
     vertex.frame.color = 'white',
     layout = layout_with_fr(bp_country),
     margin = -.1,
     edge.width = E(bp_country)$weight,
     edge.arrow.size = 0)
# eigenvector centrality per group; axis labels fixed (x holds the names,
# y holds the centrality scores -- they were swapped in the original)
ggplot(NULL) + geom_bar(aes(x = names(eigen_centrality(bp_terror)$vector), y = eigen_centrality(bp_terror)$vector), stat = 'identity') +
  xlab('Group') + ylab('Centrality') + theme_minimal() + theme(axis.text.x = element_text(angle = 45, hjust = 1))
# eigenvector centrality per country
ggplot(NULL) + geom_bar(aes(x = names(eigen_centrality(bp_country)$vector), y = eigen_centrality(bp_country)$vector), stat = 'identity') +
  xlab('Country') + ylab('Centrality') + theme_minimal() + theme(axis.text.x = element_text(angle = 45, hjust = 1))
|
data <- DBTS
N <- as.numeric(dim(data)[1])
GBPEUR <- data$HU
SEKEUR <- data$RS
CADEUR <- data$WGenRO

# Rescaled-range (R/S) estimate of the Hurst exponent of a price series X.
# Prints the exponent plus two "annualized" volatility figures (sqrt(n) scaling
# and n^Hurst scaling) and draws the log-log R/S plot whose slope is the
# exponent. Invisibly returns the three numbers.
hurst_vol <- function(X) {
  # simple one-period returns
  returns <- diff(X) / X[-length(X)]
  # number of subdivisions used in each pass (1, 2, 4, ..., 32)
  test <- c(1, 2, 4, 8, 16, 32)
  n_sub <- sum(test)  # 63 subdivisions in total across all passes
  # demeaned returns and standard deviation per subdivision, one row each
  adjusted_returns <- matrix(0, nrow = n_sub, ncol = length(X))
  stdev <- numeric(n_sub)
  for (i in seq_along(test)) {
    # number of elements in a subdivision (assumes length(returns) divides evenly)
    splitlength <- length(returns) / test[i]
    for (j in seq_len(test[i])) {
      temp <- returns[((j - 1) * splitlength + 1):(j * splitlength)]
      # row of subdivision j in pass i; the original assigned the whole block
      # test[i]:sum(test[1:i]) on every j, so only the last subdivision survived
      row <- test[i] + j - 1
      adjusted_returns[row, 1:splitlength] <- temp - mean(temp)
      stdev[row] <- sd(temp)
    }
  }
  # cumulative deviation series of the adjusted returns
  deviate_series <- matrix(0, nrow = n_sub, ncol = length(X))
  for (i in seq_len(n_sub)) {
    deviate_series[i, ] <- cumsum(adjusted_returns[i, ])
  }
  # widest spread of each deviation series, rescaled by the subdivision's sd
  widest_difference <- apply(deviate_series, 1, function(d) max(d) - min(d))
  rescaled_range <- widest_difference / stdev
  # mean rescaled range and subdivision size for each pass
  rescaled_range_m <- numeric(length(test))
  size <- numeric(length(test))
  for (i in seq_along(test)) {
    rescaled_range_m[i] <- mean(rescaled_range[test[i]:sum(test[1:i])])
    size[i] <- length(X) / test[i]
  }
  # log-log regression: the slope is the Hurst exponent
  rescaled_range_m_l <- log10(rescaled_range_m)
  size_l <- log10(size)
  plot(size_l, rescaled_range_m_l, type = 'l', xlab = 'log10(size of subdivision)', ylab = 'log10(meaned rescaled range)', main = 'logarithmically adjusted rescaled range \n function of the size of subdivision', sub = 'the slope is the Hurst exponent of the time series')
  linear_regression <- lm(rescaled_range_m_l ~ size_l)
  Hurst <- as.numeric(linear_regression$coefficients[2])
  # NOTE(review): sd of the raw series, not of the returns -- kept as written
  vol <- sd(X)
  annualized_vol <- vol * (length(X)^Hurst)
  cat('Hurst exponent : ', Hurst)
  cat(sep = '\n')
  cat('Annualized volatility : ', vol * sqrt(length(X)), '%')
  cat(sep = '\n')
  cat('Annualized volatility using Hurst exponent: ', annualized_vol, '%')
  invisible(list(hurst = Hurst, vol = vol, annualized_vol = annualized_vol))
}
hurst_vol(X = GBPEUR)
hurst_vol(CADEUR)
hurst_vol(SEKEUR)
| /Hurst.R | no_license | marijanrancic/MT_MR | R | false | false | 2,749 | r |
data <- DBTS
N <- as.numeric(dim(data)[1])
GBPEUR <- data$HU
SEKEUR <- data$RS
CADEUR <- data$WGenRO

# Rescaled-range (R/S) estimate of the Hurst exponent of a price series X.
# Prints the exponent plus two "annualized" volatility figures (sqrt(n) scaling
# and n^Hurst scaling) and draws the log-log R/S plot whose slope is the
# exponent. Invisibly returns the three numbers.
hurst_vol <- function(X) {
  # simple one-period returns
  returns <- diff(X) / X[-length(X)]
  # number of subdivisions used in each pass (1, 2, 4, ..., 32)
  test <- c(1, 2, 4, 8, 16, 32)
  n_sub <- sum(test)  # 63 subdivisions in total across all passes
  # demeaned returns and standard deviation per subdivision, one row each
  adjusted_returns <- matrix(0, nrow = n_sub, ncol = length(X))
  stdev <- numeric(n_sub)
  for (i in seq_along(test)) {
    # number of elements in a subdivision (assumes length(returns) divides evenly)
    splitlength <- length(returns) / test[i]
    for (j in seq_len(test[i])) {
      temp <- returns[((j - 1) * splitlength + 1):(j * splitlength)]
      # row of subdivision j in pass i; the original assigned the whole block
      # test[i]:sum(test[1:i]) on every j, so only the last subdivision survived
      row <- test[i] + j - 1
      adjusted_returns[row, 1:splitlength] <- temp - mean(temp)
      stdev[row] <- sd(temp)
    }
  }
  # cumulative deviation series of the adjusted returns
  deviate_series <- matrix(0, nrow = n_sub, ncol = length(X))
  for (i in seq_len(n_sub)) {
    deviate_series[i, ] <- cumsum(adjusted_returns[i, ])
  }
  # widest spread of each deviation series, rescaled by the subdivision's sd
  widest_difference <- apply(deviate_series, 1, function(d) max(d) - min(d))
  rescaled_range <- widest_difference / stdev
  # mean rescaled range and subdivision size for each pass
  rescaled_range_m <- numeric(length(test))
  size <- numeric(length(test))
  for (i in seq_along(test)) {
    rescaled_range_m[i] <- mean(rescaled_range[test[i]:sum(test[1:i])])
    size[i] <- length(X) / test[i]
  }
  # log-log regression: the slope is the Hurst exponent
  rescaled_range_m_l <- log10(rescaled_range_m)
  size_l <- log10(size)
  plot(size_l, rescaled_range_m_l, type = 'l', xlab = 'log10(size of subdivision)', ylab = 'log10(meaned rescaled range)', main = 'logarithmically adjusted rescaled range \n function of the size of subdivision', sub = 'the slope is the Hurst exponent of the time series')
  linear_regression <- lm(rescaled_range_m_l ~ size_l)
  Hurst <- as.numeric(linear_regression$coefficients[2])
  # NOTE(review): sd of the raw series, not of the returns -- kept as written
  vol <- sd(X)
  annualized_vol <- vol * (length(X)^Hurst)
  cat('Hurst exponent : ', Hurst)
  cat(sep = '\n')
  cat('Annualized volatility : ', vol * sqrt(length(X)), '%')
  cat(sep = '\n')
  cat('Annualized volatility using Hurst exponent: ', annualized_vol, '%')
  invisible(list(hurst = Hurst, vol = vol, annualized_vol = annualized_vol))
}
hurst_vol(X = GBPEUR)
hurst_vol(CADEUR)
hurst_vol(SEKEUR)
|
# Set working directory to specific dir in active RSS
# Pipeline overview: load two 10x runs (d74, d78), merge, QC-filter, then
# cluster twice -- once on the merged object (SCTransform) and once after
# Harmony batch integration -- writing plots, per-cluster cell counts and
# marker tables under ./seurat_analysis/d70s/.
setwd('/active/cherry_t/OrgManuscript_SingleCell_Data/human_scRNA')
# Load necessary libraries (dplyr v0.8.5; Seurat v3.1.1; patchwork v1.0.0; ggplot2 v3.3.0); make sure Seurat library has uwot installed (v0.1.4)
library(dplyr)
library(Seurat)
library(patchwork)
library(ggplot2)
#Make sure Harmony library is installed
#library(devtools)
#install_github("immunogenomics/harmony")
library(harmony)
#Adjust the maximum size of global objects (this may need to be increased later)
options(future.globals.maxSize = 8000 * 1024^2)
##known markers
markers <- c('RCVRN', 'RHO', 'CRX', 'ARR3', 'GNAT2', 'VSX2', 'LHX4', 'TRPM1', 'GRM6', 'SLC1A3', 'RLBP1', 'PAX6', 'LHX1', 'ONECUT2', 'TFAP2B', 'GAD1', 'SLC6A9', 'RBPMS', 'NEFM', 'GFAP', 'CD74', 'P2RY12', 'BEST1', 'RPE65', 'SFRP2')
#Import 10x data and convert each dataset to Seurat object (take from individual sample outputs, not cellranger's aggr output). Define sample with project= "samplename"
d74.data = Read10X(data.dir = './d74/outs/filtered_feature_bc_matrix/')
d74 = CreateSeuratObject(counts = d74.data, project = "d74", min.cells = 3, min.features = 200)
d78.data = Read10X(data.dir = './d78/outs/filtered_feature_bc_matrix/')
d78 = CreateSeuratObject(counts = d78.data, project = "d78", min.cells = 3, min.features = 200)
# Merge into one single Seurat object
human=merge(d74, y=c(d78))
#Validate the merge by checking number of cells per group
table(human$orig.ident)
#Store mitochondrial percentage in the Seurat object metadata
human[["percent.mt"]] <- PercentageFeatureSet(human, pattern = "^MT-")
#Visualize QC metrics (will split by 'orig.ident')
VlnPlot(human, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size=0.1)
# NOTE(review): dev.copy2pdf captures the current graphics device; plot objects
# auto-print only in interactive sessions -- wrap plots in print() if this is
# ever run non-interactively via Rscript.
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.qc.pdf", width=20)
#Add sample and condition information explicitly into the metadata (as oppsoed to storing in 'orig.ident') for future downstream analysis
#Add sample info (simply copying 'orig.ident')
# NOTE(review): 'from' and 'to' are identical, so this is a straight copy of
# orig.ident into 'sample' (kept as a hook for future relabelling).
human$sample <- plyr::mapvalues(
x = human$orig.ident,
from = c('d74', 'd78'),
to = c('d74', 'd78'))
#Validate new metadata columns by checking that number of cells per sample/phenotype adds up
table(human$sample)
#Run same QC metrics by new metadata columns to ensure it is the same as original QC metrics
VlnPlot(human, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), group.by='sample', ncol = 3, pt.size=0.1)
#Filter the data
human <- subset(human, subset = nFeature_RNA > 200 & nFeature_RNA < 9000 & percent.mt < 5)
#Run standard Seurat workflow (Normalize, Find Variable Features, Scale, PCA) in order to check principal components between samples
human=NormalizeData(human)
human <- FindVariableFeatures(human, selection.method = "vst", nfeatures = 2000)
human=ScaleData(human)
human = RunPCA(human)
DimPlot(human, reduction='pca', group.by='sample')
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.pca.pdf", width=20)
ElbowPlot(human, ndims=30)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.elbow_plot.pdf", width=20)
#If samples do not cluster together in PC space (which is the case here), then there is no need to run harmony (and likely no need to use the integrated method either); the merged analysis should do.
#Determine the number of dimensions to use in downstream analyses based on the point at which the Elbow Plot becomes flat (ok to be conservative)
#Save merged and filtered dataset as an R object to be able to re-load it for various future analyses without needing to perform the previous computations
saveRDS(human, file = "./seurat_analysis/d70s/human.rds")
##and load if needed
#human <- readRDS(file = "./seurat_analysis/d70s/human.rds")
#Run SCTransform, set var.to.regress to percent.mt
human_merged <- SCTransform(human, vars.to.regress = "percent.mt", verbose = FALSE)
##resolution 0.4 and 30 dims
#We can now run the standard Seurat workflow (PCA, UMAP, FindNeighbors, FindClusters).
human_merged <- RunPCA(human_merged, verbose = FALSE)
human_merged <- RunUMAP(human_merged, dims = 1:30, verbose = FALSE)
human_merged <- FindNeighbors(human_merged, dims = 1:30, verbose=FALSE)
human_merged <- FindClusters(human_merged, resolution = 0.4)
#Plot UMAP
DimPlot(human_merged, reduction='umap', split.by='sample', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.UMAP.res0.4.sample_split.pdf", width = 20)
DimPlot(human_merged, reduction='umap', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.UMAP.res0.4.pdf", width=20)
#Identify the number of cells in each cluster between samples
counts_cluster_sample = table(human_merged$seurat_clusters, human_merged$sample)
write.csv(counts_cluster_sample, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.counts_cluster_sample.res0.4.csv')
##graph known markers
#Dot plot - the size of the dot = % of cells and color represents the average expression
DotPlot(human_merged, features = markers) + RotatedAxis()
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.res0.4.known_markers.dotplot.pdf", width = 20)
#Find all markers that define each cluster
human_merged.markers <- FindAllMarkers(human_merged, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(human_merged.markers, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.markers.res0.4.csv')
##harmony analysis...
# Second clustering pass: same saved object, but batch-corrected with Harmony
# before UMAP/neighbors/clustering.
##load if needed
human <- readRDS(file = "./seurat_analysis/d70s/human.rds")
#Harmony needs RunPCA to have been performed, so make sure previous analysis steps have bene performed.
human_harmony <- RunHarmony(object = human, group.by.vars = 'sample')
DimPlot(human_harmony, reduction = 'harmony', group.by = 'sample')
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.harmony_plot.pdf", width = 20)
# Run standard Seurat workflow steps, but set reduction to "harmony" for UMAP and Neighbors
human_harmony <- RunUMAP(human_harmony, dims = 1:30, reduction = 'harmony')
human_harmony <- FindNeighbors(human_harmony, reduction = 'harmony', dims = 1:30)
##change resolutions - 0.4
human_harmony <- FindClusters(human_harmony, resolution = 0.4)
#Plot UMAP
DimPlot(human_harmony, reduction='umap', split.by='sample', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.UMAP.res0.4.sample_split.pdf", width = 20)
DimPlot(human_harmony, reduction='umap', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.UMAP.res0.4.pdf", width=20)
#Identify the number of cells in each cluster between samples
counts_cluster_sample = table(human_harmony$seurat_clusters, human_harmony$sample)
write.csv(counts_cluster_sample, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.counts_cluster_sample.res0.4.csv')
#Dot plot - the size of the dot = % of cells and color represents the average expression
DotPlot(human_harmony, features = markers) + RotatedAxis()
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.res0.4.known_markers.dotplot.pdf", width = 20)
#Find all markers that define each cluster
human_harmony.markers <- FindAllMarkers(human_harmony, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(human_harmony.markers, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.markers.res0.4.csv')
#Save object to avoid needing to re-run previous computations
saveRDS(human_harmony, file = "./seurat_analysis/d70s/human_harmony.rds")
| /cherry_sc_org_manuscript/cherry_human_scRNASeq_d70s_0620.R | no_license | atimms/ratchet_scripts | R | false | false | 7,623 | r | # Set working directory to specific dir in active RSS
# Pipeline overview: load two 10x runs (d74, d78), merge, QC-filter, then
# cluster twice -- once on the merged object (SCTransform) and once after
# Harmony batch integration -- writing plots, per-cluster cell counts and
# marker tables under ./seurat_analysis/d70s/.
setwd('/active/cherry_t/OrgManuscript_SingleCell_Data/human_scRNA')
# Load necessary libraries (dplyr v0.8.5; Seurat v3.1.1; patchwork v1.0.0; ggplot2 v3.3.0); make sure Seurat library has uwot installed (v0.1.4)
library(dplyr)
library(Seurat)
library(patchwork)
library(ggplot2)
#Make sure Harmony library is installed
#library(devtools)
#install_github("immunogenomics/harmony")
library(harmony)
#Adjust the maximum size of global objects (this may need to be increased later)
options(future.globals.maxSize = 8000 * 1024^2)
##known markers
markers <- c('RCVRN', 'RHO', 'CRX', 'ARR3', 'GNAT2', 'VSX2', 'LHX4', 'TRPM1', 'GRM6', 'SLC1A3', 'RLBP1', 'PAX6', 'LHX1', 'ONECUT2', 'TFAP2B', 'GAD1', 'SLC6A9', 'RBPMS', 'NEFM', 'GFAP', 'CD74', 'P2RY12', 'BEST1', 'RPE65', 'SFRP2')
#Import 10x data and convert each dataset to Seurat object (take from individual sample outputs, not cellranger's aggr output). Define sample with project= "samplename"
d74.data = Read10X(data.dir = './d74/outs/filtered_feature_bc_matrix/')
d74 = CreateSeuratObject(counts = d74.data, project = "d74", min.cells = 3, min.features = 200)
d78.data = Read10X(data.dir = './d78/outs/filtered_feature_bc_matrix/')
d78 = CreateSeuratObject(counts = d78.data, project = "d78", min.cells = 3, min.features = 200)
# Merge into one single Seurat object
human=merge(d74, y=c(d78))
#Validate the merge by checking number of cells per group
table(human$orig.ident)
#Store mitochondrial percentage in the Seurat object metadata
human[["percent.mt"]] <- PercentageFeatureSet(human, pattern = "^MT-")
#Visualize QC metrics (will split by 'orig.ident')
VlnPlot(human, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size=0.1)
# NOTE(review): dev.copy2pdf captures the current graphics device; plot objects
# auto-print only in interactive sessions -- wrap plots in print() if this is
# ever run non-interactively via Rscript.
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.qc.pdf", width=20)
#Add sample and condition information explicitly into the metadata (as oppsoed to storing in 'orig.ident') for future downstream analysis
#Add sample info (simply copying 'orig.ident')
# NOTE(review): 'from' and 'to' are identical, so this is a straight copy of
# orig.ident into 'sample' (kept as a hook for future relabelling).
human$sample <- plyr::mapvalues(
x = human$orig.ident,
from = c('d74', 'd78'),
to = c('d74', 'd78'))
#Validate new metadata columns by checking that number of cells per sample/phenotype adds up
table(human$sample)
#Run same QC metrics by new metadata columns to ensure it is the same as original QC metrics
VlnPlot(human, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), group.by='sample', ncol = 3, pt.size=0.1)
#Filter the data
human <- subset(human, subset = nFeature_RNA > 200 & nFeature_RNA < 9000 & percent.mt < 5)
#Run standard Seurat workflow (Normalize, Find Variable Features, Scale, PCA) in order to check principal components between samples
human=NormalizeData(human)
human <- FindVariableFeatures(human, selection.method = "vst", nfeatures = 2000)
human=ScaleData(human)
human = RunPCA(human)
DimPlot(human, reduction='pca', group.by='sample')
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.pca.pdf", width=20)
ElbowPlot(human, ndims=30)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.elbow_plot.pdf", width=20)
#If samples do not cluster together in PC space (which is the case here), then there is no need to run harmony (and likely no need to use the integrated method either); the merged analysis should do.
#Determine the number of dimensions to use in downstream analyses based on the point at which the Elbow Plot becomes flat (ok to be conservative)
#Save merged and filtered dataset as an R object to be able to re-load it for various future analyses without needing to perform the previous computations
saveRDS(human, file = "./seurat_analysis/d70s/human.rds")
##and load if needed
#human <- readRDS(file = "./seurat_analysis/d70s/human.rds")
#Run SCTransform, set var.to.regress to percent.mt
human_merged <- SCTransform(human, vars.to.regress = "percent.mt", verbose = FALSE)
##resolution 0.4 and 30 dims
#We can now run the standard Seurat workflow (PCA, UMAP, FindNeighbors, FindClusters).
human_merged <- RunPCA(human_merged, verbose = FALSE)
human_merged <- RunUMAP(human_merged, dims = 1:30, verbose = FALSE)
human_merged <- FindNeighbors(human_merged, dims = 1:30, verbose=FALSE)
human_merged <- FindClusters(human_merged, resolution = 0.4)
#Plot UMAP
DimPlot(human_merged, reduction='umap', split.by='sample', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.UMAP.res0.4.sample_split.pdf", width = 20)
DimPlot(human_merged, reduction='umap', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.UMAP.res0.4.pdf", width=20)
#Identify the number of cells in each cluster between samples
counts_cluster_sample = table(human_merged$seurat_clusters, human_merged$sample)
write.csv(counts_cluster_sample, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.counts_cluster_sample.res0.4.csv')
##graph known markers
#Dot plot - the size of the dot = % of cells and color represents the average expression
DotPlot(human_merged, features = markers) + RotatedAxis()
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.res0.4.known_markers.dotplot.pdf", width = 20)
#Find all markers that define each cluster
human_merged.markers <- FindAllMarkers(human_merged, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(human_merged.markers, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.merged.markers.res0.4.csv')
##harmony analysis...
# Second clustering pass: same saved object, but batch-corrected with Harmony
# before UMAP/neighbors/clustering.
##load if needed
human <- readRDS(file = "./seurat_analysis/d70s/human.rds")
#Harmony needs RunPCA to have been performed, so make sure previous analysis steps have bene performed.
human_harmony <- RunHarmony(object = human, group.by.vars = 'sample')
DimPlot(human_harmony, reduction = 'harmony', group.by = 'sample')
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.harmony_plot.pdf", width = 20)
# Run standard Seurat workflow steps, but set reduction to "harmony" for UMAP and Neighbors
human_harmony <- RunUMAP(human_harmony, dims = 1:30, reduction = 'harmony')
human_harmony <- FindNeighbors(human_harmony, reduction = 'harmony', dims = 1:30)
##change resolutions - 0.4
human_harmony <- FindClusters(human_harmony, resolution = 0.4)
#Plot UMAP
DimPlot(human_harmony, reduction='umap', split.by='sample', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.UMAP.res0.4.sample_split.pdf", width = 20)
DimPlot(human_harmony, reduction='umap', pt.size=0.1, label = TRUE)
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.UMAP.res0.4.pdf", width=20)
#Identify the number of cells in each cluster between samples
counts_cluster_sample = table(human_harmony$seurat_clusters, human_harmony$sample)
write.csv(counts_cluster_sample, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.counts_cluster_sample.res0.4.csv')
#Dot plot - the size of the dot = % of cells and color represents the average expression
DotPlot(human_harmony, features = markers) + RotatedAxis()
dev.copy2pdf(file="./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.res0.4.known_markers.dotplot.pdf", width = 20)
#Find all markers that define each cluster
human_harmony.markers <- FindAllMarkers(human_harmony, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(human_harmony.markers, file='./seurat_analysis/d70s/human_scrnaseq_d70s_0620.harmony.markers.res0.4.csv')
#Save object to avoid needing to re-run previous computations
saveRDS(human_harmony, file = "./seurat_analysis/d70s/human_harmony.rds")
|
library(tidyverse)
## usage of the script
##
## $ Rscript circular_bar_great.R ../private_data/results/dev_codingNonMHC_lor_center_p001_100PCs_20180123.npz/phenotypes/asthma MGIPhenotype BFold
##
source('circular_bar_no_legend.R')
# Positional args: <phenotype_dir> <ontology> <score>. 'score' selects which
# GREAT statistic drives bar height (Score); the other becomes transparency (Alpha).
args <- commandArgs(TRUE)
phenotype_dir <- args[1]
ontology <- args[2]
score <- args[3]
# fail fast on a bad score argument (the original silently left 'data' undefined)
if (length(args) < 3 || !score %in% c('BFold', 'BPval')) {
  stop('usage: Rscript circular_bar_great_no_legend.R <phenotype_dir> <ontology> <BFold|BPval>', call. = FALSE)
}
circos_data <- file.path(phenotype_dir, 'great', ontology, 'circos-data.csv')
cosine_score <- file.path(phenotype_dir, 'squared_cosine_scores.tsv')
figure_out <- file.path(phenotype_dir, 'great', ontology, 'circos-no-legend.pdf')
## read data
print('reading data')
if (score == 'BFold') {
  data <- read_csv(circos_data) %>%
    mutate(Score = BFold, Alpha = -log10(BPval)) %>%
    select(-PC_rank, -BFold, -BPval) %>%
    rename(
      Group_id = PC,
      Label = Term
    )
} else {
  # score == 'BPval'
  data <- read_csv(circos_data) %>%
    mutate(Alpha = BFold, Score = -log10(BPval)) %>%
    select(-PC_rank, -BFold, -BPval) %>%
    rename(
      Group_id = PC,
      Label = Term
    )
}
print('reading cosine scores')
# headerless TSV: V1 = plotting order, V2 = zero-based PC index, V3 = fraction
groups <- read.table(cosine_score) %>%
  rename(
    Group_order = V1,
    PC_zero_based = V2,
    Group_fraction = V3
  ) %>% mutate(
    Group_id = paste0('PC', PC_zero_based + 1)
  ) %>% select(-PC_zero_based)
## plot
circular_bar_plot(data, groups, loc_margin = 0.05, quantile_thr = 0.05, alpha_min = 0.3)
ggsave(figure_out)
| /uk_biobank/DeGAs/src/circular_bar_great_no_legend.R | permissive | rivas-lab/public-resources | R | false | false | 1,428 | r | library(tidyverse)
## usage of the script
##
## $ Rscript circular_bar_great.R ../private_data/results/dev_codingNonMHC_lor_center_p001_100PCs_20180123.npz/phenotypes/asthma MGIPhenotype BFold
##
source('circular_bar_no_legend.R')
# Positional args: <phenotype_dir> <ontology> <score>. 'score' selects which
# GREAT statistic drives bar height (Score); the other becomes transparency (Alpha).
args <- commandArgs(TRUE)
phenotype_dir <- args[1]
ontology <- args[2]
score <- args[3]
# fail fast on a bad score argument (the original silently left 'data' undefined)
if (length(args) < 3 || !score %in% c('BFold', 'BPval')) {
  stop('usage: Rscript circular_bar_great_no_legend.R <phenotype_dir> <ontology> <BFold|BPval>', call. = FALSE)
}
circos_data <- file.path(phenotype_dir, 'great', ontology, 'circos-data.csv')
cosine_score <- file.path(phenotype_dir, 'squared_cosine_scores.tsv')
figure_out <- file.path(phenotype_dir, 'great', ontology, 'circos-no-legend.pdf')
## read data
print('reading data')
if (score == 'BFold') {
  data <- read_csv(circos_data) %>%
    mutate(Score = BFold, Alpha = -log10(BPval)) %>%
    select(-PC_rank, -BFold, -BPval) %>%
    rename(
      Group_id = PC,
      Label = Term
    )
} else {
  # score == 'BPval'
  data <- read_csv(circos_data) %>%
    mutate(Alpha = BFold, Score = -log10(BPval)) %>%
    select(-PC_rank, -BFold, -BPval) %>%
    rename(
      Group_id = PC,
      Label = Term
    )
}
print('reading cosine scores')
# headerless TSV: V1 = plotting order, V2 = zero-based PC index, V3 = fraction
groups <- read.table(cosine_score) %>%
  rename(
    Group_order = V1,
    PC_zero_based = V2,
    Group_fraction = V3
  ) %>% mutate(
    Group_id = paste0('PC', PC_zero_based + 1)
  ) %>% select(-PC_zero_based)
## plot
circular_bar_plot(data, groups, loc_margin = 0.05, quantile_thr = 0.05, alpha_min = 0.3)
ggsave(figure_out)
|
source("data_extract_google.R")
# inputs (defined by the sourced script): df, api_interval, forecast_horizon,
# api_stock_symbol
# Flag "outside bars": bars whose high/low straddle the day's opening-bar range,
# then test whether the subsequent breakout travelled 2x that range.
patterns <-
  df %>%
  arrange(timestamp) %>%
  # drop the 09:15 bar (timestamps carry a +05:30 offset, hence the subtraction)
  filter(as.numeric(timestamp) %% (24*60*60) != (9*60*60 + 15*60) - (5*60*60 + 30*60)) %>%
  group_by(date = as.Date(timestamp)) %>%
  # carry each day's opening-bar high/low onto every row of that day
  # (replaces the original character "X" markers plus the 1:nrow() fill loop)
  mutate(open_high = high[which.min(timestamp)],
         open_low = low[which.min(timestamp)]) %>%
  mutate(overlap_flag = ifelse(high > open_high & low < open_low & open_high != open_low, 1, 0),
         pattern_flag = ifelse(lag(overlap_flag) == 1 & (lag(high) < high | lag(low) > low), 1, 0),
         pattern_type = ifelse(pattern_flag == 1 & lag(high) < high, "H", ifelse(pattern_flag == 1 & lag(low) > low, "L", "N"))) %>%
  arrange(desc(timestamp)) %>%
  as.data.frame(stringsAsFactors = FALSE) %>%
  # rolling max/min of price over the forward forecast horizon
  # (rows are in reverse time order here, so rollapplyr looks "forward")
  mutate(forecast_horizon_high = rollapplyr(high, forecast_horizon*(6.25*60*60)/api_interval, max, fill = "NA", partial = TRUE),
         forecast_horizon_low = rollapplyr(low, forecast_horizon*(6.25*60*60)/api_interval, min, fill = "NA", partial = TRUE)) %>%
  arrange(timestamp)
# a pattern "passes" if price moves 2x the opening range beyond the breakout bar
# within the forecast horizon
breakouts <-
  patterns %>%
  filter(pattern_type %in% c("H", "L")) %>%
  mutate(pass = ifelse((pattern_type == "H" & forecast_horizon_high >= high + 2*(open_high-open_low))
                       | (pattern_type == "L" & forecast_horizon_low <= low - 2*(open_high-open_low)), 1, 0),
         stock_symbol = api_stock_symbol)
| /high_low_overlap.R | no_license | iqcool1/aws-finance | R | false | false | 1,747 | r | source("data_extract_google.R")
# NOTE(review): this chunk is a duplicate of the high/low-overlap breakout
# script that appears earlier in this file (dataset-dump artifact);
# consider removing one of the two copies.
#inputs = df, api_interval, forecast_horizon
# Build per-day opening range: the day's first bar keeps its real high/low,
# every later bar gets the "X" sentinel so the loop below can fill down.
patterns <-
df %>%
arrange(timestamp) %>%
# Drop bars at 09:15 IST (= 03:45 UTC, expressed as seconds of day).
filter(as.numeric(timestamp) %% (24*60*60) != (9*60*60 + 15*60) - (5*60*60 + 30*60)) %>%
group_by(date = as.Date(timestamp)) %>%
mutate(open_high = ifelse(timestamp == min(timestamp), high, "X"),
open_low = ifelse(timestamp == min(timestamp), low, "X")
)
# Fill-down: propagate the day's opening high/low onto every later row.
# NOTE(review): assumes row 1 is never "X" (it is the first bar of the first
# day); otherwise patterns[0, ] would be indexed. Also note the ifelse above
# coerces these columns to character; they are converted back below.
for(i in 1:nrow(patterns)){
if(patterns[i, "open_high"] == "X") {
patterns[i, "open_high"] <- patterns[i-1, "open_high"]
patterns[i, "open_low"] <- patterns[i-1, "open_low"]
}
}
# Flag bars that strictly engulf the opening range (overlap_flag) and the
# following bar that extends the previous bar's extreme (pattern_flag),
# typed "H" (higher high) / "L" (lower low) / "N" (none).
patterns <-
patterns %>%
group_by(date) %>%
mutate(open_high = as.numeric(open_high),
open_low = as.numeric(open_low),
overlap_flag = ifelse(high > open_high & low < open_low & open_high != open_low, 1, 0),
pattern_flag = ifelse(lag(overlap_flag) == 1 & (lag(high) < high | lag(low) > low), 1, 0),
pattern_type = ifelse(pattern_flag == 1 & lag(high) < high, "H", ifelse(pattern_flag == 1 & lag(low) > low, "L", "N"))) %>%
arrange(desc(timestamp)) %>%
as.data.frame(stringsAsFactors=F) %>%
# Rows are reversed, so a right-aligned rolling max/min effectively looks
# forward over forecast_horizon trading days (6.25 h each) of
# api_interval-second bars.
# NOTE(review): fill = "NA" is the *string* "NA"; with partial = T it is
# never used, but fill = NA would be the correct numeric value.
mutate(forecast_horizon_high = rollapplyr(high, forecast_horizon*(6.25*60*60)/api_interval, max, fill = "NA", partial = T),
forecast_horizon_low = rollapplyr(low, forecast_horizon*(6.25*60*60)/api_interval, min, fill = "NA", partial = T)) %>%
arrange(timestamp)
# Breakout success: price later moves at least twice the opening range
# beyond the pattern bar's extreme.
breakouts <-
patterns %>%
filter(pattern_type %in% c("H", "L")) %>%
mutate(pass = ifelse((pattern_type == "H" & forecast_horizon_high >= high + 2*(open_high-open_low))
| (pattern_type == "L" & forecast_horizon_low <= low - 2*(open_high-open_low)), 1, 0),
stock_symbol = api_stock_symbol)
|
# read dwd data ----
#' Process data from the DWD CDC FTP Server
#'
#' Read climate data that was downloaded with \code{\link{dataDWD}}.
#' The data is unzipped and subsequently, the file is read, processed and
#' returned as a data.frame.\cr
#' New users are advised to set \code{varnames=TRUE} to obtain more informative
#' column names.\cr\cr
#' \code{readDWD} will call internal (but documented) functions depending on the
#' arguments \code{meta, binary, raster, multia, asc}:\cr
#' to read observational data: \code{\link{readDWD.data},
#' \link{readDWD.meta}, \link{readDWD.multia}}\cr
#' to read interpolated gridded data: \code{\link{readDWD.binary},
#' \link{readDWD.raster}, \link{readDWD.asc}}\cr
#' Not all arguments to \code{readDWD} are used for all functions, e.g.
#' \code{fread} is used only by \code{.data}, while \code{dividebyten}
#' is used in \code{.raster} and \code{.asc}.\cr\cr
#' \code{file} can be a vector with several filenames. Most other arguments can
#' also be a vector and will be recycled to the length of \code{file}.
#'
#' @return Invisible data.frame of the desired dataset,
#' or a named list of data.frames if length(file) > 1.
#' \code{\link{readDWD.binary}} returns a vector,
#' \code{\link{readDWD.raster}} and \code{\link{readDWD.asc}}
#' return raster objects instead of data.frames.
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Jul-Oct 2016, Winter 2018/19
#' @seealso \code{\link{dataDWD}}, \code{\link{readVars}},
#' \code{\link{readMeta}}, \code{\link{selectDWD}}
#' @keywords file chron
#' @importFrom utils read.table unzip read.fwf untar write.table
#' @importFrom berryFunctions checkFile na9 traceCall l2df owa
#' @importFrom pbapply pblapply
#' @importFrom tools file_path_sans_ext
#' @export
#' @examples
#' # see dataDWD
#'
#' @param file Char (vector): name(s) of the file(s) downloaded with
#' \code{\link{dataDWD}},
#' e.g. "~/DWDdata/tageswerte_KL_02575_akt.zip" or
#' "~/DWDdata/RR_Stundenwerte_Beschreibung_Stationen.txt"
#' @param progbar Logical: present a progress bar with estimated remaining time?
#' If missing and length(file)==1, progbar is internally set to FALSE.
#' DEFAULT: TRUE
#' @param fread Logical (vector): read fast? See \code{\link{readDWD.data}}.
#' DEFAULT: FALSE (some users complain it doesn't work on their PC)
#' @param varnames Logical (vector): Expand column names?
#' See \code{\link{readDWD.data}}. DEFAULT: FALSE
#' @param format,tz Format and time zone of time stamps, see \code{\link{readDWD.data}}
#' @param dividebyten Logical (vector): Divide the values in raster files by ten?
#' Used in \code{\link{readDWD.raster}} and \code{\link{readDWD.asc}}.
#' DEFAULT: TRUE
#' @param meta Logical (vector): is the \code{file} a meta file (Beschreibung.txt)?
#' See \code{\link{readDWD.meta}}.
#' DEFAULT: TRUE for each file ending in ".txt"
#' @param multia Logical (vector): is the \code{file} a multi_annual file?
#' Overrides \code{meta}, so set to FALSE manually if
#' \code{\link{readDWD.meta}} needs to be called on a file ending
#' with "Standort.txt". See \code{\link{readDWD.multia}}.
#' DEFAULT: TRUE for each file ending in "Standort.txt"
#' @param binary Logical (vector): does the \code{file} contain binary files?
#' See \code{\link{readDWD.binary}}.
#' DEFAULT: TRUE for each file ending in ".tar.gz"
#' @param raster Logical (vector): does the \code{file} contain a raster file?
#' See \code{\link{readDWD.raster}}.
#' DEFAULT: TRUE for each file ending in ".asc.gz"
#' @param asc Logical (vector): does the \code{file} contain asc files?
#' See \code{\link{readDWD.asc}}.
#' DEFAULT: TRUE for each file ending in ".tar"
#' @param \dots Further arguments passed to the internal \code{readDWD.*}
#' functions and from those to the underlying reading functions
#' documented in each internal function.
#'
readDWD <- function(
file,
progbar=TRUE,
fread=FALSE,
varnames=FALSE,
format=NA,
tz="GMT",
dividebyten=TRUE,
meta=  grepl('.txt$', file),
multia=grepl('Standort.txt$', file),
binary=grepl('.tar.gz$', file),
raster=grepl('.asc.gz$', file),
asc=   grepl('.tar$', file),
...
)
{
nfiles <- length(file)
# For a single regular file, default to no progress bar; binary/asc readers
# keep it because they loop over many subfiles internally.
if(missing(progbar) & nfiles==1 & all(!binary) & all(!asc)) progbar <- FALSE
# fread=NA means: use data.table if (and only if) it is installed.
if(anyNA(fread)) fread[is.na(fread)] <- requireNamespace("data.table", quietly=TRUE)
# Recycle the per-file arguments so there is one value per file:
if(nfiles>1)
{
recycle <- function(x) rep(x, length.out=nfiles)
fread       <- recycle(fread)
varnames    <- recycle(varnames)
format      <- recycle(format)
tz          <- recycle(tz)
dividebyten <- recycle(dividebyten)
meta        <- recycle(meta)
multia      <- recycle(multia)
binary      <- recycle(binary)
raster      <- recycle(raster)
asc         <- recycle(asc)
}
# multi_annual files also end in ".txt", so multia takes precedence:
meta[multia] <- FALSE
# Optionally wrap the loop in a progress bar:
applyfun <- if(progbar) pbapply::pblapply else lapply
# fread=TRUE requires data.table:
if(any(fread)) if(!requireNamespace("data.table", quietly=TRUE))
   stop("in rdwd::readDWD: to use fread=TRUE, please first install data.table:",
        " install.packages('data.table')", call.=FALSE)
#
checkFile(file)
# Meta files contain German Umlaute: switch the locale once here (faster
# than per readDWD.meta call); the original locale is restored on exit.
if(any(meta))
{
lct <- Sys.getlocale("LC_CTYPE")
on.exit(Sys.setlocale(category="LC_CTYPE", locale=lct), add=TRUE)
if(!grepl(pattern="german", lct, ignore.case=TRUE))
  {
  lctry <- c("German","de_DE","de_DE.UTF-8","de_DE.utf8","de")
  for(lc in lctry) if(suppressWarnings(Sys.setlocale("LC_CTYPE", lc))!="") break
  }
}
#
if(progbar) message("Reading ", nfiles, " file", if(nfiles>1)"s", "...")
#
# Dispatch each file to the matching internal reader:
output <- applyfun(seq_along(file), function(i)
{
if(meta[i])   return(readDWD.meta(  file[i], ...))
if(binary[i]) return(readDWD.binary(file[i], progbar=progbar, ...))
if(raster[i]) return(readDWD.raster(file[i], dividebyten=dividebyten[i], ...))
if(multia[i]) return(readDWD.multia(file[i], ...))
if(asc[i])    return(readDWD.asc(   file[i], progbar=progbar, dividebyten=dividebyten[i], ...))
# default: regular observational data
readDWD.data(file[i], fread=fread[i], varnames=varnames[i],
             format=format[i], tz=tz[i], ...)
})
#
names(output) <- tools::file_path_sans_ext(basename(file))
# A single file is returned directly, not as a one-element list:
if(nfiles==1) output <- output[[1]]
return(invisible(output))
}
# read observational data ----
# ~ data ----
#' @title read regular dwd data
#' @description Read regular dwd data.
#' Intended to be called via \code{\link{readDWD}}.
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}
#' @seealso \code{\link{readDWD}}, Examples in \code{\link{dataDWD}}
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/daily_kl_recent_tageswerte_KL_03987_akt.zip
#' @param fread Logical: read faster with \code{data.table::\link[data.table]{fread}}?
#' When reading many large historical files, speedup is significant.
#' NA can also be used, which means TRUE if data.table is available.
#' DEFAULT: FALSE
#' @param varnames Logical (vector): add a short description to the DWD variable
#' abbreviations in the column names?
#' E.g. change \code{FX,TNK} to \code{FX.Windspitze,TNK.Lufttemperatur_Min},
#' see \code{\link{newColumnNames}}.
#' DEFAULT: FALSE (for backwards compatibility)
#' @param format Char (vector): Format passed to
#' \code{\link{as.POSIXct}} (see \code{\link{strptime}})
#' to convert the date/time column to POSIX time format.\cr
#' If NULL, no conversion is performed (date stays a factor).
#' If NA, \code{readDWD} tries to find a suitable format based
#' on the number of characters. DEFAULT: NA
#' @param tz Char (vector): time zone for \code{\link{as.POSIXct}}.
#' "" is the current time zone, and "GMT" is UTC (Universal Time,
#' Coordinated). DEFAULT: "GMT"
#' @param \dots Further arguments passed to \code{\link{read.table}} or
#' \code{data.table::\link[data.table]{fread}}
readDWD.data <- function(file, fread=FALSE, varnames=FALSE, format=NA, tz="GMT", ...)
{
# --- read the "produkt*" data file contained in the zip archive ---
if(fread)
{
# stream the produkt file straight out of the archive, no temp files:
# http://dsnotes.com/post/2017-01-27-lessons-learned-from-outbrain-click-prediction-kaggle-competition/
contents <- unzip(file, list=TRUE)$Name
prodfile <- contents[grepl("produkt", contents)]
dat <- data.table::fread(cmd=paste("unzip -p", file, prodfile),
                         na.strings=na9(nspace=0), header=TRUE, sep=";",
                         stringsAsFactors=TRUE, data.table=FALSE, ...)
} else
{
# unzip into a temporary folder that is removed again on exit:
exdir <- paste0(tempdir(), "/", tools::file_path_sans_ext(basename(file)))
unzip(file, exdir=exdir)
on.exit(unlink(exdir, recursive=TRUE), add=TRUE)
# locate the actual data file:
prodfile <- dir(exdir, pattern="produkt*", full.names=TRUE)
if(length(prodfile)!=1) stop("There should be a single 'produkt*' file, but there are ",
      length(prodfile), " in\n ", file, "\n Consider re-downloading (with force=TRUE).")
dat <- read.table(prodfile, na.strings=na9(), header=TRUE, sep=";", as.is=FALSE, ...)
}
#
# optionally expand the DWD column abbreviations:
if(varnames) dat <- newColumnNames(dat)
# empty files do occur, e.g. daily/more_precip/hist_05988 2019-05-16:
if(nrow(dat)==0)
  {
  warning("File contains no rows: ", file)
  return(dat)
  }
# --- convert the date/time column to POSIXct (format=NULL skips this) ---
# http://stackoverflow.com/a/13022441
if(!is.null(format))
  {
  # monthly data carries a period; represent it by mid-month (begin + 14):
  if("MESS_DATUM_BEGINN" %in% colnames(dat))
    dat <- cbind(dat[,1, drop=FALSE], MESS_DATUM=dat$MESS_DATUM_BEGINN + 14, dat[,-1])
  if(!"MESS_DATUM" %in% colnames(dat))
    warning("There is no column 'MESS_DATUM' in ",file, call.=FALSE) else
    {
    stamp <- as.character(dat$MESS_DATUM)
    # format=NA: guess from the width of the first time stamp
    if(is.na(format)) format <- switch(as.character(nchar(stamp[1])),
                                       "8" = "%Y%m%d",
                                       "13"= "%Y%m%d%H:%M",
                                       "%Y%m%d%H")
    dat$MESS_DATUM <- as.POSIXct(stamp, format=format, tz=tz)
    }
  }
# final output:
return(dat)
}
# ~ meta ----
#' @title read dwd metadata (Beschreibung*.txt files)
#' @description read dwd metadata (Beschreibung*.txt files).
#' Intended to be called via \code{\link{readDWD}}.\cr
#' Column widths for \code{\link{read.fwf}} are computed internally.\cr
#' if(any(meta)), \code{\link{readDWD}} tries to set the locale to German
#' (to handle Umlaute correctly). It is hence not recommended to call
#' \code{rdwd:::readDWD.meta} directly on a file!\cr
#' Names can later be changed to ascii with
#' \code{berryFunctions::\link{convertUmlaut}}.
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}
#' @seealso \code{\link{readDWD}}
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' link <- selectDWD(res="daily", var="kl", per="r", meta=TRUE)
#' if(length(link)!=1) stop("length of link should be 1, but is ", length(link),
#' ":\n", berryFunctions::truncMessage(link,prefix="",sep="\n"))
#'
#' file <- dataDWD(link, dir=localtestdir(), read=FALSE)
#' meta <- readDWD(file)
#' head(meta)
#'
#' cnm <- colnames(meta)
#' if(length(cnm)!=8) stop("number of columns should be 8, but is ", length(cnm),
#' ":\n", toString(cnm))
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/daily_kl_recent_KL_Tageswerte_Beschreibung_Stationen.txt
#' @param \dots Further arguments passed to \code{\link{read.fwf}}
readDWD.meta <- function(file, ...)
{
# Read the first three lines: line 1 = column names, line 2 = dashes,
# line 3 = first data row (used to auto-detect the column widths).
oneline <- readLines(file, n=3, encoding="latin1")
# Column widths (automatic detection across different styles used by the
# DWD): the last space of each run of spaces marks a column break.
spaces <- unlist(gregexpr(" ", oneline[3]))
breaks <- spaces[which(diff(spaces)!=1)]
if(substr(oneline[3],1,1)==" ") breaks <- breaks[-1]
breaks[3] <- breaks[3] -9 # right-adjusted column
breaks[4:5] <- breaks[4:5] -1 # right-adjusted columns
widths <- diff(c(0,breaks,200))
# subdaily_standard_format station lists have fixed, known widths:
sdsf <- grepl("subdaily_standard_format", file)
if(sdsf) widths <- c(6,6,9,10,10,10,10,26,200)
# actually read metadata, suppress readLines warning about EOL:
stats <- suppressWarnings(read.fwf(file, widths=widths, skip=2, strip.white=TRUE,
fileEncoding="latin1", ...) )
# column names:
# remove duplicate spaces (2018-03 only in subdaily_stand...Beschreibung....txt)
# BUGFIX: collapse runs of two spaces into one. The previous code replaced
# " " by " " (a no-op), so the while loop never terminated once the header
# contained any space at all.
while( grepl("  ", oneline[1]) ) oneline[1] <- gsub("  ", " ", oneline[1])
colnames(stats) <- strsplit(oneline[1], " ")[[1]]
if(sdsf)
{
# drop separator / repeated-header rows, then round-trip through a temp
# file so column types are re-determined from the cleaned data:
stats <- stats[ ! stats[,1] %in% c("","ST_KE","-----") , ]
tf <- tempfile()
write.table(stats[,-1], file=tf, quote=FALSE, sep="\t")
stats <- read.table(tf, sep="\t")
colnames(stats) <- c("Stations_id", "von_datum", "bis_datum", "Stationshoehe",
"geoBreite", "geoLaenge", "Stationsname", "Bundesland")
}
# check classes (warn, don't fail, on mismatch):
classes <- c("integer", "integer", "integer", "integer", "numeric", "numeric", "factor", "factor")
actual <- sapply(stats, class)
if(actual[4]=="numeric") classes[4] <- "numeric" # Stationshoehe may legitimately be numeric
if(!all(actual == classes))
{
msg <- paste0(names(actual)[actual!=classes], ": ", actual[actual!=classes],
" instead of ", classes[actual!=classes], ".")
msg <- paste(msg, collapse=" ")
warning(traceCall(3, "", ": "), "reading file '", file,
"' did not give the correct column classes. ", msg, call.=FALSE)
}
# return meta data.frame:
stats
}
# ~ multia ----
#' @title read multi_annual dwd data
#' @description read multi_annual dwd data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' All other observational data at \code{\link{dwdbase}} can be read
#' with \code{\link{readDWD.data}}, except for the multi_annual data.
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Feb 2019
#' @seealso \code{\link{readDWD}}
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' # Temperature aggregates (2019-04 the 9th file):
#' durl <- selectDWD(res="multi_annual", var="mean_81-10", per="")[9]
#' murl <- selectDWD(res="multi_annual", var="mean_81-10", per="", meta=TRUE)[9]
#'
#' ma_temp <- dataDWD(durl, dir=localtestdir())
#' ma_meta <- dataDWD(murl, dir=localtestdir())
#'
#' head(ma_temp)
#' head(ma_meta)
#'
#' ma <- merge(ma_meta, ma_temp, all=TRUE)
#' berryFunctions::linReg(ma$Stationshoehe, ma$Jahr)
#' op <- par(mfrow=c(3,4), mar=c(0.1,2,2,0), mgp=c(3,0.6,0))
#' for(m in colnames(ma)[8:19])
#' {
#' berryFunctions::linReg(ma$Stationshoehe, ma[,m], xaxt="n", xlab="", ylab="", main=m)
#' abline(h=0)
#' }
#' par(op)
#'
#' par(bg=8)
#' berryFunctions::colPoints(ma$geogr..Laenge, ma$geogr..Breite, ma$Jahr, add=F, asp=1.4)
#'
#' data("DEU")
#' pdf("MultiAnn.pdf", width=8, height=10)
#' par(bg=8)
#' for(m in colnames(ma)[8:19])
#' {
#' raster::plot(DEU, border="darkgrey")
#' berryFunctions::colPoints(ma[-262,]$geogr..Laenge, ma[-262,]$geogr..Breite, ma[-262,m],
#' asp=1.4, # Range=range(ma[-262,8:19]),
#' col=berryFunctions::divPal(200, rev=TRUE), zlab=m, add=T)
#' }
#' dev.off()
#' berryFunctions::openFile("MultiAnn.pdf")
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/multi_annual_mean_81-10_Temperatur_1981-2010_aktStandort.txt or
#' DWDdata/multi_annual_mean_81-10_Temperatur_1981-2010_Stationsliste_aktStandort.txt
#' @param fileEncoding \link{read.table} \link{file} encoding.
#' DEFAULT: "latin1" (needed on Linux, optional but not hurting on windows)
#' @param comment.char \link{read.table} comment character.
#' DEFAULT: "\\032" (needed 2019-04 to ignore the binary
#' control character at the end of multi_annual files)
#' @param \dots Further arguments passed to \code{\link{read.table}}
readDWD.multia <- function(file, fileEncoding="latin1", comment.char="\032", ...)
{
# Multi-annual files are semicolon-separated with a header line;
# comment.char="\032" skips the trailing SUB control byte in DWD files.
tab <- read.table(file, header=TRUE, sep=";", comment.char=comment.char,
                  fileEncoding=fileEncoding, ...)
# Lines end with ";", so read.table adds an empty trailing column that
# gets the placeholder name "X" - drop it again:
lastcol <- ncol(tab)
if(colnames(tab)[lastcol]=="X") tab <- tab[,-lastcol]
tab
}
# read gridded data ----
# ~ binary ----
#' @title read dwd gridded radolan binary data
#' @description read gridded radolan binary data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' @return list depending on argument \code{toraster}, see there for details
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Dec 2018.
#' Significant input for the underlying \code{\link{readRadarFile}} came
#' from Henning Rust & Christoph Ritschel at FU Berlin.
#' @seealso \code{\link{readDWD}}\cr
#' \url{https://wradlib.org} for much more extensive radar analysis in Python\cr
#' Kompositformatbeschreibung at \url{https://www.dwd.de/DE/leistungen/radolan/radolan.html}
#' for format description
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' # SF file as example: ----
#'
#' SF_link <- "/daily/radolan/historical/bin/2017/SF201712.tar.gz"
#' SF_file <- dataDWD(file=SF_link, base=gridbase, joinbf=TRUE, # 204 MB
#' dir=localtestdir(), read=FALSE)
#' # exdir radardir set to speed up my tests:
#' SF_exdir <- "C:/Users/berry/Desktop/DWDbinarySF"
#' if(!file.exists(SF_exdir)) SF_exdir <- tempdir()
#' # no need to read all 24*31=744 files, so setting selection:
#' SF_rad <- readDWD(SF_file, selection=1:10, exdir=SF_exdir) #with toraster=TRUE
#' if(length(SF_rad)!=2) stop("length(SF_rad) should be 2, but is ", length(SF_rad))
#'
#' SF_radp <- projectRasterDWD(SF_rad$data)
#' raster::plot(SF_radp[[1]], main=SF_rad$meta$date[1])
#' data(DEU)
#' raster::plot(DEU, add=TRUE)
#'
#'
#' # RW file as example: ----
#'
#' RW_link <- "hourly/radolan/reproc/2017_002/bin/2017/RW2017.002_201712.tar.gz"
#' RW_file <- dataDWD(file=RW_link, base=gridbase, joinbf=TRUE, # 25 MB
#' dir=localtestdir(), read=FALSE)
#' RW_exdir <- "C:/Users/berry/Desktop/DWDbinaryRW"
#' if(!file.exists(RW_exdir)) RW_exdir <- tempdir()
#' RW_rad <- readDWD(RW_file, selection=1:10, exdir=RW_exdir)
#' RW_radp <- projectRasterDWD(RW_rad$data, extent="rw")
#' raster::plot(RW_radp[[1]], main=RW_rad$meta$date[1])
#' raster::plot(DEU, add=TRUE)
#'
#' # ToDo: why are values + patterns not the same?
#'
#' # list of all Files: ----
#' data(gridIndex)
#' head(grep("historical", gridIndex, value=TRUE))
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/daily_radolan_historical_bin_2017_SF201712.tar.gz
#' @param exdir Directory to unzip into. If existing, only the needed files
#' will be unpacked with \code{\link{untar}}. Note that exdir
#' size will be around 1.1 GB. exdir can contain other files,
#' these will be ignored for the actual reading with
#' \code{\link{readRadarFile}} (function not exported, but documented).
#' DEFAULT exdir: sub(".tar.gz$", "", file)
#' @param toraster Logical: convert output (list of matrixes + meta informations)
#' to a list with data (\code{raster \link[raster]{stack}}) +
#' meta (list from the first subfile, but with vector of dates)?
#' DEFAULT: TRUE
#' @param progbar Show messages and progress bars? \code{\link{readDWD}} will
#' keep progbar=TRUE for binary files, even if length(file)==1.
#' DEFAULT: TRUE
#' @param selection Optionally read only a subset of the ~24*31=744 files.
#' Called as \code{f[selection]}. DEFAULT: NULL (ignored)
#' @param \dots Further arguments passed to \code{\link{readRadarFile}},
#' i.e. \code{na} and \code{clutter}
# Read a tarred set of hourly RADOLAN binary files; optionally convert the
# result to a raster stack. Intended to be called via readDWD().
readDWD.binary <- function(file, exdir=sub(".tar.gz$", "", file),
toraster=TRUE, progbar=TRUE, selection=NULL, ...)
{
# helper: emit message() only when progress output is wanted
pmessage <- function(...) if(progbar) message(...)
# Untar as needed:
pmessage("\nChecking which files need to be untarred to ", exdir, "...")
lf <- untar(file, list=TRUE)
# only unpack archive members not already present in exdir (caching
# across calls saves substantial time for the ~744 hourly files):
tountar <- !lf %in% dir(exdir)
if(any(tountar))
{
pmessage("Unpacking ",sum(tountar), " of ",length(lf), " files in ",file,"...")
untar(file, files=lf[tountar], exdir=exdir)
} else
pmessage("All files were already untarred.")
#
# hourly files:
f <- dir(exdir, full.names=TRUE) # 31*24 = 744 files (daily/hist/2017-12)
# read only the ones from file, not other stuff at exdir:
f <- f[basename(f) %in% lf]
if(!is.null(selection)) f <- f[selection]
#
pmessage("Reading ",length(f)," binary files...")
if(progbar) lapply <- pbapply::pblapply
# Read the actual binary file:
rb <- lapply(f, readRadarFile, ...)
# list element names (time stamp):
time <- sapply(rb, function(x) as.character(x$meta$date))
names(rb) <- time
# toraster=FALSE: return the raw list of matrix + meta pairs as-is
if(!toraster) return(invisible(rb))
# else if toraster:
# raster is only suggested, not imported -> check availability first:
if(!requireNamespace("raster", quietly=TRUE))
stop("To use rdwd:::readDWD.binary with toraster=TRUE, please first install raster:",
" install.packages('raster')", call.=FALSE)
pmessage("Converting to raster stack....")
# first list element of each subfile is the data matrix:
rbmat <- base::lapply(rb,"[[",1)
rbmat <- base::lapply(rbmat, raster::raster)
rbmat <- raster::stack(rbmat)
# rbmeta <- base::lapply(rb,"[[",2)
# rbmeta <- base::lapply(rbmeta, function(x){x$radars <- toString(x$radars);
# x$radarn <- toString(x$radarn);
# x$dim <- toString(x$dim) ; x})
# mnames <- names(rbmeta[[1]])[-(1:2)] # filename and date will differ
# sapply(mnames, function(mn) length(unique(sapply(rbmeta, "[[", mn)))) # all equal
# meta information is taken from the first subfile; only the date differs
# per layer, so it is replaced with the full vector of time stamps:
rbmeta <- rb[[1]]$meta
rbmeta$filename <- file
rbmeta$date <- as.POSIXct(time)
return(invisible(list(data=rbmat, meta=rbmeta)))
}
# ~ raster ----
#' @title read dwd gridded raster data
#' @description Read gridded raster data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' Note that \code{R.utils} must be installed to unzip the .asc.gz files.
#' @return \code{raster::\link[raster]{raster}} object
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Dec 2018
#' @seealso \code{\link{readDWD}}
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' rasterbase <- paste0(gridbase,"/seasonal/air_temperature_mean")
#' ftp.files <- indexFTP("/16_DJF", base=rasterbase, dir=tempdir())
#' localfiles <- dataDWD(ftp.files[1:2], base=rasterbase, joinbf=TRUE,
#' dir=localtestdir(), read=FALSE)
#' rf <- readDWD(localfiles[1])
#' rf <- readDWD(localfiles[1]) # runs faster at second time due to skip=TRUE
#' raster::plot(rf)
#'
#' rfp <- projectRasterDWD(rf, proj="seasonal", extent=rf@extent)
#' raster::plot(rfp)
#' data(DEU)
#' raster::plot(DEU, add=TRUE)
#'
#' testthat::expect_equal(raster::cellStats(rf, range), c(-8.2,4.4))
#' rf10 <- readDWD(localfiles[1], dividebyten=FALSE)
#' raster::plot(rf10)
#' testthat::expect_equal(raster::cellStats(rf10, range), c(-82,44))
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/grids_germany/seasonal/air_temperature_mean/
#' 16_DJF_grids_germany_seasonal_air_temp_mean_188216.asc.gz
#' @param gargs Named list of arguments passed to
#' \code{R.utils::\link[R.utils]{gunzip}}. The internal
#' defaults are: \code{remove=FALSE} (recommended to keep this
#' so \code{file} does not get deleted) and \code{skip=TRUE}
#' (which reads previously unzipped files as is).
#' If \code{file} has changed, you might want to use
#' \code{gargs=list(skip=FALSE, overwrite=TRUE)}
#' or alternatively \code{gargs=list(temporary=TRUE)}.
#' The \code{gunzip} default \code{destname} means that the
#' unzipped file is stored at the same path as \code{file}.
#' DEFAULT gargs: NULL
#' @param dividebyten Logical: Divide the numerical values by 10?
#' DEFAULT: TRUE
#' @param \dots Further arguments passed to \code{raster::\link[raster]{raster}}
readDWD.raster <- function(file, gargs=NULL, dividebyten, ...)
{
# Both suggested packages are needed: R.utils to gunzip, raster to read.
# (The loop produces the same error messages as two separate checks.)
for(pkg in c("R.utils", "raster"))
  if(!requireNamespace(pkg, quietly=TRUE))
    stop("To use rdwd:::readDWD.raster, please first install ", pkg, ":",
         " install.packages('", pkg, "')", call.=FALSE)
#https://stackoverflow.com/questions/5227444/recursively-ftp-download-then-extract-gz-files
# gunzip arguments: user-supplied gargs override the defaults, except
# filename, which is protected (third argument of owa):
gz_args <- berryFunctions::owa(list(filename=file, remove=FALSE, skip=TRUE),
                               gargs, "filename")
ascfile <- do.call(R.utils::gunzip, gz_args)
# read the unpacked file and undo the DWD's x10 integer scaling if wanted:
grid <- raster::raster(ascfile, ...)
if(dividebyten) grid <- grid/10
return(invisible(grid))
}
# ~ asc ----
#' @title read dwd gridded radolan asc data
#' @description read grid-interpolated radolan asc data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' See \url{ftp://ftp-cdc.dwd.de/pub/CDC/grids_germany/hourly/radolan/README.txt}
#' All layers (following \code{selection} if given) in all .tar.gz files are
#' combined into a raster stack with \code{raster::\link[raster]{stack}}.\cr
#' To project the data, use \code{\link{projectRasterDWD}}
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, April 2019
#' @seealso \code{\link{readDWD}}
# @importFrom raster raster stack crs projection extent plot
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' # File selection and download:
#' datadir <- localtestdir()
#' # 2019-05-18, hourly radolan files not yet copied to new ftp, hence:
#' gridbase <- "ftp://ftp-cdc.dwd.de/pub/CDC/grids_germany"
#' radbase <- paste0(gridbase,"/hourly/radolan/historical/asc/")
#' radfile <- "2018/RW-201809.tar" # 25 MB to download
#' file <- dataDWD(radfile, base=radbase, joinbf=TRUE, dir=datadir,
#' dfargs=list(mode="wb"), read=FALSE) # download with mode=wb!!!
#'
#' #asc <- readDWD(file) # 4 GB in mem. ~ 20 secs unzip, 30 secs read, 10 min divide
#' asc <- readDWD(file, selection=1:20, dividebyten=TRUE)
#' asc <- projectRasterDWD(asc)
#'
#' raster::plot(asc[[1]], main=names(asc)[1])
#' data(DEU)
#' raster::plot(DEU, add=TRUE)
#'
#' rng <- range(raster::cellStats(asc, "range"))
#' nframes <- 3 # raster::nlayers(asc) for all (time intensive!)
#' viddir <- paste0(tempdir(),"/RadolanVideo")
#' dir.create(viddir)
#' png(paste0(viddir,"/Radolan_%03d.png"), width=7, height=5, units="in", res=300)
#' dummy <- pbsapply(1:nframes, function(i)
#' raster::plot(asc[[i]], main=names(asc)[i], zlim=rng)) # 3 secs per layer
#' dev.off()
#' berryFunctions::openFile(paste0(viddir,"/Radolan_001.png"))
#'
#' # Time series of a given point in space:
#' plot(as.vector(asc[800,800,]), type="l", xlab="Time [hours]")
#'
#' # if dividebyten=FALSE, raster stores things out of memory in the exdir.
#' # by default, this is in tempdir, hence you would need to save asc manually:
#' # raster::writeRaster(asc, paste0(datadir,"/RW2018-09"), overwrite=TRUE)
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/grids_germany/hourly/radolan/historical/asc/
#' 2018_RW-201809.tar.
#' Must have been downloaded with \code{mode="wb"}!
#' @param exdir Directory to unzip into. Unpacked files existing therein
#' will not be untarred again, saving up to 15 secs per file.
#' DEFAULT: NULL (subfolder of \code{\link{tempdir}()})
#' @param dividebyten Divide numerical values by 10?
#' If dividebyten=FALSE and exdir left at NULL (tempdir), save
#' the result on disc with \code{raster::\link[raster]{writeRaster}}.
#' Accessing out-of-memory raster objects won't work if
#' exdir is removed! -> Error in .local(.Object, ...)
#' DEFAULT: TRUE
#' @param progbar Show messages and progress bars? \code{\link{readDWD}} will
#' keep progbar=TRUE for asc files, even if length(file)==1.
#' DEFAULT: TRUE
#' @param selection Optionally read only a subset of the ~24*31=744 files.
#' Called as \code{f[selection]}. DEFAULT: NULL (ignored)
#' @param \dots Further arguments passed to \code{raster::\link[raster]{raster}}
# Read hourly RADOLAN .asc files from a doubly-tarred archive
# (outer .tar of daily .tar.gz files) into a raster stack.
# Intended to be called via readDWD().
readDWD.asc <- function(file, exdir=NULL, dividebyten=TRUE,
selection=NULL, progbar=TRUE, ...)
{
# raster is only suggested, not imported -> check availability first:
if(!requireNamespace("raster", quietly=TRUE))
stop("To use rdwd:::readDWD.asc, please first install raster:",
" install.packages('raster')", call.=FALSE)
if(progbar) lapply <- pbapply::pblapply
# prepare to untar data (two layers):
fn <- tools::file_path_sans_ext(basename(file))
if(is.null(exdir)) exdir <- paste0(tempdir(),"/", fn)
#
# untar layer 1:
daydir <- paste0(exdir,"/dayfiles")
untar(file, exdir=daydir) # 30/31 .tar.gz files (one for each day). overwrites existing files
dayfiles <- dir(daydir, full.names=TRUE)
#
# untar layer 2:
# only unpack day archives whose members are not all present yet
# (caching: saves up to ~15 seconds per file on repeated calls):
if(progbar) message("\nChecking if already unpacked: ", file, "...")
to_untar <- lapply(dayfiles, untar, list=TRUE)
untarred <- dir(exdir, pattern=".asc$")
to_untar <- !sapply(to_untar, function(x) all(x %in% untarred))
if(any(to_untar)){
if(progbar) message("Unpacking tar files into ",exdir,"...")
lapply(dayfiles[to_untar], untar, exdir=exdir)
} else if(progbar) message("Tar file was already unpacked into ",exdir," :)")
# yields 31 * 24 .asc files each 1.7MB, takes ~20 secs
#
#
# read data (hourly files):
f <- dir(exdir, pattern=".asc$", full.names=TRUE) # 720 files
if(!is.null(selection)) f <- f[selection]
if(progbar) message("Reading ",length(f)," files...")
dat <- lapply(f, raster::raster, ...)
#
# divide by ten (takes ~9 min!)
# undoes the DWD's x10 integer scaling of the stored values:
if(progbar & dividebyten) message("Dividing values by ten...")
if(dividebyten) dat <- lapply(dat, function(x) x/10)
#
# stack layers:
dat <- raster::stack(dat)
#
# output:
return(invisible(dat))
}
# helper functionality ----
#' @title project DWD raster data
#' @description Set projection and extent for DWD raster data. Optionally (and
#' per default) also reprojects to latlon data.
#' The internal defaults are extracted from the
#' Kompositformatbeschreibung at \url{https://www.dwd.de/DE/leistungen/radolan/radolan.html},
#' as provided 2019-04 by Antonia Hengst.
#' @return Raster object with projection and extent, invisible
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, May 2019
#' @seealso \code{raster::\link[raster]{crs}},
#' \code{raster::\link[raster]{projection}},
#' \code{raster::\link[raster]{extent}},
#' \code{raster::\link[raster]{projectRaster}},
#' \code{\link{readDWD.binary}, \link{readDWD.raster}, \link{readDWD.asc}}
#' @keywords aplot
#' @export
#' @examples
#' # To be used after readDWD.binary, readDWD.raster, readDWD.asc
#' @param r Raster object
#' @param proj Desired projection. Can be a \code{raster::\link[raster]{crs}} output,
#' a projection character string (will be passed to \code{crs}),
#' "radolan" or "seasonal" with internal defaults defined per DWD standard,
#' or NULL to not set proj+extent but still consider \code{latlon}.
#' DEFAULT: "radolan"
#' @param extent Desired \code{\link[raster]{extent}}. Can be an extent object,
#' a vector with 4 numbers, or "radolan" / "rw" / "seasonal"
#' with internal defaults.
#' DEFAULT: "radolan"
#' @param latlon Logical: reproject \code{r} to lat-lon crs? DEFAULT: TRUE
#'
projectRasterDWD <- function(r, proj="radolan", extent="radolan", latlon=TRUE)
{
# Assign the DWD-specific projection and extent to raster object 'r',
# then (per default) reproject to regular lat-lon coordinates.
# package check ('raster' is only suggested, hence verified at runtime):
if(!requireNamespace("raster", quietly=TRUE))
stop("To use rdwd::projectRasterDWD, please first install raster:",
" install.packages('raster')", call.=FALSE)
#
# proj=NULL skips setting projection+extent but still honors 'latlon':
if(!is.null(proj))
{
# Default projection and extent:
# Projection as per Kompositbeschreibung 1.5
# NOTE(review): these literals span two source lines, so the proj4 strings
# contain an embedded newline + whitespace; raster::crs presumably tolerates
# that - confirm before reformatting.
p_radolan <- "+proj=stere +lat_0=90 +lat_ts=90 +lon_0=10 +k=0.93301270189
+x_0=0 +y_0=0 +a=6370040 +b=6370040 +to_meter=1000 +no_defs"
# ftp://opendata.dwd.de/climate_environment/CDC/grids_germany/seasonal/air_temperature_max/
# BESCHREIBUNG_gridsgermany_seasonal_air_temperature_max_de.pdf
p_seasonal <- "+proj=tmerc +lat_0=0 +lon_0=9 +k=1 +x_0=3500000 +y_0=0
+ellps=bessel +datum=potsdam +units=m +no_defs"
#
# translate keyword shortcuts to the default strings; any other character
# string falls through to raster::crs unchanged:
if(is.character(proj))
{
if(proj=="radolan") proj <- p_radolan else
if(proj=="seasonal") proj <- p_seasonal
}
if(!inherits(proj, "CRS")) proj <- raster::crs(proj)
#
# Extent as per Kompositbeschreibung 1.4 / seasonal DESCRIPTION pdf:
e_radolan <- c(-523.4622,376.5378,-4658.645,-3758.645)
e_rw <- c(-443.4622,456.5378,-4758.645,-3658.645) # 1.2, Abb 3
# e_radolan <- c(-673.4656656,726.5343344,-5008.642536,-3508.642536) # ME
e_seasonal <- c(3280414.71163347, 3934414.71163347, 5237500.62890625, 6103500.62890625)
if(is.character(extent))
{
if(extent=="radolan") extent <- e_radolan else
if(extent=="rw") extent <- e_rw else
if(extent=="seasonal") extent <- e_seasonal
}
# a plain 4-number vector is converted to an Extent object:
if(!inherits(extent,"Extent")) extent <- raster::extent(extent)
#
# actually set projection + extent on the raster metadata:
raster::projection(r) <- proj
raster::extent( r) <- extent
} # end if not null proj
#
# lat-lon (WGS84) reprojection, applied after the DWD crs has been set:
proj_ll <- raster::crs("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
if(latlon) r <- raster::projectRaster(r, crs=proj_ll)
# invisible output:
return(invisible(r))
}
| /R/readDWD.R | no_license | aadler/rdwd | R | false | false | 34,353 | r | # read dwd data ----
#' Process data from the DWD CDC FTP Server
#'
#' Read climate data that was downloaded with \code{\link{dataDWD}}.
#' The data is unzipped and subsequently, the file is read, processed and
#' returned as a data.frame.\cr
#' New users are advised to set \code{varnames=TRUE} to obtain more informative
#' column names.\cr\cr
#' \code{readDWD} will call internal (but documented) functions depending on the
#' arguments \code{meta, binary, raster, multia, asc}:\cr
#' to read observational data: \code{\link{readDWD.data},
#' \link{readDWD.meta}, \link{readDWD.multia}}\cr
#' to read interpolated gridded data: \code{\link{readDWD.binary},
#' \link{readDWD.raster}, \link{readDWD.asc}}\cr
#' Not all arguments to \code{readDWD} are used for all functions, e.g.
#' \code{fread} is used only by \code{.data}, while \code{dividebyten}
#' is used in \code{.raster} and \code{.asc}.\cr\cr
#' \code{file} can be a vector with several filenames. Most other arguments can
#' also be a vector and will be recycled to the length of \code{file}.
#'
#' @return Invisible data.frame of the desired dataset,
#' or a named list of data.frames if length(file) > 1.
#' \code{\link{readDWD.binary}} returns a vector,
#' \code{\link{readDWD.raster}} and \code{\link{readDWD.asc}}
#' return raster objects instead of data.frames.
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Jul-Oct 2016, Winter 2018/19
#' @seealso \code{\link{dataDWD}}, \code{\link{readVars}},
#' \code{\link{readMeta}}, \code{\link{selectDWD}}
#' @keywords file chron
#' @importFrom utils read.table unzip read.fwf untar write.table
#' @importFrom berryFunctions checkFile na9 traceCall l2df owa
#' @importFrom pbapply pblapply
#' @importFrom tools file_path_sans_ext
#' @export
#' @examples
#' # see dataDWD
#'
#' @param file Char (vector): name(s) of the file(s) downloaded with
#' \code{\link{dataDWD}},
#' e.g. "~/DWDdata/tageswerte_KL_02575_akt.zip" or
#' "~/DWDdata/RR_Stundenwerte_Beschreibung_Stationen.txt"
#' @param progbar Logical: present a progress bar with estimated remaining time?
#' If missing and length(file)==1, progbar is internally set to FALSE.
#' DEFAULT: TRUE
#' @param fread Logical (vector): read fast? See \code{\link{readDWD.data}}.
#' DEFAULT: FALSE (some users complain it doesn't work on their PC)
#' @param varnames Logical (vector): Expand column names?
#' See \code{\link{readDWD.data}}. DEFAULT: FALSE
#' @param format,tz Format and time zone of time stamps, see \code{\link{readDWD.data}}
#' @param dividebyten Logical (vector): Divide the values in raster files by ten?
#' Used in \code{\link{readDWD.raster}} and \code{\link{readDWD.asc}}.
#' DEFAULT: TRUE
#' @param meta Logical (vector): is the \code{file} a meta file (Beschreibung.txt)?
#' See \code{\link{readDWD.meta}}.
#' DEFAULT: TRUE for each file ending in ".txt"
#' @param multia Logical (vector): is the \code{file} a multi_annual file?
#' Overrides \code{meta}, so set to FALSE manually if
#' \code{\link{readDWD.meta}} needs to be called on a file ending
#' with "Standort.txt". See \code{\link{readDWD.multia}}.
#' DEFAULT: TRUE for each file ending in "Standort.txt"
#' @param binary Logical (vector): does the \code{file} contain binary files?
#' See \code{\link{readDWD.binary}}.
#' DEFAULT: TRUE for each file ending in ".tar.gz"
#' @param raster Logical (vector): does the \code{file} contain a raster file?
#' See \code{\link{readDWD.raster}}.
#' DEFAULT: TRUE for each file ending in ".asc.gz"
#' @param asc Logical (vector): does the \code{file} contain asc files?
#' See \code{\link{readDWD.asc}}.
#' DEFAULT: TRUE for each file ending in ".tar"
#' @param \dots Further arguments passed to the internal \code{readDWD.*}
#' functions and from those to the underlying reading functions
#' documented in each internal function.
#'
readDWD <- function(
file,
progbar=TRUE,
fread=FALSE,
varnames=FALSE,
format=NA,
tz="GMT",
dividebyten=TRUE,
meta=  grepl( '.txt$', file),
multia=grepl('Standort.txt$', file),
binary=grepl( '.tar.gz$', file),
raster=grepl( '.asc.gz$', file),
asc=   grepl( '.tar$', file),
...
)
{
  # Dispatch each downloaded file to the matching readDWD.* worker
  # (data/meta/multia/binary/raster/asc) and collect the results.
  #
  # recycle per-file arguments to length(file):
  len <- length(file)
  # '&&' instead of '&': all operands are scalar flags, so short-circuit
  # scalar logic is the correct construct in an if() condition:
  if(missing(progbar) && len==1 && all(!binary) && all(!asc)) progbar <- FALSE
  # fread=NA means: use data.table iff it is installed
  if(anyNA(fread)) fread[is.na(fread)] <- requireNamespace("data.table",quietly=TRUE)
  if(len>1)
  {
    fread       <- rep(fread,       length.out=len)
    varnames    <- rep(varnames,    length.out=len)
    format      <- rep(format,      length.out=len)
    tz          <- rep(tz,          length.out=len)
    dividebyten <- rep(dividebyten, length.out=len)
    meta        <- rep(meta,        length.out=len)
    multia      <- rep(multia,      length.out=len)
    binary      <- rep(binary,      length.out=len)
    raster      <- rep(raster,      length.out=len)
    asc         <- rep(asc,         length.out=len)
  }
  # multia files also end in .txt but must not be parsed as meta files:
  meta[multia] <- FALSE
  # Optional progress bar (locally masks lapply for the main loop below):
  if(progbar) lapply <- pbapply::pblapply
  # check package availability early, before any file is read:
  if(any(fread)) if(!requireNamespace("data.table", quietly=TRUE))
    stop("in rdwd::readDWD: to use fread=TRUE, please first install data.table:",
         " install.packages('data.table')", call.=FALSE)
  #
  checkFile(file)
  # Handle German Umlaute:
  if(any(meta)) # faster to change locale once here, instead of in each readDWD.meta call
  {
    lct <- Sys.getlocale("LC_CTYPE")
    on.exit(Sys.setlocale(category="LC_CTYPE", locale=lct), add=TRUE)
    if(!grepl(pattern="german", lct, ignore.case=TRUE))
    {
      # locale names differ across platforms, so try several candidates:
      lctry <- c("German","de_DE","de_DE.UTF-8","de_DE.utf8","de")
      for(lc in lctry) if(suppressWarnings(Sys.setlocale("LC_CTYPE", lc))!="") break
    }
  }
  #
  if(progbar) message("Reading ", length(file), " file", if(length(file)>1)"s", "...")
  #
  # loop over each filename, dispatching on the per-file type flags:
  output <- lapply(seq_along(file), function(i)
  {
    # if meta/binary/raster/multia/asc:
    if(meta[i])   return(readDWD.meta(  file[i], ...))
    if(binary[i]) return(readDWD.binary(file[i], progbar=progbar, ...))
    if(raster[i]) return(readDWD.raster(file[i], dividebyten=dividebyten[i], ...))
    if(multia[i]) return(readDWD.multia(file[i], ...))
    if(asc[i])    return(readDWD.asc(   file[i], progbar=progbar, dividebyten=dividebyten[i], ...))
    # otherwise regular observational data:
    readDWD.data(file[i], fread=fread[i], varnames=varnames[i],
                 format=format[i], tz=tz[i], ...)
  }) # lapply loop end
  #
  # name list elements by file name; drop the list for a single file:
  names(output) <- tools::file_path_sans_ext(basename(file))
  output <- if(length(file)==1) output[[1]] else output
  return(invisible(output))
}
# read observational data ----
# ~ data ----
#' @title read regular dwd data
#' @description Read regular dwd data.
#' Intended to be called via \code{\link{readDWD}}.
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}
#' @seealso \code{\link{readDWD}}, Examples in \code{\link{dataDWD}}
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/daily_kl_recent_tageswerte_KL_03987_akt.zip
#' @param fread Logical: read faster with \code{data.table::\link[data.table]{fread}}?
#' When reading many large historical files, speedup is significant.
#' NA can also be used, which means TRUE if data.table is available.
#' DEFAULT: FALSE
#' @param varnames Logical (vector): add a short description to the DWD variable
#' abbreviations in the column names?
#' E.g. change \code{FX,TNK} to \code{FX.Windspitze,TNK.Lufttemperatur_Min},
#' see \code{\link{newColumnNames}}.
#' DEFAULT: FALSE (for backwards compatibility)
#' @param format Char (vector): Format passed to
#' \code{\link{as.POSIXct}} (see \code{\link{strptime}})
#' to convert the date/time column to POSIX time format.\cr
#' If NULL, no conversion is performed (date stays a factor).
#' If NA, \code{readDWD} tries to find a suitable format based
#' on the number of characters. DEFAULT: NA
#' @param tz Char (vector): time zone for \code{\link{as.POSIXct}}.
#' "" is the current time zone, and "GMT" is UTC (Universal Time,
#' Coordinated). DEFAULT: "GMT"
#' @param \dots Further arguments passed to \code{\link{read.table}} or
#' \code{data.table::\link[data.table]{fread}}
readDWD.data <- function(file, fread=FALSE, varnames=FALSE, format=NA, tz="GMT", ...)
{
  # Read a regular DWD observational zip file and return a data.frame,
  # optionally with expanded column names and POSIXct time stamps.
  if(fread)
  {
    # fast path: stream the 'produkt*' member straight out of the zip archive
    # http://dsnotes.com/post/2017-01-27-lessons-learned-from-outbrain-click-prediction-kaggle-competition/
    fp <- unzip(file, list=TRUE) # data.frame listing the archive members
    fp <- fp$Name[grepl("produkt",fp$Name)] # file produkt*, the actual datafile
    # same guard as the non-fread branch - an unexpected member count would
    # otherwise produce a broken shell command below:
    if(length(fp)!=1) stop("There should be a single 'produkt*' file, but there are ",
          length(fp), " in\n ", file, "\n Consider re-downloading (with force=TRUE).")
    # shQuote: unquoted paths containing spaces (or shell metacharacters)
    # would break the shell command or be interpreted by the shell:
    dat <- data.table::fread(cmd=paste("unzip -p", shQuote(file), shQuote(fp)),
                             na.strings=na9(nspace=0),
                             header=TRUE, sep=";", stringsAsFactors=TRUE, data.table=FALSE, ...)
  } else
  {
    # temporary unzipping directory, removed again on exit:
    fn <- tools::file_path_sans_ext(basename(file))
    exdir <- paste0(tempdir(),"/", fn)
    unzip(file, exdir=exdir)
    on.exit(unlink(exdir, recursive=TRUE), add=TRUE)
    # Read the actual data file:
    f <- dir(exdir, pattern="produkt*", full.names=TRUE)
    if(length(f)!=1) stop("There should be a single 'produkt*' file, but there are ",
          length(f), " in\n ", file, "\n Consider re-downloading (with force=TRUE).")
    dat <- read.table(f, na.strings=na9(), header=TRUE, sep=";", as.is=FALSE, ...)
  } # end if(!fread)
  #
  if(varnames) dat <- newColumnNames(dat)
  # return if file is empty, e.g. for daily/more_precip/hist_05988 2019-05-16:
  if(nrow(dat)==0)
  {
    warning("File contains no rows: ", file)
    return(dat)
  }
  # process time-stamp: http://stackoverflow.com/a/13022441
  # format=NULL skips the conversion entirely (date column stays a factor):
  if(!is.null(format))
  {
    # for res=monthly data: create MESS_DATUM as period begin + 14 (mid-month)
    if("MESS_DATUM_BEGINN" %in% colnames(dat))
      dat <- cbind(dat[,1, drop=FALSE], MESS_DATUM=dat$MESS_DATUM_BEGINN + 14, dat[,-1])
    if(!"MESS_DATUM" %in% colnames(dat))
      warning("There is no column 'MESS_DATUM' in ",file, call.=FALSE) else
    {
      # format=NA: infer from stamp length - 8 chars = daily (%Y%m%d),
      # 13 chars = stamps with ":" minutes (%Y%m%d%H:%M), else hourly:
      nch <- nchar(as.character(dat$MESS_DATUM[1]))
      if(is.na(format)) format <- if(nch== 8) "%Y%m%d" else
                                  if(nch==13) "%Y%m%d%H:%M" else "%Y%m%d%H"
      dat$MESS_DATUM <- as.POSIXct(as.character(dat$MESS_DATUM), format=format, tz=tz)
    }
  }
  # final output:
  return(dat)
}
# ~ meta ----
#' @title read dwd metadata (Beschreibung*.txt files)
#' @description read dwd metadata (Beschreibung*.txt files).
#' Intended to be called via \code{\link{readDWD}}.\cr
#' Column widths for \code{\link{read.fwf}} are computed internally.\cr
#' if(any(meta)), \code{\link{readDWD}} tries to set the locale to German
#' (to handle Umlaute correctly). It is hence not recommended to call
#' \code{rdwd:::readDWD.meta} directly on a file!\cr
#' Names can later be changed to ascii with
#' \code{berryFunctions::\link{convertUmlaut}}.
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}
#' @seealso \code{\link{readDWD}}
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' link <- selectDWD(res="daily", var="kl", per="r", meta=TRUE)
#' if(length(link)!=1) stop("length of link should be 1, but is ", length(link),
#' ":\n", berryFunctions::truncMessage(link,prefix="",sep="\n"))
#'
#' file <- dataDWD(link, dir=localtestdir(), read=FALSE)
#' meta <- readDWD(file)
#' head(meta)
#'
#' cnm <- colnames(meta)
#' if(length(cnm)!=8) stop("number of columns should be 8, but is ", length(cnm),
#' ":\n", toString(cnm))
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/daily_kl_recent_KL_Tageswerte_Beschreibung_Stationen.txt
#' @param \dots Further arguments passed to \code{\link{read.fwf}}
readDWD.meta <- function(file, ...)
{
# Parse a DWD 'Beschreibung*.txt' station list (fixed-width format).
# read one line to get column widths and names
# (line 1 = header, line 2 = dashes, line 3 = first data row):
oneline <- readLines(file, n=3, encoding="latin1")
# column widths (automatic detection across different styles used by the DWD):
# space positions in the first data row; runs of spaces mark the column gaps
spaces <- unlist(gregexpr(" ", oneline[3]))
breaks <- spaces[which(diff(spaces)!=1)]
if(substr(oneline[3],1,1)==" ") breaks <- breaks[-1]
# magic offsets for the right-adjusted numeric columns of the DWD layout:
breaks[3] <- breaks[3] -9 # right-adjusted column
breaks[4:5] <- breaks[4:5] -1 # right-adjusted columns
widths <- diff(c(0,breaks,200))
# the subdaily_standard_format description file has fixed, known widths:
sdsf <- grepl("subdaily_standard_format", file)
if(sdsf) widths <- c(6,6,9,10,10,10,10,26,200)
# actually read metadata, suppress readLines warning about EOL:
stats <- suppressWarnings(read.fwf(file, widths=widths, skip=2, strip.white=TRUE,
fileEncoding="latin1", ...) )
# column names:
# remove duplicate spaces (2018-03 only in subdaily_stand...Beschreibung....txt)
while( grepl("  ",oneline[1]) ) oneline[1] <- gsub("  ", " ", oneline[1])
colnames(stats) <- strsplit(oneline[1], " ")[[1]]
if(sdsf)
{
# drop separator / repeated-header rows, then round-trip through a tempfile
# so read.table re-derives consistent column classes:
stats <- stats[ ! stats[,1] %in% c("","ST_KE","-----") , ]
tf <- tempfile()
write.table(stats[,-1], file=tf, quote=FALSE, sep="\t")
stats <- read.table(tf, sep="\t")
colnames(stats) <- c("Stations_id", "von_datum", "bis_datum", "Stationshoehe",
"geoBreite", "geoLaenge", "Stationsname", "Bundesland")
}
# check classes - warn (don't stop) if the DWD changed the file layout:
classes <- c("integer", "integer", "integer", "integer", "numeric", "numeric", "factor", "factor")
actual <- sapply(stats, class)
# Stationshoehe may legitimately be numeric instead of integer:
if(actual[4]=="numeric") classes[4] <- "numeric"
if(!all(actual == classes))
{
msg <- paste0(names(actual)[actual!=classes], ": ", actual[actual!=classes],
" instead of ", classes[actual!=classes], ".")
msg <- paste(msg, collapse=" ")
warning(traceCall(3, "", ": "), "reading file '", file,
"' did not give the correct column classes. ", msg, call.=FALSE)
}
# return meta data.frame:
stats
}
# ~ multia ----
#' @title read multi_annual dwd data
#' @description read multi_annual dwd data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' All other observational data at \code{\link{dwdbase}} can be read
#' with \code{\link{readDWD.data}}, except for the multi_annual data.
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Feb 2019
#' @seealso \code{\link{readDWD}}
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' # Temperature aggregates (2019-04 the 9th file):
#' durl <- selectDWD(res="multi_annual", var="mean_81-10", per="")[9]
#' murl <- selectDWD(res="multi_annual", var="mean_81-10", per="", meta=TRUE)[9]
#'
#' ma_temp <- dataDWD(durl, dir=localtestdir())
#' ma_meta <- dataDWD(murl, dir=localtestdir())
#'
#' head(ma_temp)
#' head(ma_meta)
#'
#' ma <- merge(ma_meta, ma_temp, all=TRUE)
#' berryFunctions::linReg(ma$Stationshoehe, ma$Jahr)
#' op <- par(mfrow=c(3,4), mar=c(0.1,2,2,0), mgp=c(3,0.6,0))
#' for(m in colnames(ma)[8:19])
#' {
#' berryFunctions::linReg(ma$Stationshoehe, ma[,m], xaxt="n", xlab="", ylab="", main=m)
#' abline(h=0)
#' }
#' par(op)
#'
#' par(bg=8)
#' berryFunctions::colPoints(ma$geogr..Laenge, ma$geogr..Breite, ma$Jahr, add=F, asp=1.4)
#'
#' data("DEU")
#' pdf("MultiAnn.pdf", width=8, height=10)
#' par(bg=8)
#' for(m in colnames(ma)[8:19])
#' {
#' raster::plot(DEU, border="darkgrey")
#' berryFunctions::colPoints(ma[-262,]$geogr..Laenge, ma[-262,]$geogr..Breite, ma[-262,m],
#' asp=1.4, # Range=range(ma[-262,8:19]),
#' col=berryFunctions::divPal(200, rev=TRUE), zlab=m, add=T)
#' }
#' dev.off()
#' berryFunctions::openFile("MultiAnn.pdf")
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/multi_annual_mean_81-10_Temperatur_1981-2010_aktStandort.txt or
#' DWDdata/multi_annual_mean_81-10_Temperatur_1981-2010_Stationsliste_aktStandort.txt
#' @param fileEncoding \link{read.table} \link{file} encoding.
#' DEFAULT: "latin1" (needed on Linux, optional but not hurting on windows)
#' @param comment.char \link{read.table} comment character.
#' DEFAULT: "\\032" (needed 2019-04 to ignore the binary
#' control character at the end of multi_annual files)
#' @param \dots Further arguments passed to \code{\link{read.table}}
readDWD.multia <- function(file, fileEncoding="latin1", comment.char="\032", ...)
{
  # Read a DWD multi_annual semicolon-separated file into a data.frame.
  # comment.char="\032": the files end with a binary SUB control character
  # that would otherwise trip up read.table.
  tab <- read.table(file, sep=";", header=TRUE, fileEncoding=fileEncoding,
                    comment.char=comment.char, ...)
  # The files carry a trailing ";" per line, which read.table turns into an
  # extra empty column named "X" - drop it if present:
  lastcol <- ncol(tab)
  if(identical(colnames(tab)[lastcol], "X")) tab <- tab[, -lastcol]
  tab
}
# read gridded data ----
# ~ binary ----
#' @title read dwd gridded radolan binary data
#' @description read gridded radolan binary data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' @return list depending on argument \code{toraster}, see there for details
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Dec 2018.
#' Significant input for the underlying \code{\link{readRadarFile}} came
#' from Henning Rust & Christoph Ritschel at FU Berlin.
#' @seealso \code{\link{readDWD}}\cr
#' \url{https://wradlib.org} for much more extensive radar analysis in Python\cr
#' Kompositformatbeschreibung at \url{https://www.dwd.de/DE/leistungen/radolan/radolan.html}
#' for format description
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' # SF file as example: ----
#'
#' SF_link <- "/daily/radolan/historical/bin/2017/SF201712.tar.gz"
#' SF_file <- dataDWD(file=SF_link, base=gridbase, joinbf=TRUE, # 204 MB
#' dir=localtestdir(), read=FALSE)
#' # exdir radardir set to speed up my tests:
#' SF_exdir <- "C:/Users/berry/Desktop/DWDbinarySF"
#' if(!file.exists(SF_exdir)) SF_exdir <- tempdir()
#' # no need to read all 24*31=744 files, so setting selection:
#' SF_rad <- readDWD(SF_file, selection=1:10, exdir=SF_exdir) #with toraster=TRUE
#' if(length(SF_rad)!=2) stop("length(SF_rad) should be 2, but is ", length(SF_rad))
#'
#' SF_radp <- projectRasterDWD(SF_rad$data)
#' raster::plot(SF_radp[[1]], main=SF_rad$meta$date[1])
#' data(DEU)
#' raster::plot(DEU, add=TRUE)
#'
#'
#' # RW file as example: ----
#'
#' RW_link <- "hourly/radolan/reproc/2017_002/bin/2017/RW2017.002_201712.tar.gz"
#' RW_file <- dataDWD(file=RW_link, base=gridbase, joinbf=TRUE, # 25 MB
#' dir=localtestdir(), read=FALSE)
#' RW_exdir <- "C:/Users/berry/Desktop/DWDbinaryRW"
#' if(!file.exists(RW_exdir)) RW_exdir <- tempdir()
#' RW_rad <- readDWD(RW_file, selection=1:10, exdir=RW_exdir)
#' RW_radp <- projectRasterDWD(RW_rad$data, extent="rw")
#' raster::plot(RW_radp[[1]], main=RW_rad$meta$date[1])
#' raster::plot(DEU, add=TRUE)
#'
#' # ToDo: why are values + patterns not the same?
#'
#' # list of all Files: ----
#' data(gridIndex)
#' head(grep("historical", gridIndex, value=TRUE))
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/daily_radolan_historical_bin_2017_SF201712.tar.gz
#' @param exdir Directory to unzip into. If existing, only the needed files
#' will be unpacked with \code{\link{untar}}. Note that exdir
#' size will be around 1.1 GB. exdir can contain other files,
#' these will be ignored for the actual reading with
#' \code{\link{readRadarFile}} (function not exported, but documented).
#' DEFAULT exdir: sub(".tar.gz$", "", file)
#' @param toraster Logical: convert output (list of matrixes + meta informations)
#' to a list with data (\code{raster \link[raster]{stack}}) +
#' meta (list from the first subfile, but with vector of dates)?
#' DEFAULT: TRUE
#' @param progbar Show messages and progress bars? \code{\link{readDWD}} will
#' keep progbar=TRUE for binary files, even if length(file)==1.
#' DEFAULT: TRUE
#' @param selection Optionally read only a subset of the ~24*31=744 files.
#' Called as \code{f[selection]}. DEFAULT: NULL (ignored)
#' @param \dots Further arguments passed to \code{\link{readRadarFile}},
#' i.e. \code{na} and \code{clutter}
readDWD.binary <- function(file, exdir=sub(".tar.gz$", "", file),
                           toraster=TRUE, progbar=TRUE, selection=NULL, ...)
{
  # Read radolan binary .tar.gz archives; returns either the raw list of
  # readRadarFile results (toraster=FALSE) or a list(data=stack, meta=list).
  verbose <- function(...) if(progbar) message(...)
  # unpack only the archive members not yet present in exdir:
  verbose("\nChecking which files need to be untarred to ", exdir, "...")
  members <- untar(file, list=TRUE)
  needed <- !members %in% dir(exdir)
  if(any(needed))
  {
    verbose("Unpacking ", sum(needed), " of ", length(members), " files in ", file, "...")
    untar(file, files=members[needed], exdir=exdir)
  } else
    verbose("All files were already untarred.")
  #
  # hourly files: exdir may contain unrelated files, keep archive members only
  f <- dir(exdir, full.names=TRUE) # e.g. 31*24 = 744 files (daily/hist/2017-12)
  f <- f[basename(f) %in% members]
  if(!is.null(selection)) f <- f[selection]
  #
  verbose("Reading ", length(f), " binary files...")
  if(progbar) lapply <- pbapply::pblapply
  # Read each binary radar file:
  rb <- lapply(f, readRadarFile, ...)
  # name each list element by its time stamp:
  stamps <- sapply(rb, function(x) as.character(x$meta$date))
  names(rb) <- stamps
  if(!toraster) return(invisible(rb))
  # else convert to a raster stack + a single meta list:
  if(!requireNamespace("raster", quietly=TRUE))
    stop("To use rdwd:::readDWD.binary with toraster=TRUE, please first install raster:",
         " install.packages('raster')", call.=FALSE)
  verbose("Converting to raster stack....")
  layers <- base::lapply(rb, "[[", 1)
  layers <- base::lapply(layers, raster::raster)
  layers <- raster::stack(layers)
  # meta information of the first subfile, with filename + date vector added:
  rbmeta <- rb[[1]]$meta
  rbmeta$filename <- file
  rbmeta$date <- as.POSIXct(stamps)
  return(invisible(list(data=layers, meta=rbmeta)))
}
# ~ raster ----
#' @title read dwd gridded raster data
#' @description Read gridded raster data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' Note that \code{R.utils} must be installed to unzip the .asc.gz files.
#' @return \code{raster::\link[raster]{raster}} object
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, Dec 2018
#' @seealso \code{\link{readDWD}}
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' rasterbase <- paste0(gridbase,"/seasonal/air_temperature_mean")
#' ftp.files <- indexFTP("/16_DJF", base=rasterbase, dir=tempdir())
#' localfiles <- dataDWD(ftp.files[1:2], base=rasterbase, joinbf=TRUE,
#' dir=localtestdir(), read=FALSE)
#' rf <- readDWD(localfiles[1])
#' rf <- readDWD(localfiles[1]) # runs faster at second time due to skip=TRUE
#' raster::plot(rf)
#'
#' rfp <- projectRasterDWD(rf, proj="seasonal", extent=rf@extent)
#' raster::plot(rfp)
#' data(DEU)
#' raster::plot(DEU, add=TRUE)
#'
#' testthat::expect_equal(raster::cellStats(rf, range), c(-8.2,4.4))
#' rf10 <- readDWD(localfiles[1], dividebyten=FALSE)
#' raster::plot(rf10)
#' testthat::expect_equal(raster::cellStats(rf10, range), c(-82,44))
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/grids_germany/seasonal/air_temperature_mean/
#' 16_DJF_grids_germany_seasonal_air_temp_mean_188216.asc.gz
#' @param gargs Named list of arguments passed to
#' \code{R.utils::\link[R.utils]{gunzip}}. The internal
#' defaults are: \code{remove=FALSE} (recommended to keep this
#' so \code{file} does not get deleted) and \code{skip=TRUE}
#' (which reads previously unzipped files as is).
#' If \code{file} has changed, you might want to use
#' \code{gargs=list(skip=FALSE, overwrite=TRUE)}
#' or alternatively \code{gargs=list(temporary=TRUE)}.
#' The \code{gunzip} default \code{destname} means that the
#' unzipped file is stored at the same path as \code{file}.
#' DEFAULT gargs: NULL
#' @param dividebyten Logical: Divide the numerical values by 10?
#' DEFAULT: TRUE
#' @param \dots Further arguments passed to \code{raster::\link[raster]{raster}}
readDWD.raster <- function(file, gargs=NULL, dividebyten=TRUE, ...)
{
  # Read a gridded .asc.gz raster file; returns a raster object (invisible).
  # FIX: dividebyten now has the default TRUE that the roxygen documentation
  # already promised (and that readDWD.asc uses) - previously a direct call
  # without the argument errored with "argument missing, with no default".
  # 'R.utils' (gunzip) and 'raster' are suggested packages, checked at runtime:
  if(!requireNamespace("R.utils", quietly=TRUE))
    stop("To use rdwd:::readDWD.raster, please first install R.utils:",
         " install.packages('R.utils')", call.=FALSE)
  if(!requireNamespace("raster", quietly=TRUE))
    stop("To use rdwd:::readDWD.raster, please first install raster:",
         " install.packages('raster')", call.=FALSE)
  #https://stackoverflow.com/questions/5227444/recursively-ftp-download-then-extract-gz-files
  # gunzip arguments: user-supplied gargs override the defaults,
  # except filename which is always taken from 'file':
  gdef <- list(filename=file, remove=FALSE, skip=TRUE)
  gfinal <- berryFunctions::owa(gdef, gargs, "filename")
  rdata <- do.call(R.utils::gunzip, gfinal)
  # raster reading:
  r <- raster::raster(rdata, ...)
  # optionally divide values by ten (DWD grids are stored scaled):
  if(dividebyten) r <- r/10
  return(invisible(r))
}
# ~ asc ----
#' @title read dwd gridded radolan asc data
#' @description read grid-interpolated radolan asc data.
#' Intended to be called via \code{\link{readDWD}}.\cr
#' See \url{ftp://ftp-cdc.dwd.de/pub/CDC/grids_germany/hourly/radolan/README.txt}
#' All layers (following \code{selection} if given) in all .tar.gz files are
#' combined into a raster stack with \code{raster::\link[raster]{stack}}.\cr
#' To project the data, use \code{\link{projectRasterDWD}}
#' @return data.frame
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, April 2019
#' @seealso \code{\link{readDWD}}
# @importFrom raster raster stack crs projection extent plot
#' @examples
#' \dontrun{ # Excluded from CRAN checks, but run in localtests
#'
#' # File selection and download:
#' datadir <- localtestdir()
#' # 2019-05-18, hourly radolan files not yet copied to new ftp, hence:
#' gridbase <- "ftp://ftp-cdc.dwd.de/pub/CDC/grids_germany"
#' radbase <- paste0(gridbase,"/hourly/radolan/historical/asc/")
#' radfile <- "2018/RW-201809.tar" # 25 MB to download
#' file <- dataDWD(radfile, base=radbase, joinbf=TRUE, dir=datadir,
#' dfargs=list(mode="wb"), read=FALSE) # download with mode=wb!!!
#'
#' #asc <- readDWD(file) # 4 GB in mem. ~ 20 secs unzip, 30 secs read, 10 min divide
#' asc <- readDWD(file, selection=1:20, dividebyten=TRUE)
#' asc <- projectRasterDWD(asc)
#'
#' raster::plot(asc[[1]], main=names(asc)[1])
#' data(DEU)
#' raster::plot(DEU, add=TRUE)
#'
#' rng <- range(raster::cellStats(asc, "range"))
#' nframes <- 3 # raster::nlayers(asc) for all (time intensive!)
#' viddir <- paste0(tempdir(),"/RadolanVideo")
#' dir.create(viddir)
#' png(paste0(viddir,"/Radolan_%03d.png"), width=7, height=5, units="in", res=300)
#' dummy <- pbsapply(1:nframes, function(i)
#' raster::plot(asc[[i]], main=names(asc)[i], zlim=rng)) # 3 secs per layer
#' dev.off()
#' berryFunctions::openFile(paste0(viddir,"/Radolan_001.png"))
#'
#' # Time series of a given point in space:
#' plot(as.vector(asc[800,800,]), type="l", xlab="Time [hours]")
#'
#' # if dividebyten=FALSE, raster stores things out of memory in the exdir.
#' # by default, this is in tempdir, hence you would need to save asc manually:
#' # raster::writeRaster(asc, paste0(datadir,"/RW2018-09"), overwrite=TRUE)
#' }
#' @param file Name of file on harddrive, like e.g.
#' DWDdata/grids_germany/hourly/radolan/historical/asc/
#' 2018_RW-201809.tar.
#' Must have been downloaded with \code{mode="wb"}!
#' @param exdir Directory to unzip into. Unpacked files existing therein
#' will not be untarred again, saving up to 15 secs per file.
#' DEFAULT: NULL (subfolder of \code{\link{tempdir}()})
#' @param dividebyten Divide numerical values by 10?
#' If dividebyten=FALSE and exdir left at NULL (tempdir), save
#' the result on disc with \code{raster::\link[raster]{writeRaster}}.
#' Accessing out-of-memory raster objects won't work if
#' exdir is removed! -> Error in .local(.Object, ...)
#' DEFAULT: TRUE
#' @param progbar Show messages and progress bars? \code{\link{readDWD}} will
#' keep progbar=TRUE for asc files, even if length(file)==1.
#' DEFAULT: TRUE
#' @param selection Optionally read only a subset of the ~24*31=744 files.
#' Called as \code{f[selection]}. DEFAULT: NULL (ignored)
#' @param \dots Further arguments passed to \code{raster::\link[raster]{raster}}
readDWD.asc <- function(file, exdir=NULL, dividebyten=TRUE,
                        selection=NULL, progbar=TRUE, ...)
{
  # Read radolan .tar archives (tar of daily .tar.gz of hourly .asc grids)
  # into a raster stack. 'raster' is only a suggested package -> runtime check:
  if(!requireNamespace("raster", quietly=TRUE))
    stop("To use rdwd:::readDWD.asc, please first install raster:",
         " install.packages('raster')", call.=FALSE)
  # locally mask lapply with the progress-bar version if requested:
  if(progbar) lapply <- pbapply::pblapply
  # prepare to untar data (two layers):
  fn <- tools::file_path_sans_ext(basename(file))
  if(is.null(exdir)) exdir <- paste0(tempdir(),"/", fn)
  #
  # untar layer 1: one .tar.gz file per day
  daydir <- paste0(exdir,"/dayfiles")
  untar(file, exdir=daydir) # 30/31 .tar.gz files (one for each day). overwrites existing files
  dayfiles <- dir(daydir, full.names=TRUE)
  #
  # untar layer 2: hourly .asc files, skipped for days already unpacked
  if(progbar) message("\nChecking if already unpacked: ", file, "...")
  to_untar <- lapply(dayfiles, untar, list=TRUE)
  # "\\.asc$": escape the dot so only real .asc extensions match
  # (an unescaped "." is a regex wildcard matching any character):
  untarred <- dir(exdir, pattern="\\.asc$")
  to_untar <- !sapply(to_untar, function(x) all(x %in% untarred))
  if(any(to_untar))
  {
    if(progbar) message("Unpacking tar files into ",exdir,"...")
    lapply(dayfiles[to_untar], untar, exdir=exdir)
  } else if(progbar) message("Tar file was already unpacked into ",exdir," :)")
  # yields 31 * 24 .asc files each 1.7MB, takes ~20 secs
  #
  # read data (hourly files), optionally only a subset via 'selection':
  f <- dir(exdir, pattern="\\.asc$", full.names=TRUE) # ~720 files
  if(!is.null(selection)) f <- f[selection]
  if(progbar) message("Reading ",length(f)," files...")
  dat <- lapply(f, raster::raster, ...)
  #
  # divide by ten (takes ~9 min!). '&&' since both operands are scalar flags:
  if(progbar && dividebyten) message("Dividing values by ten...")
  if(dividebyten) dat <- lapply(dat, function(x) x/10)
  #
  # stack all hourly layers into a single raster stack:
  dat <- raster::stack(dat)
  # invisible so interactive calls don't print a huge object:
  return(invisible(dat))
}
# helper functionality ----
#' @title project DWD raster data
#' @description Set projection and extent for DWD raster data. Optionally (and
#' per default) also reprojects to latlon data.
#' The internal defaults are extracted from the
#' Kompositformatbeschreibung at \url{https://www.dwd.de/DE/leistungen/radolan/radolan.html},
#' as provided 2019-04 by Antonia Hengst.
#' @return Raster object with projection and extent, invisible
#' @author Berry Boessenkool, \email{berry-b@@gmx.de}, May 2019
#' @seealso \code{raster::\link[raster]{crs}},
#' \code{raster::\link[raster]{projection}},
#' \code{raster::\link[raster]{extent}},
#' \code{raster::\link[raster]{projectRaster}},
#' \code{\link{readDWD.binary}, \link{readDWD.raster}, \link{readDWD.asc}}
#' @keywords aplot
#' @export
#' @examples
#' # To be used after readDWD.binary, readDWD.raster, readDWD.asc
#' @param r Raster object
#' @param proj Desired projection. Can be a \code{raster::\link[raster]{crs}} output,
#' a projection character string (will be passed to \code{crs}),
#' "radolan" or "seasonal" with internal defaults defined per DWD standard,
#' or NULL to not set proj+extent but still consider \code{latlon}.
#' DEFAULT: "radolan"
#' @param extent Desired \code{\link[raster]{extent}}. Can be an extent object,
#' a vector with 4 numbers, or "radolan" / "rw" / "seasonal"
#' with internal defaults.
#' DEFAULT: "radolan"
#' @param latlon Logical: reproject \code{r} to lat-lon crs? DEFAULT: TRUE
#'
# Set projection + extent on DWD raster data, optionally reproject to lat-lon.
# See roxygen block above for parameter documentation.
projectRasterDWD <- function(r, proj="radolan", extent="radolan", latlon=TRUE)
{
# package check (raster is a suggested, not required, dependency):
if(!requireNamespace("raster", quietly=TRUE))
stop("To use rdwd::projectRasterDWD, please first install raster:",
" install.packages('raster')", call.=FALSE)
#
# proj=NULL skips setting projection/extent but still honours latlon:
if(!is.null(proj))
{
# Default projection and extent:
# Projection as per Kompositbeschreibung 1.5
# (note: these are multi-line string literals; the embedded newline is
# accepted by raster::crs)
p_radolan <- "+proj=stere +lat_0=90 +lat_ts=90 +lon_0=10 +k=0.93301270189
+x_0=0 +y_0=0 +a=6370040 +b=6370040 +to_meter=1000 +no_defs"
# ftp://opendata.dwd.de/climate_environment/CDC/grids_germany/seasonal/air_temperature_max/
# BESCHREIBUNG_gridsgermany_seasonal_air_temperature_max_de.pdf
p_seasonal <- "+proj=tmerc +lat_0=0 +lon_0=9 +k=1 +x_0=3500000 +y_0=0
+ellps=bessel +datum=potsdam +units=m +no_defs"
#
# translate keyword shortcuts to the actual proj strings:
if(is.character(proj))
{
if(proj=="radolan") proj <- p_radolan else
if(proj=="seasonal") proj <- p_seasonal
}
# anything that is not already a CRS object gets coerced:
if(!inherits(proj, "CRS")) proj <- raster::crs(proj)
#
# Extent as per Kompositbeschreibung 1.4 / seasonal DESCRIPTION pdf:
e_radolan <- c(-523.4622,376.5378,-4658.645,-3758.645)
e_rw <- c(-443.4622,456.5378,-4758.645,-3658.645) # 1.2, Abb 3
# e_radolan <- c(-673.4656656,726.5343344,-5008.642536,-3508.642536) # ME
e_seasonal <- c(3280414.71163347, 3934414.71163347, 5237500.62890625, 6103500.62890625)
# translate keyword shortcuts to the numeric extents:
if(is.character(extent))
{
if(extent=="radolan") extent <- e_radolan else
if(extent=="rw") extent <- e_rw else
if(extent=="seasonal") extent <- e_seasonal
}
if(!inherits(extent,"Extent")) extent <- raster::extent(extent)
#
# actually assign projection and extent to the raster object:
raster::projection(r) <- proj
raster::extent( r) <- extent
} # end if not null proj
#
# lat-lon projection (projectRaster resamples and can be slow on big stacks):
proj_ll <- raster::crs("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
if(latlon) r <- raster::projectRaster(r, crs=proj_ll)
# invisible output:
return(invisible(r))
}
|
#' ---
#' output: github_document
#' ---
#' Report the set of installed packages (render with knitr::spin).
# Base-R subsetting replaces require(dplyr) + select(): library() is
# preferred over require() for hard dependencies anyway, and no external
# package is needed just to pick columns from a data.frame.
df1 <- as.data.frame(installed.packages(), row.names = FALSE)
df2 <- df1[, c("Package", "LibPath", "Version", "Priority", "Built")]
| /test.R | no_license | michaelblais/packages-report | R | false | false | 183 | r | #' ---
#' output: github_document
#' ---
require(dplyr)
df1 <- as.data.frame(installed.packages(),row.names = FALSE)
df2 <- df1 %>% select(Package,LibPath,Version,Priority,Built)
|
## ---- load-soc-grad
# Load train/test Census extracts plus the copy of the test set that had
# missingness introduced (missing values are coded -999 throughout).
load("data/Census.train.Rda")
load("data/Census.test.Rda")
# Load dataset with missingness
load("data/SocialGrade/Census.test.tidy.miss.Rda")
# Create test.tidy and train.tidy datasets: drop units with NCR codes (-9)
# for social.grade and remove the personal identifier (columns 1 and 17).
Census.train.tidy <- Census.train[!Census.train$social.grade == -9, c(-1,-17)]
Census.test.tidy <- Census.test[!Census.test$social.grade == -9, c(-1,-17)]
# Read in CANCEIS input and output; [, -1] drops the leading id column.
# NOTE(review): "social.grade" appears twice in col.names below; read.table
# (check.names = TRUE) will rename the 7th column to "social.grade.1", and
# $social.grade later picks the first. Confirm against the CANCEIS file
# layout that the duplicated name is intended.
CANCEIS.test.in <- read.table("data/SocialGrade/CANCEIS/xxxUNIT01IG01.txt",
header = FALSE,
col.names = c(
"id", "social.grade", "student",
"industry", "age", "occupation",
"social.grade"
)
)[, -1]
CANCEIS.test.out <- read.table("data/SocialGrade/CANCEIS/XXXUNITIMP01IG01.txt",
header = FALSE,
col.names = c(
"id", "social.grade", "student",
"industry", "age", "occupation",
"social.grade"
)
)[, -1]
# Read in CANCEISXG (mixed-methods) input and output; same id-drop pattern.
CANCEISXG.test.in <- read.table("data/SocialGrade/MixedMethods/xxxUNIT01IG01.txt",
header = FALSE,
col.names = c(
"canceis.id", "social.grade", "occupation", "student",
"industry", "hours.cont", "marital.status",
"econ.act"
)
)[, -1]
CANCEISXG.test.out <- read.table("data/SocialGrade/MixedMethods/XXXUNITIMP01IG01.txt",
header = FALSE,
col.names = c(
"canceis.id", "social.grade", "occupation", "student",
"industry", "hours.cont", "marital.status",
"econ.act"
)
)[, -1]
# Load predicted values from XGBoost (creates the `predicted` object)
load("data/SocialGrade/XGBoost/predicted.RData")
# Load the serialized xgboost model (requires the xgboost package)
trainSG_v1 <- xgb.load("XGBoost/xgboost.socialGrade")
## ---- eval-soc-grad
# Evaluate performance of the XGBoost model.
# Build a tibble holding the three versions of the outcome variable:
# observed (Actuals), model output (Predictions), and the masked copy
# (Missing, where -999 marks an artificially-missing value).
actuals <- Census.test.tidy$social.grade
missing <- Census.test.tidy.miss$social.grade
compareVar <- tibble(
Actuals = actuals, Predictions = predicted,
Missing = missing
)
# Restrict to rows that were actually missing, and flag each prediction:
compareMissing <- compareVar[compareVar$Missing == -999, ]
compareMissing$indicator <- ifelse(compareMissing$Actuals ==
compareMissing$Predictions,"Correct", "Wrong")
counts <- table(compareMissing$indicator)
barplot(counts, main = "Accuracy of predictions", xlab = "Outcome")
# Using caret's confusion matrix to evaluate predictions.
# NOTE(review): this uses ALL rows (compareVar), whereas the CANCEIS blocks
# below use only the missing rows -- confirm that difference is intended.
# Also, confusionMatrix(data, reference) is called here as (Actuals,
# Predictions); verify the argument order matches the intended reference.
confusionML <- confusionMatrix(
as.factor(compareVar$Actuals),
as.factor(compareVar$Predictions)
)
# Jittered scatter of predicted vs observed class, saved to disk:
qplot(Actuals, Predictions,
data = compareVar,
geom = c("jitter"), main = "predicted vs. observed in test data",
xlab = "Observed Class", ylab = "Predicted Class"
) + scale_x_discrete(limits=c("1","2","3","4")
) + scale_y_discrete(limits=c("1","2","3","4"))
ggsave("images/SGXGqplot.png")
# Evaluate performance of CANCEIS.
# Same pattern as the XGBoost block: compare observed values against the
# CANCEIS-imputed output, restricted to rows that were masked (-999).
actuals.CANCEIS <- Census.test.tidy$social.grade
missing.CANCEIS <- CANCEIS.test.in$social.grade
predicted.CANCEIS <- CANCEIS.test.out$social.grade
compare_var_CANCEIS <- tibble(
Actuals = actuals.CANCEIS, Predictions =
predicted.CANCEIS, Missing = missing.CANCEIS
)
# keep only rows whose input value was missing:
compare_missing_CANCEIS <- compare_var_CANCEIS[
compare_var_CANCEIS$Missing == -999, ]
compare_missing_CANCEIS$indicator <- ifelse(
compare_missing_CANCEIS$Actuals ==
compare_missing_CANCEIS$Predictions,
"Correct", "Wrong"
)
counts_CANCEIS <- table(compare_missing_CANCEIS$indicator)
barplot(counts_CANCEIS, main = "Accuracy of predictions", xlab = "Outcome")
# caret confusion matrix on the missing-row subset only:
confusion_CANCEIS <- confusionMatrix(
as.factor(compare_missing_CANCEIS$Actuals),
as.factor(compare_missing_CANCEIS$Predictions)
)
# Jittered scatter of predicted vs observed class, saved to disk:
qplot(Actuals, Predictions,
data = compare_missing_CANCEIS,
geom = c("jitter"), main = "predicted vs. observed in validation data",
xlab = "Observed Class", ylab = "Predicted Class"
) + scale_x_discrete(limits=c("1","2","3","4")
) + scale_y_discrete(limits=c("1","2","3","4"))
ggsave("images/SGCANCEISqplot.png")
# Evaluate performance of CANCEISXG (the mixed CANCEIS + XGBoost method).
# Identical evaluation pattern to the CANCEIS block above.
actuals.CANCEISXG <- Census.test.tidy$social.grade
missing.CANCEISXG <- CANCEISXG.test.in$social.grade
predicted.CANCEISXG <- CANCEISXG.test.out$social.grade
compare_var_CANCEISXG <- tibble(
Actuals = actuals.CANCEISXG, Predictions =
predicted.CANCEISXG, Missing = missing.CANCEISXG
)
# keep only rows whose input value was missing (-999):
compare_missing_CANCEISXG <- compare_var_CANCEISXG[
compare_var_CANCEISXG$Missing == -999, ]
compare_missing_CANCEISXG$indicator <- ifelse(
compare_missing_CANCEISXG$Actuals ==
compare_missing_CANCEISXG$Predictions,
"Correct", "Wrong"
)
counts_CANCEISXG <- table(compare_missing_CANCEISXG$indicator)
barplot(counts_CANCEISXG, main = "Accuracy of predictions", xlab = "Outcome")
# caret confusion matrix on the missing-row subset only:
confusion_CANCEISXG <- confusionMatrix(
as.factor(compare_missing_CANCEISXG$Actuals),
as.factor(compare_missing_CANCEISXG$Predictions)
)
# Jittered scatter of predicted vs observed class, saved to disk:
qplot(Actuals, Predictions,
data = compare_missing_CANCEISXG,
geom = c("jitter"), main = "predicted vs. observed in validation data",
xlab = "Observed Class", ylab = "Predicted Class"
) + scale_x_discrete(limits=c("1","2","3","4")
) + scale_y_discrete(limits=c("1","2","3","4"))
ggsave("images/SGCANCEISXGqplot.png")
# Baseline: impute missing values with the mode of the observed values.
# NOTE(review): Mode() is not a base-R function -- presumably defined
# elsewhere in the project; confirm it returns the most frequent value.
# First compute the mode over the non-missing values only:
mode.dat <- Census.test.tidy.miss[
Census.test.tidy.miss$social.grade != -999, ]
mode.val <- Mode(mode.dat$social.grade)
# Build the usual Actuals / Predictions / Missing comparison tibble;
# predictions substitute the mode wherever the value was masked (-999):
actuals.mode <- Census.test.tidy$social.grade
missing.mode <- Census.test.tidy.miss$social.grade
predicted.mode <- ifelse(
Census.test.tidy.miss$social.grade == -999, mode.val,
Census.test.tidy.miss$social.grade)
compare_var_mode <- tibble(
Actuals = actuals.mode, Predictions =
predicted.mode, Missing = missing.mode
)
# keep only rows whose input value was missing:
compare_missing_mode <- compare_var_mode[
compare_var_mode$Missing == -999, ]
compare_missing_mode$indicator <- ifelse(
compare_missing_mode$Actuals ==
compare_missing_mode$Predictions,
"Correct", "Wrong"
)
counts_mode <- table(compare_missing_mode$indicator)
barplot(counts_mode, main = "Accuracy of predictions", xlab = "Outcome")
## ---- compare-soc-grad
# Collect Accuracy and Kappa for each imputation method into one table.
# NOTE(review): confusionML above was computed on ALL test rows, while
# confusion_CANCEIS / confusion_CANCEISXG use only the missing rows --
# confirm mixing these evaluation bases in one table is intended.
XGBoost <- confusionML$overall[c('Accuracy','Kappa')]
CANCEIS <- confusion_CANCEIS$overall[c('Accuracy','Kappa')]
MixedMethods <- confusion_CANCEISXG$overall[c('Accuracy','Kappa')]
# Mode imputation gets no Kappa; accuracy = correct / total among missing.
# (This indexing errors if either "Correct" or "Wrong" is absent from
# counts_mode -- acceptable here, but worth knowing.)
Mode <- c(counts_mode[['Correct']]/(counts_mode[['Correct']]+counts_mode[['Wrong']]), NA)
CompareSocGrad <- cbind(XGBoost, CANCEIS, MixedMethods, Mode)
save(CompareSocGrad, file = "data/SocialGrade/CompareSocGrad.RData")
| /R/WFZ_SocialGrade_CompareImp.R | no_license | karetsu/imputation | R | false | false | 7,361 | r | ## ---- load-soc-grad
# Load datasets
# Test and Training data
load("data/Census.train.Rda")
load("data/Census.test.Rda")
# Load dataset with missingenss
load("data/SocialGrade/Census.test.tidy.miss.Rda")
# Create test.tidy and train.tidy datasets (Remove units with NCR codes for variable
# & Remove the personal identifier)
Census.train.tidy <- Census.train[!Census.train$social.grade == -9, c(-1,-17)]
Census.test.tidy <- Census.test[!Census.test$social.grade == -9, c(-1,-17)]
# Read in CANCEIS input and output
CANCEIS.test.in <- read.table("data/SocialGrade/CANCEIS/xxxUNIT01IG01.txt",
header = FALSE,
col.names = c(
"id", "social.grade", "student",
"industry", "age", "occupation",
"social.grade"
)
)[, -1]
CANCEIS.test.out <- read.table("data/SocialGrade/CANCEIS/XXXUNITIMP01IG01.txt",
header = FALSE,
col.names = c(
"id", "social.grade", "student",
"industry", "age", "occupation",
"social.grade"
)
)[, -1]
# Read in CANCEISXG input and output
CANCEISXG.test.in <- read.table("data/SocialGrade/MixedMethods/xxxUNIT01IG01.txt",
header = FALSE,
col.names = c(
"canceis.id", "social.grade", "occupation", "student",
"industry", "hours.cont", "marital.status",
"econ.act"
)
)[, -1]
CANCEISXG.test.out <- read.table("data/SocialGrade/MixedMethods/XXXUNITIMP01IG01.txt",
header = FALSE,
col.names = c(
"canceis.id", "social.grade", "occupation", "student",
"industry", "hours.cont", "marital.status",
"econ.act"
)
)[, -1]
# Load predicted values from XGBoost
load("data/SocialGrade/XGBoost/predicted.RData")
# Load model
trainSG_v1 <- xgb.load("XGBoost/xgboost.socialGrade")
## ---- eval-soc-grad
# Evaluate performance of XGBoost model
# Compare versions of the outcome variable (Actual, Predicted, Missing)
actuals <- Census.test.tidy$social.grade
missing <- Census.test.tidy.miss$social.grade
compareVar <- tibble(
Actuals = actuals, Predictions = predicted,
Missing = missing
)
compareMissing <- compareVar[compareVar$Missing == -999, ]
compareMissing$indicator <- ifelse(compareMissing$Actuals ==
compareMissing$Predictions,"Correct", "Wrong")
counts <- table(compareMissing$indicator)
barplot(counts, main = "Accuracy of predictions", xlab = "Outcome")
# Using Confusion Matrix to evaluate predictions
confusionML <- confusionMatrix(
as.factor(compareVar$Actuals),
as.factor(compareVar$Predictions)
)
qplot(Actuals, Predictions,
data = compareVar,
geom = c("jitter"), main = "predicted vs. observed in test data",
xlab = "Observed Class", ylab = "Predicted Class"
) + scale_x_discrete(limits=c("1","2","3","4")
) + scale_y_discrete(limits=c("1","2","3","4"))
ggsave("images/SGXGqplot.png")
# Evaluate performance of CANCEIS
# Compare predicted and actuals
actuals.CANCEIS <- Census.test.tidy$social.grade
missing.CANCEIS <- CANCEIS.test.in$social.grade
predicted.CANCEIS <- CANCEIS.test.out$social.grade
compare_var_CANCEIS <- tibble(
Actuals = actuals.CANCEIS, Predictions =
predicted.CANCEIS, Missing = missing.CANCEIS
)
compare_missing_CANCEIS <- compare_var_CANCEIS[
compare_var_CANCEIS$Missing == -999, ]
compare_missing_CANCEIS$indicator <- ifelse(
compare_missing_CANCEIS$Actuals ==
compare_missing_CANCEIS$Predictions,
"Correct", "Wrong"
)
counts_CANCEIS <- table(compare_missing_CANCEIS$indicator)
barplot(counts_CANCEIS, main = "Accuracy of predictions", xlab = "Outcome")
# Using Confusion Matrix to evaluate predictions
confusion_CANCEIS <- confusionMatrix(
as.factor(compare_missing_CANCEIS$Actuals),
as.factor(compare_missing_CANCEIS$Predictions)
)
qplot(Actuals, Predictions,
data = compare_missing_CANCEIS,
geom = c("jitter"), main = "predicted vs. observed in validation data",
xlab = "Observed Class", ylab = "Predicted Class"
) + scale_x_discrete(limits=c("1","2","3","4")
) + scale_y_discrete(limits=c("1","2","3","4"))
ggsave("images/SGCANCEISqplot.png")
# Evaluate performance of CANCEISXG
# Compare predicted and actuals
actuals.CANCEISXG <- Census.test.tidy$social.grade
missing.CANCEISXG <- CANCEISXG.test.in$social.grade
predicted.CANCEISXG <- CANCEISXG.test.out$social.grade
compare_var_CANCEISXG <- tibble(
Actuals = actuals.CANCEISXG, Predictions =
predicted.CANCEISXG, Missing = missing.CANCEISXG
)
compare_missing_CANCEISXG <- compare_var_CANCEISXG[
compare_var_CANCEISXG$Missing == -999, ]
compare_missing_CANCEISXG$indicator <- ifelse(
compare_missing_CANCEISXG$Actuals ==
compare_missing_CANCEISXG$Predictions,
"Correct", "Wrong"
)
counts_CANCEISXG <- table(compare_missing_CANCEISXG$indicator)
barplot(counts_CANCEISXG, main = "Accuracy of predictions", xlab = "Outcome")
# Using Confusion Matrix to evaluate predictions
confusion_CANCEISXG <- confusionMatrix(
as.factor(compare_missing_CANCEISXG$Actuals),
as.factor(compare_missing_CANCEISXG$Predictions)
)
qplot(Actuals, Predictions,
data = compare_missing_CANCEISXG,
geom = c("jitter"), main = "predicted vs. observed in validation data",
xlab = "Observed Class", ylab = "Predicted Class"
) + scale_x_discrete(limits=c("1","2","3","4")
) + scale_y_discrete(limits=c("1","2","3","4"))
ggsave("images/SGCANCEISXGqplot.png")
# Impute values using mode imputation
# Create a vector of imputable variable excluding missing values
mode.dat <- Census.test.tidy.miss[
Census.test.tidy.miss$social.grade != -999, ]
mode.val <- Mode(mode.dat$social.grade)
# Compare predicted and actuals
actuals.mode <- Census.test.tidy$social.grade
missing.mode <- Census.test.tidy.miss$social.grade
predicted.mode <- ifelse(
Census.test.tidy.miss$social.grade == -999, mode.val,
Census.test.tidy.miss$social.grade)
compare_var_mode <- tibble(
Actuals = actuals.mode, Predictions =
predicted.mode, Missing = missing.mode
)
compare_missing_mode <- compare_var_mode[
compare_var_mode$Missing == -999, ]
compare_missing_mode$indicator <- ifelse(
compare_missing_mode$Actuals ==
compare_missing_mode$Predictions,
"Correct", "Wrong"
)
counts_mode <- table(compare_missing_mode$indicator)
barplot(counts_mode, main = "Accuracy of predictions", xlab = "Outcome")
## ---- compare-soc-grad
XGBoost <- confusionML$overall[c('Accuracy','Kappa')]
CANCEIS <- confusion_CANCEIS$overall[c('Accuracy','Kappa')]
MixedMethods <- confusion_CANCEISXG$overall[c('Accuracy','Kappa')]
Mode <- c(counts_mode[['Correct']]/(counts_mode[['Correct']]+counts_mode[['Wrong']]), NA)
CompareSocGrad <- cbind(XGBoost, CANCEIS, MixedMethods, Mode)
save(CompareSocGrad, file = "data/SocialGrade/CompareSocGrad.RData")
|
# Scrape last month's Thai customs import statistics (tariff 27111100000)
# and push a notification when the figures have been updated.
library(httr)
library(rvest)
library(tm)
library(tidyverse)
library(pushoverr)
url <- "http://www.customs.go.th/statistic_report.php?show_search=1"
ua <- user_agent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36")
# specify month: previous calendar month, with January wrapping to December.
# NOTE(review): the POST below hard-codes year='2020', so the December
# wrap-around does not also decrement the year -- confirm this is intended.
cmth<-Sys.Date() %>% str_remove("\\d{4}-") %>%
str_extract("\\d{2}") %>% as.numeric() -1
if(cmth==0){
mth<-12
} else{
mth<-cmth
}
# extraction: submit the report form and grab the page text
thimportdata<-POST(url,body = list(
month=mth, year='2020', top_menu='menu_homepage',left_menu='',
current_id='', s_page='', order_by='', sort_type='',
lang='th', ini_menu='', ini_content='', show_search='',
fix_active='', hierarchy='', xmonth='', tab='by_country',
global_key='', imex_type='import', tariff_code='27111100000',
country_code=''),ua) %>%
httr::content(.,as="parsed") %>% html_text()
# wrangling: isolate the per-country block after the "(Baht)" header,
# strip formatting, and pull "<country> <weight> <baht>" triples
v<-thimportdata %>%
str_split(.,"(Baht)") %>% unlist(., use.names=FALSE) %>% .[3] %>%
str_remove_all(.,"\t") %>% str_remove_all(.,",") %>%
str_remove_all(.,"\r") %>% gsub("\n"," ",.) %>%
gsub("\\s{2,}"," ",.) %>% gsub(")","",.) %>%
str_extract_all(.,"\\D+\\s{1,}\\d+\\s\\d+") %>%
unlist(., use.names=FALSE) %>% trimws() %>% head(.,-1) %>%
str_remove(.,"\\D\\D\\s")
# split the numeric pair into Weight and Baht columns:
v1<-str_extract(v,"\\d+\\s\\d+") %>%
as.data.frame() %>%
separate(.,.,sep="\\s",into = c("Weight","Baht")) %>%
sapply(.,as.numeric) %>%
as.data.frame()
# convert weight to millions of units:
v1$Weight<-v1$Weight/1000000
Country<-str_extract(v,"\\D+\\s{1,}") %>% trimws()
# final table, dropping all-zero rows:
thimport<-add_column(v1,Country,.before="Weight") %>%
filter(!Baht==0 & !Weight==0)
#notification
if (!sum(thimport$Weight)==0){
pushover(message='Thai imports are updated',
user="uccrmx7ajshvdsgbx2e2qy17eorpsx",
app="akhzmh5yoco7koy31oos1micwsbxh7")
} | /thai_import.R | no_license | jonfoong/argus-scrapers | R | false | false | 1,786 | r | library(httr)
library(rvest)
library(tm)
library(tidyverse)
library(pushoverr)
url <- "http://www.customs.go.th/statistic_report.php?show_search=1"
ua <- user_agent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36")
# specify month
cmth<-Sys.Date() %>% str_remove("\\d{4}-") %>%
str_extract("\\d{2}") %>% as.numeric() -1
if(cmth==0){
mth<-12
} else{
mth<-cmth
}
#extraction
thimportdata<-POST(url,body = list(
month=mth, year='2020', top_menu='menu_homepage',left_menu='',
current_id='', s_page='', order_by='', sort_type='',
lang='th', ini_menu='', ini_content='', show_search='',
fix_active='', hierarchy='', xmonth='', tab='by_country',
global_key='', imex_type='import', tariff_code='27111100000',
country_code=''),ua) %>%
httr::content(.,as="parsed") %>% html_text()
#wrangling
v<-thimportdata %>%
str_split(.,"(Baht)") %>% unlist(., use.names=FALSE) %>% .[3] %>%
str_remove_all(.,"\t") %>% str_remove_all(.,",") %>%
str_remove_all(.,"\r") %>% gsub("\n"," ",.) %>%
gsub("\\s{2,}"," ",.) %>% gsub(")","",.) %>%
str_extract_all(.,"\\D+\\s{1,}\\d+\\s\\d+") %>%
unlist(., use.names=FALSE) %>% trimws() %>% head(.,-1) %>%
str_remove(.,"\\D\\D\\s")
v1<-str_extract(v,"\\d+\\s\\d+") %>%
as.data.frame() %>%
separate(.,.,sep="\\s",into = c("Weight","Baht")) %>%
sapply(.,as.numeric) %>%
as.data.frame()
v1$Weight<-v1$Weight/1000000
Country<-str_extract(v,"\\D+\\s{1,}") %>% trimws()
thimport<-add_column(v1,Country,.before="Weight") %>%
filter(!Baht==0 & !Weight==0)
#notification
if (!sum(thimport$Weight)==0){
pushover(message='Thai imports are updated',
user="uccrmx7ajshvdsgbx2e2qy17eorpsx",
app="akhzmh5yoco7koy31oos1micwsbxh7")
} |
# Build a long-format lookup table mapping each data tick to its measurement
# number, from the rate-span boundaries stored in an assay XML document.
#
# u: parsed XML document node set; 'xpathSApply' and 'xmlValue' must be
#    available (XML package), as in the original code.
# Returns a data.frame with columns Tick and Measure.
tick_table <- function(u) {
  start <- as.numeric(xpathSApply(u, "//AssayDataSet//RateSpans//StartTickIndex", xmlValue))
  end <- as.numeric(xpathSApply(u, "//AssayDataSet//RateSpans//EndTickIndex", xmlValue))
  # one data.frame per span: every tick in [start, end] tagged with its
  # 1-based measurement index (removed the unused `Measure` local):
  spans <- Map(function(s, e, m) {
    data.frame(Tick = seq(from = s, to = e, by = 1), Measure = m)
  }, s = start, e = end, m = seq_along(start))
  do.call("rbind", spans)
}
| /R/asyr_tick_table.R | no_license | JARS3N/asyr | R | false | false | 375 | r | tick_table<-function(u){
start=as.numeric(xpathSApply(u,"//AssayDataSet//RateSpans//StartTickIndex",xmlValue))
end=as.numeric(xpathSApply(u,"//AssayDataSet//RateSpans//EndTickIndex",xmlValue))
Measure=seq_along(start)
df<-Map(function(x,y,z){
data.frame(Tick=seq(from=x,to=y,by=1),Measure=z)
},x=start,y=end,z=seq_along(start))
do.call('rbind',df)
}
|
# Copyright (c) 2014 Clear Channel Broadcasting, Inc.
# https://github.com/iheartradio/ShinyBuilder
# Licensed under the MIT License (MIT)
# Package load hook: attach runtime dependencies and register the package's
# www/ directory so Shiny can serve its static assets.
# NOTE(review): require() inside .onLoad is discouraged (R packages should
# declare shiny/RJSONIO in Imports and rely on the namespace instead);
# left unchanged here to preserve behavior.
.onLoad <- function(libname, pkgname) {
require(shiny)
require(RJSONIO)
addResourcePath("ShinyBuilder", system.file("www", package = "ShinyBuilder"))
}
.onAttach <- function(libname, pkgname) {
require(shiny)
} | /R/zzz.R | permissive | vsalesa/ShinyBuilder | R | false | false | 365 | r | # Copyright (c) 2014 Clear Channel Broadcasting, Inc.
# https://github.com/iheartradio/ShinyBuilder
# Licensed under the MIT License (MIT)
.onLoad <- function(libname, pkgname) {
require(shiny)
require(RJSONIO)
addResourcePath("ShinyBuilder", system.file("www", package = "ShinyBuilder"))
}
.onAttach <- function(libname, pkgname) {
require(shiny)
} |
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. cacheSolve computes the inverse of that special "matrix"; if the
## inverse has already been calculated (and the matrix has not changed),
## cacheSolve retrieves it from the cache instead of recomputing.

## makeCacheMatrix returns a list of four closures over the matrix `x` and
## its cached inverse `m`:
##   set(y)       replace the stored matrix and invalidate the cache
##   get()        return the stored matrix
##   setmatrix(v) store a computed inverse in the cache
##   getmatrix()  return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # new matrix -> old inverse is stale
  }
  get <- function() x
  setmatrix <- function(solve) m <<- solve
  getmatrix <- function() m
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}

## cacheSolve returns the inverse of the special "matrix" x, using the cached
## value when available (announced via a message). On a cache miss it fetches
## the matrix, inverts it with solve(), stores the result, and returns it.
## BUG FIX: the original did `matrix <- x$get` (missing parentheses), which
## stored the getter *function* itself and then passed it to solve(), causing
## an error on every cache miss. We now call x$get() and avoid shadowing
## base::matrix with the local name.
cacheSolve <- function(x = matrix(), ...) {
  m <- x$getmatrix()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  mat <- x$get()          # call the getter to obtain the actual matrix
  m <- solve(mat, ...)
  x$setmatrix(m)
  m
}
| /cachematrix.R | no_license | jerrybiii/ProgrammingAssignment2 | R | false | false | 1,549 | r | ## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve should retrieve the inverse from
## the cache.
##
##
## makeCacheMatrix creates a vector that is a list of functions. The functions are to:
## set substitutes y for x in the makeCacheMatrix vector
## get returns the vector from the makeCacheMatrix function
## setmatrix stores the value of the input variable m in the makeCacheMatrix function
## getmatrix returns the value of m in the makeCacheMatrix function
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL
}
get<-function() x
setmatrix<-function(solve) m<<- solve
getmatrix<-function() m
list(set=set, get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## cacheSolve first verifies 'm' as already stored by getmean.
## If getmean is 'Null" then message "getting cached data" is returned and a new
## matrix is solved
## If getmean was cached from the previous function the return(m) prints the
## solved makeCacheMatrix function
cacheSolve <- function(x=matrix(), ...) {
m<-x$getmatrix()
if(!is.null(m)){
message("getting cached data")
return(m)
}
matrix<-x$get
m<-solve(matrix, ...)
x$setmatrix(m)
m
}
|
## TODO: add verbose = FALSE argument cut down on chatter
#' @title Allocate soil properties within various classification systems.
#'
#' @description Generic function to allocate soil properties to different classification schemes.
#'
#' @param ... arguments to specific allocation functions, see details and examples
#'
#' @param to character specifying the classification scheme: FAO Salt Severity, FAO Black Soil (see details for the required \code{...})
#'
#' @param droplevels logical indicating whether to drop unused levels in factors. This is useful when the results have a large number of unused classes, which can waste space in tables and figures.
#'
#'
#' @details
#' This function is intended to allocate a set of soil properties to an established soil classification scheme, such as Salt Severity or Black Soil. Allocation is semantically different from classification. While classification is the 'act' of developing a grouping scheme, allocation is the assignment or identification of measurements to a established class (Powell, 2008).
#'
#' ## Usage Details
#'
#' Each classification scheme (\code{to} argument) uses a different set of arguments.
#'
#' - `FAO Salt Severity`
#' + **EC:** electrical conductivity column name, dS/m
#' + **pH:** pH column name, saturated paste extract
#' + **ESP:** exchangeable sodium percentage column name, percent
#'
#' - `FAO Black Soils`
#' + **object:** a `data.frame` or `SoilProfileCollection`
#' + **pedonid:** pedon ID column name, required when \code{object} is a \code{data.frame}
#' + **hztop:** horizon top depth column name, required when \code{object} is a \code{data.frame}
#' + **hzbot:** horizon bottom depth column name, required when \code{object} is a \code{data.frame}
#' + **OC**: organic carbon column name, percent
#' + **m_chroma:** moist Munsell chroma column name
#' + **m_value:** moist Munsell value column name
#' + **d_value:** dry Munsell value column name
#' + **CEC:** cation exchange capacity column name (NH4OAc at pH 7), units of cmol(+)/kg soil
#' + **BS:** base saturation column name (NH4OAc at pH 7), percent
#' + **tropical:** logical, data are associated with "tropical soils"
#'
#' - `ST Diagnostic Features`
#' + **object:** a `data.frame` or `SoilProfileCollection`
#' + **pedonid:** pedon ID column name, required when \code{object} is a \code{data.frame}
#' + **hzname:** horizon name column, required when \code{object} is a \code{data.frame}
#' + **hztop:** horizon top depth column name, required when \code{object} is a \code{data.frame}
#' + **hzbot:** horizon bottom depth column name, required when \code{object} is a \code{data.frame}
#' + **texcl:** soil texture class (USDA) column name
#' + **rupresblkcem:** rupture resistance column name
#' + **m_value:** moist Munsell value column name
#' + **m_chroma:** moist Munsell chroma column name
#' + **d_value:** dry Munsell value column name
#' + **BS:** base saturation column name (method ??), percent
#' + **OC**: organic carbon column name, percent
#' + **n_value:** ??
#' + **featkind:** ??
#'
#' @note The results returned by \code{allocate(to = "ST Diagnostic Features")} currently return a limited set of diagnostic features that are easily defined. Also, the logic implemented for some features does not include all the criteria defined in the Keys to Soil Taxonomy.
#'
#'
#'
#' @return A vector or \code{data.frame} object.
#'
#' @references
#' Abrol, I., Yadav, J. & Massoud, F. 1988. \href{https://www.fao.org/3/x5871e/x5871e00.htm}{Salt-affected soils and their management}. No. Bulletin 39. Rome, FAO Soils.
#'
#' FAO. 2006. \href{https://www.fao.org/publications/card/en/c/903943c7-f56a-521a-8d32-459e7e0cdae9/}{Guidelines for soil description}. Rome, Food and Agriculture Organization of the United Nations.
#'
#' FAO. 2020. DEFINITION | What is a black soil? (online). (Cited 28 December 2020). http://www.fao.org/global-soil-partnership/intergovernmental-technical-panel-soils/gsoc17-implementation/internationalnetworkblacksoils/more-on-black-soils/definition-what-is-a-black-soil/es/
#'
#' Powell, B., 2008. Classifying soil and land, in: McKenzie, N.J., Grundy, M.J., Webster, R., Ringrose-Voase, A.J. (Eds.), Guidelines for Survey Soil and Land Resources, Australian Soil and Land Survey Handbook Series. CSIRO, Melbourne, p. 572.
#'
#' Richards, L.A. 1954. \href{https://www.ars.usda.gov/ARSUserFiles/20360500/hb60_pdf/hb60complete.pdf}{Diagnosis and Improvement of Saline and Alkali Soils}. U. S. Government Printing Office. 166 pp.
#'
#' Soil Survey Staff, 2014. Keys to Soil Taxonomy, 12th ed. USDA-Natural Resources Conservation Service, Washington, D.C.
#'
#'
#' @export
#'
#' @examples
#'
#' # Salt Severity
#' test <- expand.grid(
#' EC = sort(sapply(c(0, 0.75, 2, 4, 8, 15, 30), function(x) x + c(0, -0.05, 0.05))),
#' pH = c(8.1, 8.2, 8.3, 8.4, 8.5, 8.6),
#' ESP = sort(sapply(c(0, 15, 30, 50, 70, 100), function(x) x + c(0, 0.1, -0.1)))
#' )
#' test$ss <- with(test, allocate(EC = EC, pH = pH, ESP = ESP, to = "FAO Salt Severity"))
#' table(test$ss)
#'
#' # Black Soil Category 1 (BS1)
#' test <- expand.grid(
#' dept = seq(0, 50, 10),
#' OC = sort(sapply(c(0, 0.6, 1.2, 20, 40), function(x) x + c(0, -0.05, 0.05))),
#' chroma_moist = 2:4,
#' value_moist = 2:4,
#' value_dry = 4:6,
#' thickness = 24:26,
#' CEC = 24:26,
#' BS = 49:51,
#' tropical = c(TRUE, FALSE)
#' )
#' test$pedon_id <- rep(1:21870, each = 6)
#' test$depb <- test$dept + 10
#'
#' bs1 <- allocate(test, pedonid = "pedon_id", hztop = "dept", hzbot = "depb",
#' OC = "OC", m_chroma = "chroma_moist", m_value = "value_moist",
#' d_value = "value_dry", CEC = "CEC", BS = "BS",
#' to = "FAO Black Soil"
#' )
#'
#' table(BS1 = bs1$BS1, BS2 = bs1$BS2)
#'
#'
#' # SoilProfileCollection interface
#'
#' data(sp3)
#' depths(sp3) <- id ~ top + bottom
#' hzdesgnname(sp3) <- 'name'
#'
#' # fake base saturation
#' horizons(sp3)$bs <- 75
#'
#' plotSPC(sp3)
#'
#' allocate(
#' sp3,
#' to = 'FAO Black Soil',
#' OC = 'tc',
#' m_chroma = 'chroma',
#' m_value = 'value',
#' d_value = 'value',
#' CEC = 'cec',
#' BS = 'bs'
#' )
#'
#' # make a copy and edit horizon values
#' x <- sp3
#' x$value <- 2
#' x$chroma <- 2
#' x$cec <- 26
#' x$tc <- 2
#'
#' x$soil_color <- munsell2rgb(x$hue, x$value, x$chroma)
#'
#' plotSPC(x)
#'
#' allocate(
#' x,
#' to = 'FAO Black Soil',
#' OC = 'tc',
#' m_chroma = 'chroma',
#' m_value = 'value',
#' d_value = 'value',
#' CEC = 'cec',
#' BS = 'bs'
#' )
#'
#'
#' # Soil Taxonomy Diagnostic Features
#' data(sp1)
#' sp1$texcl = gsub("gr|grv|cbv", "", sp1$texture)
#' df <- allocate(object = sp1, pedonid = "id", hzname = "name",
#' hzdept = "top", hzdepb = "bottom", texcl = "texcl",
#' to = "ST Diagnostic Features"
#' )
#' aggregate(featdept ~ id, data = df, summary)
#'
allocate <- function(..., to = c("FAO Salt Severity", "FAO Black Soil", "ST Diagnostic Features"), droplevels = FALSE) {

  # validate the requested classification scheme (exactly one)
  to <- match.arg(to, several.ok = FALSE)

  # dispatch to the scheme-specific worker; `...` is forwarded unchanged
  res <- switch(
    to,

    "FAO Salt Severity" = .rank_salts(..., system = to, droplevels = droplevels),

    "FAO Black Soil" = .black_soil(...),

    "ST Diagnostic Features" = {
      # guess each supported diagnostic feature independently,
      # then stack the per-feature results into one data.frame
      feats <- c(
        "lithic contact", "paralithic contact", "densic contact",
        "petrocalcic horizon", "calcic horizon", "secondary carbonates",
        "mollic epipedon"
      ) # , "reduced matrix"
      do.call("rbind", lapply(feats, function(f) .guess_df(..., featkind = f)))
    }
  )

  res
}
# TODO: add USDA and other salt classes
## TODO: consider optional object = NULL
## TODO: safe handling of NA
# Allocate soil properties to the FAO Salt Severity classes.
#
# EC  : electrical conductivity, dS/m
# pH  : pH of the saturated paste extract; only used where both ESP and SAR are missing
# ESP : exchangeable sodium percentage, percent
# SAR : sodium adsorption ratio; converted to ESP (Richards, 1954) where ESP is missing
#
# Returns a factor with the full set of 11 salt severity levels,
# optionally with unused levels dropped.
.rank_salts <- function(EC = NULL, pH = NULL, ESP = NULL, SAR = NULL, system = "FAO Salt Severity", droplevels = FALSE) {

  l <- list(EC = EC, pH = pH, ESP = ESP, SAR = SAR)

  # tests ----
  # ESP vs SAR
  if (!is.null(SAR)) {
    # conversion formula from USDA Handbook 60 (Richards, 1954)
    warning("SAR will be converted to ESP via Richards (1954) conversion formula.")
  }
  if (!is.null(ESP) && !is.null(SAR)) {
    warning("Both ESP & SAR are present, SAR will only be used where ESP is missing.")
  }

  # minimum dataset: EC plus either ESP or SAR
  if (any(sapply(l[c(1, 3)], is.null)) && any(sapply(l[c(1, 4)], is.null))) {
    warning("the minimum dataset of soil properties for allocating to the Salt Severity classes are: EC (aka Electrical Conductivity), and ESP (aka Exchangeable Sodium Percentage) or SAR (aka Sodium Adsorption Ratio)")
  }

  # pH rule
  if (any(!complete.cases(EC, ESP)) || any(!complete.cases(EC, SAR))) {
    warning("pH is used where both ESP and SAR are missing")
  }

  # length: all supplied vectors must share a common length
  n <- sapply(l, length)
  if (! all(max(n) == n[1:3]) && ! all(max(n) == n[c(1:2, 4)])) {
    stop("all arguments must have the same length")
  }

  # levels ----
  fao_lev <- c(
    c("none", "slightly saline", "moderately saline", "strongly saline", "very strongly saline", "extremely saline"),
    c("none", "slightly sodic", "moderately sodic", "strongly sodic", "very strongly sodic")
  )

  # default class when no rule below matches
  sc <- rep("none", times = length(EC))

  ## TODO: consider separate saline / sodic classification
  # estimate ESP from SAR ----
  if (is.null(ESP)) ESP <- rep(NA_real_, times = length(EC))
  if (is.null(SAR)) SAR <- rep(NA_real_, times = length(EC))

  # Richards (1954) SAR -> ESP conversion
  .esp <- function(SAR) {
    (100 * (-0.0126 + 0.01475 * SAR)) /
      (1 + (-0.0126 + 0.01475 * SAR))
  }
  ESPx <- .esp(SAR)
  # fall back to the converted SAR only where ESP is missing
  ESP <- ifelse(is.na(ESP) & !is.na(SAR), ESPx, ESP)

  # rank ----
  # saline soils: low sodium (or unknown sodium with pH <= 8.2), classed by EC
  sc <- ifelse(EC > -1 & (ESP <= 15 | (is.na(ESP) & pH <= 8.2)), # & EC > 4 & pH <= 8.5,
               as.character(
                 cut(EC,
                     breaks = c(-1, 0.75, 2, 4, 8, 15, 1500),
                     labels = fao_lev[1:6],
                     right = FALSE
                 )),
               sc
  )

  # sodic soils: low EC, high ESP
  sc <- ifelse(EC <= 4 & ESP > 15, # | pH > 8.2
               as.character(
                 cut(ESP,
                     # breaks = c(0, 15, 30, 50, 70, 100),
                     breaks = c(-2, 30, 50, 70, 102),
                     labels = fao_lev[8:11],
                     right = FALSE
                 )),
               sc
  )

  # saline-sodic soils: high EC and high (or unknown but high-pH) sodium
  sc <- ifelse(EC > 4 & (ESP > 15 | (is.na(ESP) & pH > 8.2)), "saline-sodic", sc)

  # convert to factor, ordered from most saline to most sodic
  sc <- factor(sc, levels = c(fao_lev[6:1], fao_lev[8:11], "saline-sodic"))

  # optionally drop unused levels
  if (isTRUE(droplevels)) {
    sc <- droplevels(sc)
  }

  return(sc)
}
.codify <- function(x, system = "salt severity", droplevels = FALSE) {
  # route integer codes to the decoder for the requested classification
  # system; currently only "salt severity" is implemented
  if (system == "salt severity") {
    return(.codify_salt_severity(x, droplevels = droplevels))
  }
  # unknown systems yield NULL, invisibly (matches one-armed `if` semantics)
  invisible(NULL)
}
.codify_salt_severity <- function(x, droplevels = FALSE) {
  # x must already be an integer lookup code (1..11)
  if (!is.integer(x)) stop("x is not an integer")
  if (!all(unique(x) %in% c(1:11, NA))) warning("some x values do not match the lookup table")

  # FAO levels: 6 saline classes (most to least severe), 4 sodic classes,
  # plus the combined saline-sodic class
  saline <- c("none", "slightly saline", "moderately saline", "strongly saline", "very strongly saline", "extremely saline")
  sodic <- c("none", "slightly sodic", "moderately sodic", "strongly sodic", "very strongly sodic")
  lookup <- c(rev(saline), sodic[2:5], "saline-sodic")

  # decode integer codes via the lookup table
  out <- factor(x, levels = 1:11, labels = lookup)

  # optionally drop unused levels
  if (isTRUE(droplevels)) {
    out <- droplevels(out)
  }

  out
}
## TODO: there is currently no way to document these arguments, critical because each has an expected unit of measure
# Allocate pedon horizon data to the FAO Black Soil categories (BS1 & BS2).
#
# object   : data.frame or SoilProfileCollection of horizon data
# pedonid  : pedon ID column name (ignored for SPC input, taken from idname())
# hztop    : horizon top depth column name, cm (ignored for SPC input)
# hzbot    : horizon bottom depth column name, cm (ignored for SPC input)
# OC       : organic carbon column name, percent
# m_chroma : moist Munsell chroma column name
# m_value  : moist Munsell value column name
# d_value  : dry Munsell value column name
# CEC      : cation exchange capacity column name, cmol(+)/kg (optional; BS1 only)
# BS       : base saturation column name, percent (optional; BS1 only)
# tropical : logical, apply the lower (0.6%) OC threshold used for tropical soils
#
# Returns a data.frame with one row per pedon and logical BS1 / BS2 columns.
.black_soil <- function(object, pedonid = "peiid", hztop = "hzdept", hzbot = "hzdepb", OC = NULL, m_chroma = "m_chroma", m_value = "m_value", d_value = "d_value", CEC = NULL, BS = NULL, tropical = FALSE) { # thickness = NULL, horizon = TRUE

  # check object type
  # SoilProfileCollection objects have a number of required arguments defined internally
  if (inherits(object, 'SoilProfileCollection')) {
    # extract relevant metadata from the SPC
    pID <- idname(object)
    hztb <- horizonDepths(object)

    # horizons as data.frame-like obj
    df <- horizons(object)

    # setup variables used later, using SPC metadata if possible
    vars <- list(pedonid = pID, hztop = hztb[1], hzbot = hztb[2], OC = OC, m_chroma = m_chroma, m_value = m_value, d_value = d_value, CEC = CEC, BS = BS)
  } else {
    # this is likely a data.frame
    df <- object

    # setup variables used later, all from provided arguments
    vars <- list(pedonid = pedonid, hztop = hztop, hzbot = hzbot, OC = OC, m_chroma = m_chroma, m_value = m_value, d_value = d_value, CEC = CEC, BS = BS)
  }

  # check length of arguments
  if (any(sapply(vars, length) > 1)) {
    stop("the length of all arguments must be 1, except for object")
    # unlist() will drop NULL arguments, which is ok for CEC & BS
  } else vars <- unlist(vars)

  # check arguments match df colnames & subset
  # no vars should be NA, but this will catch them if they are
  idx <- !is.na(vars)
  mis <- vars[idx][! vars[idx] %in% names(df)]
  if (length(mis) > 0) {
    # name the offending columns so the mismatch is easy to find
    stop("column names in object must match the other character vector input arguments; not found in object: ", paste0(mis, collapse = ", "))
  } else {
    df <- df[vars[idx]]
    vars2 <- names(vars[idx])
    names(df) <- vars2
  }

  # criteria
  # 2nd category of Black Soils (color + organic carbon)
  # minimum dataset
  if (any(sapply(df[vars2][1:7], function(x) all(is.na(x))))) {
    stop("the minimum dataset of soil properties for allocating to the 2nd category of Black Soils are: OC (aka Organic Carbon), m_chroma, m_value, and d_value") # and thickness
  }

  bs2 <- with(df,
              (OC <= 20 & (OC >= 1.2 | (tropical == TRUE & OC >= 0.6)))
              & m_chroma <= 3
              & (m_value <= 3 & d_value <= 5)
  )

  # 1st category of Black Soils (additionally requires CEC & BS)
  # minimum dataset
  if (!is.null(CEC) && !is.null(BS) && all(c("CEC", "BS") %in% names(df))) {
    bs1 <- bs2 & df$CEC >= 25 & df$BS >= 50
  } else {
    message("the minimum dataset of soil properties for allocating to the 1st category of Black Soils, in addition to the 2nd category, are: CEC (aka Cation Exchange Capacity), BS (aka Base Saturation)")
    bs1 <- rep(NA, nrow(df))
  }

  # combine results and subset to 0-25cm
  df_bs <- cbind(df[vars2[1:3]], BS1 = bs1, BS2 = bs2)
  df_bs <- segment(df_bs, intervals = c(0, 25), hzdepcols = c("hztop", "hzbot"))
  df_bs <- df_bs[df_bs$segment_id == "00-25", -6]

  # aggregate the horizons: every horizon within 0-25cm must meet the criteria
  df_bs2 <- aggregate(cbind(BS1, BS2) ~ pedonid, data = df_bs, FUN = all, na.action = na.pass)
  df_bot <- aggregate(hzbot ~ pedonid, data = df_bs, FUN = function(x) max(x, na.rm = TRUE))

  # filter thickness > 25cm: horizon data must extend to the 25cm cut
  df_bs <- merge(df_bs2, df_bot, by = "pedonid", all.x = TRUE)
  df_bs <- within(df_bs, {
    BS1 = BS1 & as.integer(hzbot) == 25L
    BS2 = BS2 & as.integer(hzbot) == 25L
    hzbot = NULL
  })

  # restore the caller's pedon ID column name; use the resolved name so the
  # SPC path gets idname(object) rather than the unused `pedonid` default
  names(df_bs)[1] <- vars[["pedonid"]]
  return(df_bs)
}
# guess diagnostic features
# Approximate selected Soil Taxonomy diagnostic features from horizon
# designation patterns, texture class, rupture resistance (cementation),
# color, and chemistry. The criteria are simplified relative to the Keys
# to Soil Taxonomy (see the allocate() documentation).
#
# object   : data.frame or SoilProfileCollection of horizon data
# featkind : single diagnostic feature to guess (e.g. "lithic contact")
# all other arguments are column names within `object`; missing columns are
# filled with NA so the logic degrades gracefully
#
# Returns a data.frame of feature depths (pedonid, featkind, featdept,
# featdepb), or NULL when no feature column was created.
.guess_df <- function(object = NULL, pedonid = "peiid", hzname = "hzname", hzdept = "hzdept", hzdepb = "hzdepb", texcl = "texcl", rupresblkcem = "rupresblkcem", m_value = "m_value", d_value = "d_value", m_chroma = "m_chroma", BS = "BS", OC = "OC", n_value = "n_value", featkind = NULL) {

  # standardize inputs
  vars <- list(pedonid = pedonid, hzname = hzname, hzdept = hzdept, hzdepb = hzdepb, texcl = texcl, rupresblkcem = rupresblkcem, m_value = m_value, d_value = d_value, m_chroma = m_chroma, OC = OC, BS = BS, n_value = n_value)

  if (inherits(object, "SoilProfileCollection")) {
    df <- horizons(object)
  } else df <- object

  # check length of arguments
  if (any(sapply(vars, length) > 1)) {
    stop("the length of all arguments must be 1, except for object")
  } else vars <- unlist(vars)

  # check arguments match df colnames & subset
  # missing columns are added as NA so downstream rules treat them as unknown
  if (! all(vars %in% names(df))) {
    warning("the minimum dataset includes: pedonid, hzdept, hzdepb, and hzname; if texcl or rupresblkcem are missing the resulting diagnostic features are inferred from the available information")
    idx <- vars %in% names(df)
    mis <- vars[! idx]
    df <- df[vars[idx]]
    df[names(mis)] <- NA
    vars2 <- names(vars)
    names(df) <- vars2
  } else {
    df <- df[vars]
    vars2 <- names(vars)
    names(df) <- vars2
  }

  # normalize case before pattern matching below
  df$texcl <- tolower(df$texcl)
  df$rupresblkcem <- tolower(df$rupresblkcem)

  # match pattern
  # lithic contact ----
  if (featkind == "lithic contact") {
    message(paste("guessing", featkind))
    idx_hzn <- grepl("R|Dr", df$hzname) & !grepl("\\/", df$hzname)
    idx_tex <- !grepl("Cr|CR", df$hzname) & (df$texcl %in% c("br", "wb", "uwb") | is.na(df$texcl))
    # legacy codes lowercased ("h") to match the lowercased rupresblkcem values
    lev <- c("strongly cemented", "very strongly cemented", "indurated", "strongly", "extremely strongly", "h", "moderately coherent", "strongly coherent", "very strongly coherent")
    idx_cem <- df$rupresblkcem %in% lev | is.na(df$rupresblkcem)

    # error
    idx_err <- idx_hzn & (!idx_tex | !idx_cem)
    if (any(idx_err)) {
      message(paste("the following pedonid have R horizons that do not meet the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
    }

    df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
  }

  # paralithic contact ----
  if (featkind == "paralithic contact") {
    message(paste("guessing", featkind))
    idx_hzn <- grepl("Cr|CR", df$hzname)
    idx_tex <- !grepl("R|Dr", df$hzname) & (df$texcl %in% c("br", "wb", "uwb") | is.na(df$texcl))
    # "cememented" typo fixed; legacy code lowercased ("s") to match lowercased values
    lev <- c("extremely weakly cemented", "very weakly cemented", "weakly cemented", "moderately cemented", "weakly", "moderately", "s")
    idx_cem <- df$rupresblkcem %in% lev | is.na(df$rupresblkcem)

    # error
    idx_err <- idx_hzn & (!idx_tex | !idx_cem)
    if (any(idx_err)) {
      message(paste("the following pedonid have Cr horizons that do not meet the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
    }

    df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
  }

  # densic contact ----
  if (featkind == "densic contact") {
    message(paste("guessing", featkind))
    # [1-9] fixed: inside a character class "[1:9]" matches only '1', ':' and '9'
    idx <- grepl("d$|D$|d[1-9]|D[1-9]", df$hzname)
    df$featkind <- ifelse(idx == TRUE, featkind, NA)
  }

  # petrocalcic horizon ----
  if (featkind == "petrocalcic horizon") {
    message(paste("guessing", featkind))
    idx_hzn <- grepl("kkm|kkqm", df$hzname)
    idx_tex <- ((grepl("cem", df$texcl) & !grepl("-br", df$texcl)) | is.na(df$texcl))
    lev <- "noncemented"
    idx_cem <- !df$rupresblkcem %in% lev | is.na(df$rupresblkcem)

    # error
    idx_err <- idx_hzn & (!idx_tex | !idx_cem)
    if (any(idx_err)) {
      message(paste("the following pedonid have Bkkm|Bkkqm horizons that do not meet the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
    }

    df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
  }

  # calcic horizon ----
  if (featkind == "calcic horizon") {
    message(paste("guessing", featkind))
    # [1-9] fixed (see densic contact)
    idx_hzn <- grepl("kk$|kk[1-9]|kkq$|kkq[1-9]|kkb$|kkb[1-9]", df$hzname)
    idx_tex <- (!grepl("cem-", df$texcl) | is.na(df$texcl))
    lev <- "noncemented"
    idx_cem <- df$rupresblkcem %in% lev | is.na(df$rupresblkcem)

    # error
    idx_err <- idx_hzn & (!idx_tex | !idx_cem)
    if (any(idx_err)) {
      message(paste("the following pedonid have Bkk horizons that do not meet the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
    }

    df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
  }

  # secondary carbonates ----
  if (featkind == "secondary carbonates") {
    message(paste("guessing", featkind))
    # "k" suffix but not the "kk" (carbonate-engulfed) designation
    idx_hzn <- grepl("k", df$hzname) & !grepl("kk", df$hzname)
    df$featkind <- ifelse(idx_hzn, featkind, NA)
  }

  # mollic epipedon ----
  if (featkind == "mollic epipedon") {
    message(paste("guessing", featkind))
    idx_hzn <- !grepl("O|Ao|R|W|M|C|\\/", df$hzname)
    idx_tex <- df$texcl %in% levels(SoilTextureLevels()) | is.na(df$texcl)
    # need to add structure to fetchNASIS
    idx_col <-
      (df$m_value <= 3 | is.na(df$m_value)) &
      (df$d_value <= 5 | is.na(df$d_value)) &
      df$m_chroma <= 3 &
      (!is.na(df$m_value) | !is.na(df$d_value))
    idx_bs <- (df$BS >= 50 | is.na(df$BS)) # & (df$ph1to1 > 6 | is.na(df$ph1to1))
    idx_oc <-
      ((df$OC >= 2.5 | is.na(df$OC)) & (df$m_value %in% 4:5 | is.na(df$m_value))) |
      ((df$OC >= 0.6 | is.na(df$OC)) & (df$m_value < 4 | is.na(df$m_value)))
    idx_nv <- (df$n_value < 0.7 | is.na(df$n_value))

    df$featkind <- ifelse(idx_hzn & idx_tex & idx_col & idx_bs & idx_oc & idx_nv, featkind, NA)
  }

  # subset features
  idx <- "featkind" %in% names(df)
  if (idx) {
    df_sub <- df[!is.na(df$featkind), ]

    # aggregate depths ----
    idx <- !is.na(df_sub$featkind)
    if (any(idx) & sum(idx, na.rm = TRUE) > 1) {
      # multiple horizons: feature spans min(top) to max(bottom) per pedon
      sp <- aggregate(hzdept ~ pedonid + featkind, data = df_sub, FUN = function(x) min(x, na.rm = TRUE))
      sp$featdepb <- aggregate(hzdepb ~ pedonid + featkind, data = df_sub, FUN = function(x) max(x, na.rm = TRUE))$hzdepb
      names(sp)[3] <- "featdept"
    } else {
      sp <- df_sub[c("pedonid", "featkind", "hzdept", "hzdepb")]
      names(sp)[3:4] <- c("featdept", "featdepb")
    }

    # restore the caller's pedon ID column name
    names(sp)[1] <- vars[1]

    # minimum thickness criteria per feature
    if (featkind == "petrocalcic horizon") {
      sp <- sp[(sp$featdepb - sp$featdept) >= 10, ]
    }
    # need to add more logic to capture when < 10cm is cemented
    if (featkind == "calcic horizon") {
      sp <- sp[(sp$featdepb - sp$featdept) >= 15, ]
    }
    # needs additional criteria to get a variable depth or use Andrew's function
    if (featkind == "mollic epipedon") {
      sp <- sp[(sp$featdepb - sp$featdept) >= 18, ]
    }
  } else sp <- NULL

  return(sp)
}
| /R/allocate.R | no_license | ncss-tech/aqp | R | false | false | 24,404 | r |
## TODO: add verbose = FALSE argument cut down on chatter
#' @title Allocate soil properties within various classification systems.
#'
#' @description Generic function to allocate soil properties to different classification schemes.
#'
#' @param ... arguments to specific allocation functions, see details and examples
#'
#' @param to character specifying the classification scheme: FAO Salt Severity, FAO Black Soil (see details for the required \code{...})
#'
#' @param droplevels logical indicating whether to drop unused levels in factors. This is useful when the results have a large number of unused classes, which can waste space in tables and figures.
#'
#'
#' @details
#' This function is intended to allocate a set of soil properties to an established soil classification scheme, such as Salt Severity or Black Soil. Allocation is semantically different from classification. While classification is the 'act' of developing a grouping scheme, allocation is the assignment or identification of measurements to a established class (Powell, 2008).
#'
#' ## Usage Details
#'
#' Each classification scheme (\code{to} argument) uses a different set of arguments.
#'
#' - `FAO Salt Severity`
#' + **EC:** electrical conductivity column name, dS/m
#' + **pH:** pH column name, saturated paste extract
#' + **ESP:** exchangeable sodium percentage column name, percent
#'
#' - `FAO Black Soils`
#' + **object:** a `data.frame` or `SoilProfileCollection`
#' + **pedonid:** pedon ID column name, required when \code{object} is a \code{data.frame}
#' + **hztop:** horizon top depth column name, required when \code{object} is a \code{data.frame}
#' + **hzbot:** horizon bottom depth column name, required when \code{object} is a \code{data.frame}
#' + **OC**: organic carbon column name, percent
#' + **m_chroma:** moist Munsell chroma column name
#' + **m_value:** moist Munsell value column name
#' + **d_value:** dry Munsell value column name
#' + **CEC:** cation exchange capacity column name (NH4OAc at pH 7), units of cmol(+)/kg soil
#' + **BS:** base saturation column name (NH4OAc at pH 7), percent
#' + **tropical:** logical, data are associated with "tropical soils"
#'
#' - `ST Diagnostic Features`
#' + **object:** a `data.frame` or `SoilProfileCollection`
#' + **pedonid:** pedon ID column name, required when \code{object} is a \code{data.frame}
#' + **hzname:** horizon name column, required when \code{object} is a \code{data.frame}
#' + **hztop:** horizon top depth column name, required when \code{object} is a \code{data.frame}
#' + **hzbot:** horizon bottom depth column name, required when \code{object} is a \code{data.frame}
#' + **texcl:** soil texture class (USDA) column name
#' + **rupresblkcem:** rupture resistance column name
#' + **m_value:** moist Munsell value column name
#' + **m_chroma:** moist Munsell chroma column name
#' + **d_value:** dry Munsell value column name
#' + **BS:** base saturation column name (method ??), percent
#' + **OC**: organic carbon column name, percent
#' + **n_value:** ??
#' + **featkind:** ??
#'
#' @note The results returned by \code{allocate(to = "ST Diagnostic Features")} currently return a limited set of diagnostic features that are easily defined. Also, the logic implemented for some features does not include all the criteria defined in the Keys to Soil Taxonomy.
#'
#'
#'
#' @return A vector or \code{data.frame} object.
#'
#' @references
#' Abrol, I., Yadav, J. & Massoud, F. 1988. \href{https://www.fao.org/3/x5871e/x5871e00.htm}{Salt-affected soils and their management}. No. Bulletin 39. Rome, FAO Soils.
#'
#' FAO. 2006. \href{https://www.fao.org/publications/card/en/c/903943c7-f56a-521a-8d32-459e7e0cdae9/}{Guidelines for soil description}. Rome, Food and Agriculture Organization of the United Nations.
#'
#' FAO. 2020. DEFINITION | What is a black soil? (online). (Cited 28 December 2020). http://www.fao.org/global-soil-partnership/intergovernmental-technical-panel-soils/gsoc17-implementation/internationalnetworkblacksoils/more-on-black-soils/definition-what-is-a-black-soil/es/
#'
#' Powell, B., 2008. Classifying soil and land, in: McKenzie, N.J., Grundy, M.J., Webster, R., Ringrose-Voase, A.J. (Eds.), Guidelines for Survey Soil and Land Resources, Australian Soil and Land Survey Handbook Series. CSIRO, Melbourne, p. 572.
#'
#' Richards, L.A. 1954. \href{https://www.ars.usda.gov/ARSUserFiles/20360500/hb60_pdf/hb60complete.pdf}{Diagnosis and Improvement of Saline and Alkali Soils}. U. S. Government Printing Office. 166 pp.
#'
#' Soil Survey Staff, 2014. Keys to Soil Taxonomy, 12th ed. USDA-Natural Resources Conservation Service, Washington, D.C.
#'
#'
#' @export
#'
#' @examples
#'
#' # Salt Severity
#' test <- expand.grid(
#' EC = sort(sapply(c(0, 0.75, 2, 4, 8, 15, 30), function(x) x + c(0, -0.05, 0.05))),
#' pH = c(8.1, 8.2, 8.3, 8.4, 8.5, 8.6),
#' ESP = sort(sapply(c(0, 15, 30, 50, 70, 100), function(x) x + c(0, 0.1, -0.1)))
#' )
#' test$ss <- with(test, allocate(EC = EC, pH = pH, ESP = ESP, to = "FAO Salt Severity"))
#' table(test$ss)
#'
#' # Black Soil Category 1 (BS1)
#' test <- expand.grid(
#' dept = seq(0, 50, 10),
#' OC = sort(sapply(c(0, 0.6, 1.2, 20, 40), function(x) x + c(0, -0.05, 0.05))),
#' chroma_moist = 2:4,
#' value_moist = 2:4,
#' value_dry = 4:6,
#' thickness = 24:26,
#' CEC = 24:26,
#' BS = 49:51,
#' tropical = c(TRUE, FALSE)
#' )
#' test$pedon_id <- rep(1:21870, each = 6)
#' test$depb <- test$dept + 10
#'
#' bs1 <- allocate(test, pedonid = "pedon_id", hztop = "dept", hzbot = "depb",
#' OC = "OC", m_chroma = "chroma_moist", m_value = "value_moist",
#' d_value = "value_dry", CEC = "CEC", BS = "BS",
#' to = "FAO Black Soil"
#' )
#'
#' table(BS1 = bs1$BS1, BS2 = bs1$BS2)
#'
#'
#' # SoilProfileCollection interface
#'
#' data(sp3)
#' depths(sp3) <- id ~ top + bottom
#' hzdesgnname(sp3) <- 'name'
#'
#' # fake base saturation
#' horizons(sp3)$bs <- 75
#'
#' plotSPC(sp3)
#'
#' allocate(
#' sp3,
#' to = 'FAO Black Soil',
#' OC = 'tc',
#' m_chroma = 'chroma',
#' m_value = 'value',
#' d_value = 'value',
#' CEC = 'cec',
#' BS = 'bs'
#' )
#'
#' # make a copy and edit horizon values
#' x <- sp3
#' x$value <- 2
#' x$chroma <- 2
#' x$cec <- 26
#' x$tc <- 2
#'
#' x$soil_color <- munsell2rgb(x$hue, x$value, x$chroma)
#'
#' plotSPC(x)
#'
#' allocate(
#' x,
#' to = 'FAO Black Soil',
#' OC = 'tc',
#' m_chroma = 'chroma',
#' m_value = 'value',
#' d_value = 'value',
#' CEC = 'cec',
#' BS = 'bs'
#' )
#'
#'
#' # Soil Taxonomy Diagnostic Features
#' data(sp1)
#' sp1$texcl = gsub("gr|grv|cbv", "", sp1$texture)
#' df <- allocate(object = sp1, pedonid = "id", hzname = "name",
#' hzdept = "top", hzdepb = "bottom", texcl = "texcl",
#' to = "ST Diagnostic Features"
#' )
#' aggregate(featdept ~ id, data = df, summary)
#'
allocate <- function(..., to = c("FAO Salt Severity", "FAO Black Soil", "ST Diagnostic Features"), droplevels = FALSE) {

  # validate the requested classification scheme (exactly one)
  to <- match.arg(to, several.ok = FALSE)

  # dispatch to the scheme-specific worker; `...` is forwarded unchanged
  res <- switch(
    to,

    "FAO Salt Severity" = .rank_salts(..., system = to, droplevels = droplevels),

    "FAO Black Soil" = .black_soil(...),

    "ST Diagnostic Features" = {
      # guess each supported diagnostic feature independently,
      # then stack the per-feature results into one data.frame
      feats <- c(
        "lithic contact", "paralithic contact", "densic contact",
        "petrocalcic horizon", "calcic horizon", "secondary carbonates",
        "mollic epipedon"
      ) # , "reduced matrix"
      do.call("rbind", lapply(feats, function(f) .guess_df(..., featkind = f)))
    }
  )

  res
}
# TODO: add USDA and other salt classes
## TODO: consider optional object = NULL
## TODO: safe handling of NA
# Allocate soil properties to the FAO Salt Severity classes.
#
# EC  : electrical conductivity, dS/m
# pH  : pH of the saturated paste extract; only used where both ESP and SAR are missing
# ESP : exchangeable sodium percentage, percent
# SAR : sodium adsorption ratio; converted to ESP (Richards, 1954) where ESP is missing
#
# Returns a factor with the full set of 11 salt severity levels,
# optionally with unused levels dropped.
.rank_salts <- function(EC = NULL, pH = NULL, ESP = NULL, SAR = NULL, system = "FAO Salt Severity", droplevels = FALSE) {

  l <- list(EC = EC, pH = pH, ESP = ESP, SAR = SAR)

  # tests ----
  # ESP vs SAR
  if (!is.null(SAR)) {
    # conversion formula from USDA Handbook 60 (Richards, 1954)
    warning("SAR will be converted to ESP via Richards (1954) conversion formula.")
  }
  if (!is.null(ESP) && !is.null(SAR)) {
    warning("Both ESP & SAR are present, SAR will only be used where ESP is missing.")
  }

  # minimum dataset: EC plus either ESP or SAR
  if (any(sapply(l[c(1, 3)], is.null)) && any(sapply(l[c(1, 4)], is.null))) {
    warning("the minimum dataset of soil properties for allocating to the Salt Severity classes are: EC (aka Electrical Conductivity), and ESP (aka Exchangeable Sodium Percentage) or SAR (aka Sodium Adsorption Ratio)")
  }

  # pH rule
  if (any(!complete.cases(EC, ESP)) || any(!complete.cases(EC, SAR))) {
    warning("pH is used where both ESP and SAR are missing")
  }

  # length: all supplied vectors must share a common length
  n <- sapply(l, length)
  if (! all(max(n) == n[1:3]) && ! all(max(n) == n[c(1:2, 4)])) {
    stop("all arguments must have the same length")
  }

  # levels ----
  fao_lev <- c(
    c("none", "slightly saline", "moderately saline", "strongly saline", "very strongly saline", "extremely saline"),
    c("none", "slightly sodic", "moderately sodic", "strongly sodic", "very strongly sodic")
  )

  # default class when no rule below matches
  sc <- rep("none", times = length(EC))

  ## TODO: consider separate saline / sodic classification
  # estimate ESP from SAR ----
  if (is.null(ESP)) ESP <- rep(NA_real_, times = length(EC))
  if (is.null(SAR)) SAR <- rep(NA_real_, times = length(EC))

  # Richards (1954) SAR -> ESP conversion
  .esp <- function(SAR) {
    (100 * (-0.0126 + 0.01475 * SAR)) /
      (1 + (-0.0126 + 0.01475 * SAR))
  }
  ESPx <- .esp(SAR)
  # fall back to the converted SAR only where ESP is missing
  ESP <- ifelse(is.na(ESP) & !is.na(SAR), ESPx, ESP)

  # rank ----
  # saline soils: low sodium (or unknown sodium with pH <= 8.2), classed by EC
  sc <- ifelse(EC > -1 & (ESP <= 15 | (is.na(ESP) & pH <= 8.2)), # & EC > 4 & pH <= 8.5,
               as.character(
                 cut(EC,
                     breaks = c(-1, 0.75, 2, 4, 8, 15, 1500),
                     labels = fao_lev[1:6],
                     right = FALSE
                 )),
               sc
  )

  # sodic soils: low EC, high ESP
  sc <- ifelse(EC <= 4 & ESP > 15, # | pH > 8.2
               as.character(
                 cut(ESP,
                     # breaks = c(0, 15, 30, 50, 70, 100),
                     breaks = c(-2, 30, 50, 70, 102),
                     labels = fao_lev[8:11],
                     right = FALSE
                 )),
               sc
  )

  # saline-sodic soils: high EC and high (or unknown but high-pH) sodium
  sc <- ifelse(EC > 4 & (ESP > 15 | (is.na(ESP) & pH > 8.2)), "saline-sodic", sc)

  # convert to factor, ordered from most saline to most sodic
  sc <- factor(sc, levels = c(fao_lev[6:1], fao_lev[8:11], "saline-sodic"))

  # optionally drop unused levels
  if (isTRUE(droplevels)) {
    sc <- droplevels(sc)
  }

  return(sc)
}
.codify <- function(x, system = "salt severity", droplevels = FALSE) {
  # route integer codes to the decoder for the requested classification
  # system; currently only "salt severity" is implemented
  if (system == "salt severity") {
    return(.codify_salt_severity(x, droplevels = droplevels))
  }
  # unknown systems yield NULL, invisibly (matches one-armed `if` semantics)
  invisible(NULL)
}
.codify_salt_severity <- function(x, droplevels = FALSE) {
  # x must already be an integer lookup code (1..11)
  if (!is.integer(x)) stop("x is not an integer")
  if (!all(unique(x) %in% c(1:11, NA))) warning("some x values do not match the lookup table")

  # FAO levels: 6 saline classes (most to least severe), 4 sodic classes,
  # plus the combined saline-sodic class
  saline <- c("none", "slightly saline", "moderately saline", "strongly saline", "very strongly saline", "extremely saline")
  sodic <- c("none", "slightly sodic", "moderately sodic", "strongly sodic", "very strongly sodic")
  lookup <- c(rev(saline), sodic[2:5], "saline-sodic")

  # decode integer codes via the lookup table
  out <- factor(x, levels = 1:11, labels = lookup)

  # optionally drop unused levels
  if (isTRUE(droplevels)) {
    out <- droplevels(out)
  }

  out
}
## TODO: there is currently no way to document these arguments, critical because each has an expected unit of measure
# Allocate pedon horizon data to the FAO Black Soil categories (BS1 & BS2).
#
# object   : data.frame or SoilProfileCollection of horizon data
# pedonid  : pedon ID column name (ignored for SPC input, taken from idname())
# hztop    : horizon top depth column name, cm (ignored for SPC input)
# hzbot    : horizon bottom depth column name, cm (ignored for SPC input)
# OC       : organic carbon column name, percent
# m_chroma : moist Munsell chroma column name
# m_value  : moist Munsell value column name
# d_value  : dry Munsell value column name
# CEC      : cation exchange capacity column name, cmol(+)/kg (optional; BS1 only)
# BS       : base saturation column name, percent (optional; BS1 only)
# tropical : logical, apply the lower (0.6%) OC threshold used for tropical soils
#
# Returns a data.frame with one row per pedon and logical BS1 / BS2 columns.
.black_soil <- function(object, pedonid = "peiid", hztop = "hzdept", hzbot = "hzdepb", OC = NULL, m_chroma = "m_chroma", m_value = "m_value", d_value = "d_value", CEC = NULL, BS = NULL, tropical = FALSE) { # thickness = NULL, horizon = TRUE

  # check object type
  # SoilProfileCollection objects have a number of required arguments defined internally
  if (inherits(object, 'SoilProfileCollection')) {
    # extract relevant metadata from the SPC
    pID <- idname(object)
    hztb <- horizonDepths(object)

    # horizons as data.frame-like obj
    df <- horizons(object)

    # setup variables used later, using SPC metadata if possible
    vars <- list(pedonid = pID, hztop = hztb[1], hzbot = hztb[2], OC = OC, m_chroma = m_chroma, m_value = m_value, d_value = d_value, CEC = CEC, BS = BS)
  } else {
    # this is likely a data.frame
    df <- object

    # setup variables used later, all from provided arguments
    vars <- list(pedonid = pedonid, hztop = hztop, hzbot = hzbot, OC = OC, m_chroma = m_chroma, m_value = m_value, d_value = d_value, CEC = CEC, BS = BS)
  }

  # check length of arguments
  if (any(sapply(vars, length) > 1)) {
    stop("the length of all arguments must be 1, except for object")
    # unlist() will drop NULL arguments, which is ok for CEC & BS
  } else vars <- unlist(vars)

  # check arguments match df colnames & subset
  # no vars should be NA, but this will catch them if they are
  idx <- !is.na(vars)
  mis <- vars[idx][! vars[idx] %in% names(df)]
  if (length(mis) > 0) {
    # name the offending columns so the mismatch is easy to find
    stop("column names in object must match the other character vector input arguments; not found in object: ", paste0(mis, collapse = ", "))
  } else {
    df <- df[vars[idx]]
    vars2 <- names(vars[idx])
    names(df) <- vars2
  }

  # criteria
  # 2nd category of Black Soils (color + organic carbon)
  # minimum dataset
  if (any(sapply(df[vars2][1:7], function(x) all(is.na(x))))) {
    stop("the minimum dataset of soil properties for allocating to the 2nd category of Black Soils are: OC (aka Organic Carbon), m_chroma, m_value, and d_value") # and thickness
  }

  bs2 <- with(df,
              (OC <= 20 & (OC >= 1.2 | (tropical == TRUE & OC >= 0.6)))
              & m_chroma <= 3
              & (m_value <= 3 & d_value <= 5)
  )

  # 1st category of Black Soils (additionally requires CEC & BS)
  # minimum dataset
  if (!is.null(CEC) && !is.null(BS) && all(c("CEC", "BS") %in% names(df))) {
    bs1 <- bs2 & df$CEC >= 25 & df$BS >= 50
  } else {
    message("the minimum dataset of soil properties for allocating to the 1st category of Black Soils, in addition to the 2nd category, are: CEC (aka Cation Exchange Capacity), BS (aka Base Saturation)")
    bs1 <- rep(NA, nrow(df))
  }

  # combine results and subset to 0-25cm
  df_bs <- cbind(df[vars2[1:3]], BS1 = bs1, BS2 = bs2)
  df_bs <- segment(df_bs, intervals = c(0, 25), hzdepcols = c("hztop", "hzbot"))
  df_bs <- df_bs[df_bs$segment_id == "00-25", -6]

  # aggregate the horizons: every horizon within 0-25cm must meet the criteria
  df_bs2 <- aggregate(cbind(BS1, BS2) ~ pedonid, data = df_bs, FUN = all, na.action = na.pass)
  df_bot <- aggregate(hzbot ~ pedonid, data = df_bs, FUN = function(x) max(x, na.rm = TRUE))

  # filter thickness > 25cm: horizon data must extend to the 25cm cut
  df_bs <- merge(df_bs2, df_bot, by = "pedonid", all.x = TRUE)
  df_bs <- within(df_bs, {
    BS1 = BS1 & as.integer(hzbot) == 25L
    BS2 = BS2 & as.integer(hzbot) == 25L
    hzbot = NULL
  })

  # restore the caller's pedon ID column name; use the resolved name so the
  # SPC path gets idname(object) rather than the unused `pedonid` default
  names(df_bs)[1] <- vars[["pedonid"]]
  return(df_bs)
}
# guess diagnostic features
# Guess a single diagnostic soil feature ('featkind') from horizon-level
# morphology (horizon designation, texture class, rupture-resistance
# cementation, colors, base saturation, organic carbon, n value).
#
# object   : SoilProfileCollection or data.frame of horizon data.
# pedonid..n_value : column names in 'object' holding each property.
# featkind : one of "lithic contact", "paralithic contact", "densic contact",
#            "petrocalcic horizon", "calcic horizon", "secondary carbonates",
#            "mollic epipedon".
# Returns a data.frame with one row per pedon/feature (pedon id column named
# after 'pedonid', plus featkind, featdept, featdepb), or NULL if no
# featkind column was created.
#
# NOTE(review): 'featkind' defaults to NULL; every `if (featkind == ...)`
# below then evaluates on a zero-length value and errors — callers appear
# expected to always supply featkind. Confirm intended contract.
.guess_df <- function(object = NULL, pedonid = "peiid", hzname = "hzname", hzdept = "hzdept", hzdepb = "hzdepb", texcl = "texcl", rupresblkcem = "rupresblkcem", m_value = "m_value", d_value = "d_value", m_chroma = "m_chroma", BS = "BS", OC = "OC", n_value = "n_value", featkind = NULL) {
# pedonid = "peiid"; hzname = "hzname"; hzdept = "hzdept"; hzdepb = "hzdepb"; texcl = "texcl"; hz_pat = ""; tex_pat = "br"; featkind = "mollic epipedon"; rupresblkcem = "rupresblkcem"; m_value = "m_value"; d_value = "d_value"; m_chroma = "m_chroma"; BS = NA; OC = NA; n_value = "n_value"
# object = sp1; pedonid = "id"; hzname = "name"; hzdept = "top"; hzdepb = "bot"; texcl = "texture"
# vars <- list(pedonid = "peiid", hzname = "hzname", hzdept = "hzdept", hzdepb = "hzdepb", texcl = "texture", rupresblkcem = "rupresblkcem", m_value = "m_value", d_value = "d_value", m_chroma = "m_chroma", OC = "OC", BS = "BS", n_value = "n_value")
# standardize inputs
# Map of internal (standardized) names -> user-supplied column names.
vars <- list(pedonid = pedonid, hzname = hzname, hzdept = hzdept, hzdepb = hzdepb, texcl = texcl, rupresblkcem = rupresblkcem, m_value = m_value, d_value = d_value, m_chroma = m_chroma, OC = OC, BS = BS, n_value = n_value)
# standardize inputs
if (class(object)[1] == "SoilProfileCollection") {
df <- horizons(object)
} else df <- object
# check length of arguments
if (any(sapply(vars, length) > 1)) {
stop("the length of all arguments must be 1, except for object")
# this will drop NULL arguments, which is ok for CEC & BS
} else vars <- unlist(vars)
# check arguments match df colnames & subset
# no vars should be NA, but this will catch them if they are
# Missing columns are filled with NA so downstream logic degrades gracefully.
if (! all(vars %in% names(df))) {
warning("the minimum dataset includes: pedonid, hzdept, hzdepb, and hzname; if texcl or rupreblkcem are missing the resulting diagnostic features are inferred from the available information")
idx <- vars %in% names(df)
mis <- vars[! idx]
df <- df[vars[idx]]
df[names(mis)] <- NA
vars2 <- names(vars)
names(df) <- vars2
} else {
df <- df[vars]
vars2 <- names(vars)
names(df) <- vars2
}
# Lower-case text fields so the pattern/level comparisons below are
# case-insensitive on these two columns.
df$texcl <- tolower(df$texcl)
df$rupresblkcem <- tolower(df$rupresblkcem)
# match pattern
# lithic contact ----
# R/Dr horizon designation, bedrock texture (or missing), strong cementation.
if (featkind == "lithic contact") {
message(paste("guessing", featkind))
idx_hzn <- grepl("R|Dr", df$hzname) & !grepl("\\/", df$hzname)
idx_tex <- !grepl("Cr|CR", df$hzname) & (df$texcl %in% c("br", "wb", "uwb") | is.na(df$texcl))
lev <- c("strongly cemented", "very strongly cemented", "indurated", "strongly", "extremely strongly", "H", "moderately coherent", "strongly coherent", "very strongly coherent")
idx_cem <- df$rupresblkcem %in% lev | is.na(df$rupresblkcem)
# error
idx_err <- idx_hzn & (!idx_tex | !idx_cem)
if (any(idx_err)) {
message(paste("the following pedonid have R horizons that do not meeting the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
}
df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
}
# paralithic contact ----
# Cr horizon designation, bedrock texture (or missing), weak cementation.
if (featkind == "paralithic contact") {
message(paste("guessing", featkind))
idx_hzn <- grepl("Cr|CR", df$hzname)
idx_tex <- !grepl("R|Dr", df$hzname) & (df$texcl %in% c("br", "wb", "uwb") | is.na(df$texcl))
lev <- c("extremely weakly cememented", "very weakly cemented", "weakly cemented", "moderately cemented", "weakly", "moderately", "S")
idx_cem <- df$rupresblkcem %in% lev | is.na(df$rupresblkcem)
# error
idx_err <- idx_hzn & (!idx_tex | !idx_cem)
if (any(idx_err)) {
message(paste("the following pedonid have Cr horizons that do not meeting the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
}
df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
}
# densic contact ----
# Horizon designations ending in d/D or followed by a digit suffix.
# NOTE(review): "[1:9]" inside a regex is a character class containing
# 1, :, 9 — not the digit range 1-9; "[1-9]" was probably intended.
if (featkind == "densic contact") {
message(paste("guessing", featkind))
idx <- grepl("d$|D$|d[1:9]|D[1:9]", df$hzname)
df$featkind <- ifelse(idx == TRUE, featkind, NA)
}
# petrocalcic horizon ----
# kkm/kkqm designations, cemented texture modifier, not "noncemented".
if (featkind == "petrocalcic horizon") {
message(paste("guessing", featkind))
idx_hzn <- grepl("kkm|kkqm", df$hzname)
idx_tex <- ((grepl("cem", df$texcl) & !grepl("-br", df$texcl)) | is.na(df$texcl))
lev <- "noncemented"
idx_cem <- !df$rupresblkcem %in% lev | is.na(df$rupresblkcem)
# error
idx_err <- idx_hzn & (!idx_tex | !idx_cem)
if (any(idx_err)) {
message(paste("the following pedonid have Bkkm|Bkkqm horizons that do not meeting the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
}
df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
}
# calcic horizon ----
# kk designations (uncemented variant); same "[1:9]" caveat as above.
if (featkind == "calcic horizon") {
message(paste("guessing", featkind))
idx_hzn <- grepl("kk$|kk[1:9]|kkq$|kkq[1:9]|kkb$|kkb[1:9]", df$hzname)
idx_tex <- (!grepl("cem-", df$texcl) | is.na(df$texcl))
lev <- "noncemented"
idx_cem <- df$rupresblkcem %in% lev | is.na(df$rupresblkcem)
# error
idx_err <- idx_hzn & (!idx_tex | !idx_cem)
if (any(idx_err)) {
message(paste("the following pedonid have Bkk horizons that do not meeting the texcl or rupture resistance cementation criteria and will be excluded: ", paste0(df$pedonid[idx_err], collapse = ", ")))
}
df$featkind <- ifelse(idx_hzn & idx_tex & idx_cem, featkind, NA)
}
# secondary carbonates ----
# Single "k" suffix (but not "kk") indicates carbonate accumulation.
if (featkind == "secondary carbonates") {
message(paste("guessing", featkind))
idx_hzn <- grepl("k", df$hzname) & !grepl("kk", df$hzname)
df$featkind <- ifelse(idx_hzn, featkind, NA)
}
# mollic epipedon ----
# Combines designation, texture, moist/dry color, base saturation,
# organic carbon and n value criteria; missing values pass each test.
if (featkind == "mollic epipedon") {
message(paste("guessing", featkind))
idx_hzn <- !grepl("O|Ao|R|W|M|C|\\/", df$hzname)
idx_tex <- df$texcl %in% levels(SoilTextureLevels()) | is.na(df$texcl)
# need to add structure to fetchNASIS
idx_col <-
(df$m_value <= 3 | is.na(df$m_value)) &
(df$d_value <= 5 | is.na(df$d_value)) &
df$m_chroma <= 3 &
(!is.na(df$m_value) | !is.na(df$d_value))
idx_bs <- (df$BS >= 50 | is.na(df$BS)) # & (df$ph1to1 > 6 | is.na(df$ph1to1))
idx_oc <-
((df$OC >= 2.5 | is.na(df$OC)) & (df$m_value %in% 4:5 | is.na(df$m_value))) |
((df$OC >= 0.6 | is.na(df$OC)) & (df$m_value < 4 | is.na(df$m_value)))
idx_nv <- (df$n_value < 0.7 | is.na(df$n_value))
df$featkind <- ifelse(idx_hzn & idx_tex & idx_col & idx_bs & idx_oc & idx_nv, featkind, NA)
}
# subset features
idx <- "featkind" %in% names(df)
if (idx) {
df_sub <- df[!is.na(df$featkind), ]
# aggregate depths ----
# Collapse matching horizons to one row per pedon: min top / max bottom.
idx <- !is.na(df_sub$featkind)
if (any(idx) & sum(idx, na.rm = TRUE) > 1) {
sp <- aggregate(hzdept ~ pedonid + featkind, data = df_sub, FUN = function(x) min(x, na.rm = TRUE))
sp$featdepb <- aggregate(hzdepb ~ pedonid + featkind, data = df_sub, FUN = function(x) max(x, na.rm = TRUE))$hzdepb
names(sp)[3] <- "featdept"
} else {
sp <- df_sub[c("pedonid", "featkind", "hzdept", "hzdepb")]
names(sp)[3:4] <- c("featdept", "featdepb")
}
# Restore the caller's pedon id column name.
names(sp)[1] <- vars[1]
# Minimum-thickness filters per feature kind (cm).
if (featkind == "petrocalcic horizon") {
sp <- sp[(sp$featdepb - sp$featdept) >= 10, ]
}
# need to add more logic to capture when < 10cm is cemented
if (featkind == "calcic horizon") {
sp <- sp[(sp$featdepb - sp$featdept) >= 15, ]
}
# needs additional criteria to get a variable depth or use Andrew's function
if (featkind == "mollic epipedon") {
sp <- sp[(sp$featdepb - sp$featdept) >= 18, ]
}
} else sp <- NULL
return(sp)
}
|
## plot2.R -- draw the second course-project figure: a line chart of
## Global Active Power (kilowatts) over 1-2 Feb 2007, saved as plot2.png.
## NOTE(review): setwd() with an absolute path is machine-specific; edit
## this path (or run from the project directory) before sourcing.
setwd("C:/Users/Desktop/Coursera/Data Science Specialization/4. Exploratory Data Analysis/Project 1") #change this to your working directory
unzip("exdata%2Fdata%2Fhousehold_power_consumption.zip", exdir = getwd())
#### Import Data ####
# Household electric power consumption at a one-minute sampling rate over
# almost 4 years; missing values are coded "?" in the raw file.
hpc <- read.table(
"./household_power_consumption.txt",
sep = ';',
header = TRUE,
na.strings = "?",
colClasses = c("character", "character", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric")
)
names(hpc) <- tolower(names(hpc)) #change variable names to lower case
library(tidyverse)
# Keep only the two target days (dates are stored as d/m/Y strings).
hpc_data <- hpc %>% filter(date %in% c('1/2/2007', '2/2/2007'))
# Q2. Build a proper date-time column, then plot to a 480x480 PNG device.
hpc_data$date_time <- strptime(paste(hpc_data$date, hpc_data$time, sep = " "), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(hpc_data$date_time, hpc_data$global_active_power,
type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
# plot2.R (duplicate copy) -- line chart of Global Active Power for
# 1-2 Feb 2007, written to plot2.png.
# NOTE(review): machine-specific absolute path; must be edited per machine.
setwd("C:/Users/Desktop/Coursera/Data Science Specialization/4. Exploratory Data Analysis/Project 1") #change this to your working directory
unzip("exdata%2Fdata%2Fhousehold_power_consumption.zip", exdir = getwd())
#### Import Data ####
# Measurements of electric power consumption in one household with a one-minute sampling rate
# over a period of almost 4 years. Different electrical quantities and some sub-metering values are available.
# Missing values are coded "?" in the raw file (mapped to NA here).
hpc <- read.table("./household_power_consumption.txt", sep = ';', header = T, na.strings = "?", colClasses = c("character", "character", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
names(hpc) <- tolower(names(hpc)) #change variable names to lower case
library(tidyverse)
# Subset to the two target days; dates are d/m/Y character strings.
hpc_data <- hpc %>% filter(date %in% c('1/2/2007', '2/2/2007'))
# Q2.
# Combine date and time into a POSIXlt date-time for the x axis.
hpc_data$date_time <- strptime(paste(hpc_data$date, hpc_data$time, sep = " "), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width=480, height = 480)
plot(
hpc_data$date_time,
hpc_data$global_active_power,
type = "l",
xlab = "",
ylab="Global Active Power (kilowatts)")
dev.off()
#####################################
## Method Definitions for SYSargs2 ##
#####################################
## Methods to return SYSargs2 components
## Slot accessors for SYSargs2: each method exposes one slot unchanged.
## modules() additionally flattens its slot to a named character vector.
setMethod(f = "targets", signature = "SYSargs2", definition = function(x) x@targets)
setMethod(f = "targetsheader", signature = "SYSargs2", definition = function(x) x@targetsheader)
setMethod(f = "modules", signature = "SYSargs2", definition = function(x) setNames(as.character(x@modules), names(x@modules)))
setMethod(f = "wf", signature = "SYSargs2", definition = function(x) x@wf)
setMethod(f = "clt", signature = "SYSargs2", definition = function(x) x@clt)
setMethod(f = "yamlinput", signature = "SYSargs2", definition = function(x) x@yamlinput)
setMethod(f = "cmdlist", signature = "SYSargs2", definition = function(x) x@cmdlist)
setMethod(f = "input", signature = "SYSargs2", definition = function(x) x@input)
setMethod(f = "output", signature = "SYSargs2", definition = function(x) x@output)
setMethod(f = "files", signature = "SYSargs2", definition = function(x) x@files)
setMethod(f = "inputvars", signature = "SYSargs2", definition = function(x) x@inputvars)
setMethod(f = "cmdToCwl", signature = "SYSargs2", definition = function(x) x@cmdToCwl)
setMethod(f = "status", signature = "SYSargs2", definition = function(x) x@status)
## Constructor methods
## List to SYSargs2 with: as(mylist, "SYSargs2")
# Coerce a plain named list (one element per slot) to a SYSargs2 object;
# 'from' must contain every slot listed below under its slot name.
setAs(
from = "list", to = "SYSargs2",
def = function(from) {
new("SYSargs2",
targets = from$targets,
targetsheader = from$targetsheader,
modules = from$modules,
wf = from$wf,
clt = from$clt,
yamlinput = from$yamlinput,
cmdlist = from$cmdlist,
input = from$input,
output = from$output,
files = from$files,
inputvars = from$inputvars,
cmdToCwl = from$cmdToCwl,
status = from$status,
internal_outfiles = from$internal_outfiles
)
}
)
# Inverse view: dump all slots of a SYSargs2 object into a named list
# (the element names match the slot names used by the setAs above).
setMethod(f = "sysargs2", signature = "SYSargs2", definition = function(x) {
sysargs2 <- list(
targets = x@targets, targetsheader = x@targetsheader, modules = x@modules, wf = x@wf,
clt = x@clt, yamlinput = x@yamlinput, cmdlist = x@cmdlist, input = x@input, output = x@output,
files = x@files, inputvars = x@inputvars, cmdToCwl = x@cmdToCwl,
status = x@status, internal_outfiles = x@internal_outfiles
)
return(sysargs2)
})
## SYSargs2 to list with: as(SYSargs2, "list")
setAs(from = "SYSargs2", to = "list", def = function(from) {
sysargs2(from)
})
## Define print behavior for SYSargs2
# Prints a compact summary of the object: slot sizes, sample-name range,
# and the per-sub-step rendering status. Uses the crayon package for the
# colored header. Output format is relied on interactively; do not alter
# the strings below.
setMethod(
f = "show", signature = "SYSargs2",
definition = function(object) {
cat(crayon::green$bold(paste0("Instance of '", class(object), "':")),
paste0(" Slot names/accessors: "),
paste0(
" targets: ", length(object@targets),
" (", head(names(object@targets), 1), "...",
tail(names(object@targets), 1), ")",
", targetsheader: ", length(unlist(object@targetsheader)), " (lines)"
),
paste0(" modules: ", length(object@modules)),
paste0(
" wf: ", length(object@wf$steps),
", clt: ", length(object@clt),
", yamlinput: ", length(object@yamlinput), " (inputs)"
),
paste0(
" input: ", length(object@input),
", output: ", length(object@output)
),
paste0(" cmdlist: ", length(object@cmdlist)),
" Sub Steps:",
# "rendered" is TRUE once cmdlist has been populated for sample 1.
paste0(
" ", seq_along(object@clt), ". ", object@files$steps,
" (rendered: ", length(object@cmdlist[[1]]) != 0, ")"
),
"\n",
sep = "\n"
)
}
)
## Extend names() method: names() of a SYSargs2 lists its slot names,
## which double as the accessor function names.
setMethod(
f = "names", signature = "SYSargs2",
definition = function(x) slotNames(x)
)
## Extend infile1() method
## Returns, for every sample, the first input-file path stored under one of
## the requested field names (default "FileName" or "FileName1"); samples
## without that field yield "".
## Fix: mirror the guard already present in infile2() -- previously zero
## matches of 'input' among the field names crashed the `[[` extraction,
## and two matches triggered silent recursive `[[` indexing; now the first
## matching field is used and no match falls back to "".
setMethod(f = "infile1", signature = "SYSargs2", definition = function(x, input=c("FileName", "FileName1")) {
subset_input <- input(x)
input_sub <- input[input %in% names(input(x)[[1]])]
if (length(input_sub) == 0) input_sub <- ""
input_sub <- input_sub[1]
subset_sample <- sapply(names(subset_input), function(y) subset_input[[y]][[input_sub]])
subset_sample <- sapply(names(subset_sample), function(y) ifelse(is.null(subset_sample[[y]]), "", subset_sample[y]))
return(subset_sample)
})
## Extend infile2() method
# Returns, for every sample, the second input-file path (field "FileName2"
# by default); samples without that field yield "".
# NOTE(review): when 'input' matches no field name, input_sub becomes "";
# list extraction with an empty name in `[[input_sub]]` likely still
# errors rather than returning NULL -- confirm the intended fallback.
setMethod(f = "infile2", signature = "SYSargs2", definition = function(x, input="FileName2") {
subset_input <- input(x)
input_sub <- input[input %in% names(input(x)[[1]])]
if(length(input_sub)==0) input_sub <- ""
subset_sample <- sapply(names(subset_input), function(y) subset_input[[y]][[input_sub]])
subset_sample <- sapply(names(subset_sample), function(y) ifelse(is.null(subset_sample[[y]]), "", subset_sample[y]))
return(subset_sample)
})
## Extend length() method: the number of rendered command sets (one per sample).
setMethod(
f = "length", signature = "SYSargs2", definition = function(x) length(x@cmdlist)
)
# Behavior of "[" operator for SYSargs2: subset every per-sample slot by i
# (logical masks are converted to positional indices first).
setMethod(f = "[", signature = "SYSargs2", definition = function(x, i, ..., drop) {
if (is.logical(i)) i <- which(i)
for (sl in c("targets", "input", "output", "internal_outfiles", "cmdlist")) {
slot(x, sl) <- slot(x, sl)[i]
}
return(x)
})
## Behavior of "[[" operator for SYSargs2: extract one slot via the list view.
setMethod(
f = "[[", signature = c("SYSargs2", "ANY", "missing"),
definition = function(x, i, ..., drop) as(x, "list")[[i]]
)
## Behavior of "$" operator for SYSargs2: slot access by name.
setMethod("$",
signature = "SYSargs2",
definition = function(x, name) slot(x, name)
)
## Convert targets data.frame to list
## Convert a targets data.frame into a named list of row records
## (round-trips through YAML so each row becomes a named list).
## x  : targets data.frame.
## id : column whose values name the list elements.
## Fix: honor the 'id' argument -- it was previously ignored and
## x$SampleName was hard-coded; the default preserves the old behavior.
targets.as.list <- function(x, id="SampleName") {
targetslist <- yaml::yaml.load(yaml::as.yaml(x, column.major = FALSE))
names(targetslist) <- x[[id]]
return(targetslist)
}
## Usage:
# targets <- read.delim("targets.txt", comment.char = "#")
# targetslist <- targets.as.list(x=targets)
## Convert targets list to data.frame
## Convert a named list of row records (as produced by targets.as.list)
## back into a targets data.frame: one row per list element, columns
## named after the fields of the first record.
targets.as.df <- function(x) {
row_chunks <- lapply(x, as.character)
out <- as.data.frame(do.call(rbind, row_chunks))
rownames(out) <- NULL
colnames(out) <- names(x[[1]])
out
}
## Usage:
# targets.as.df(x=targetslist)
## targets slot from a SYSargs2 obj to df with: as(SYSargs2, "DataFrame")
setAs(from = "SYSargs2", to = "DataFrame", def = function(from) S4Vectors::DataFrame(targets.as.df(targets(from))))
## First element of the CWL baseCommand of the first command-line tool.
setMethod("baseCommand", signature = "SYSargs2", definition = function(x) x@clt[[1]]$baseCommand[[1]])
## Sample names taken from the targets slot; when the step has no targets
## a message is emitted and NULL is returned invisibly.
setMethod("SampleName", signature = "SYSargs2", definition = function(x) {
if (length(targets(x)) == 0) {
message("This step doesn't contain multiple samples.")
return(invisible(NULL))
}
as(x, "DataFrame")$SampleName
})
## Replacement method for SYSargs2 using "[" operator
# Assigns into one slot selected either by position (1..12) or by name.
# NOTE(review): the numeric and character mappings are not symmetric --
# numeric 11/12 map to status/internal_outfiles and there is no numeric
# index for cmdToCwl, while the character branch does accept "cmdToCwl".
# Confirm whether numeric indexing is still meant to be supported.
setReplaceMethod(f = "[[", signature = "SYSargs2", definition = function(x, i, j, value) {
if (i == 1) x@targets <- value
if (i == 2) x@targetsheader <- value
if (i == 3) x@modules <- value
if (i == 4) x@wf <- value
if (i == 5) x@clt <- value
if (i == 6) x@yamlinput <- value
if (i == 7) x@cmdlist <- value
if (i == 8) x@input <- value
if (i == 9) x@output <- value
if (i == 10) x@files <- value
if (i == 11) x@status <- value
if (i == 12) x@internal_outfiles <- value
if (i == "targets") x@targets <- value
if (i == "targetsheader") x@targetsheader <- value
if (i == "modules") x@modules <- value
if (i == "wf") x@wf <- value
if (i == "clt") x@clt <- value
if (i == "yamlinput") x@yamlinput <- value
if (i == "cmdlist") x@cmdlist <- value
if (i == "input") x@input <- value
if (i == "output") x@output <- value
if (i == "files") x@files <- value
if (i == "cmdToCwl") x@cmdToCwl <- value
if (i == "status") x@status <- value
if (i == "internal_outfiles") x@internal_outfiles <- value
return(x)
})
## Replacement method
# yamlinput(x, paramName) <- value: replace one yaml input parameter and
# re-render the workflow (updateWF) so cmdlist/output reflect the change.
# The replacement value must have the same class as the value it replaces.
setReplaceMethod("yamlinput", c("SYSargs2"), function(x, paramName, value) {
x <- as(x, "list")
## Check paramName
if (!paramName %in% names(x$yamlinput)) stop(paste0("'paramName' argument must be one of the following:", "\n",
paste0(names(x$yamlinput), collapse = ", ")))
## Check class of value
if (!identical(class(x$yamlinput[[paramName]]), class(value)))
stop(paste0("'value' argument must be the same class of the 'paramName':", "\n",
class(x$yamlinput[[paramName]])))
x$yamlinput[[paramName]] <- value
x <- as(x, "SYSargs2")
x <- updateWF(x)
x
})
# cmdToCwl(x) <- value: direct slot replacement, no re-rendering.
setReplaceMethod("cmdToCwl", c("SYSargs2"), function(x, ..., value) {
x@cmdToCwl <- value
x
})
| /R/sysargs2_methods.R | no_license | JulongWei/systemPipeR | R | false | false | 9,370 | r | #####################################
## Method Definitions for SYSargs2 ##
#####################################
## Methods to return SYSargs2 components
## Slot accessors for SYSargs2: each method exposes one slot unchanged.
## modules() additionally flattens its slot to a named character vector.
setMethod(f = "targets", signature = "SYSargs2", definition = function(x) x@targets)
setMethod(f = "targetsheader", signature = "SYSargs2", definition = function(x) x@targetsheader)
setMethod(f = "modules", signature = "SYSargs2", definition = function(x) setNames(as.character(x@modules), names(x@modules)))
setMethod(f = "wf", signature = "SYSargs2", definition = function(x) x@wf)
setMethod(f = "clt", signature = "SYSargs2", definition = function(x) x@clt)
setMethod(f = "yamlinput", signature = "SYSargs2", definition = function(x) x@yamlinput)
setMethod(f = "cmdlist", signature = "SYSargs2", definition = function(x) x@cmdlist)
setMethod(f = "input", signature = "SYSargs2", definition = function(x) x@input)
setMethod(f = "output", signature = "SYSargs2", definition = function(x) x@output)
setMethod(f = "files", signature = "SYSargs2", definition = function(x) x@files)
setMethod(f = "inputvars", signature = "SYSargs2", definition = function(x) x@inputvars)
setMethod(f = "cmdToCwl", signature = "SYSargs2", definition = function(x) x@cmdToCwl)
setMethod(f = "status", signature = "SYSargs2", definition = function(x) x@status)
## Constructor methods
## List to SYSargs2 with: as(mylist, "SYSargs2")
# Coerce a plain named list (one element per slot) to a SYSargs2 object;
# 'from' must contain every slot listed below under its slot name.
setAs(
from = "list", to = "SYSargs2",
def = function(from) {
new("SYSargs2",
targets = from$targets,
targetsheader = from$targetsheader,
modules = from$modules,
wf = from$wf,
clt = from$clt,
yamlinput = from$yamlinput,
cmdlist = from$cmdlist,
input = from$input,
output = from$output,
files = from$files,
inputvars = from$inputvars,
cmdToCwl = from$cmdToCwl,
status = from$status,
internal_outfiles = from$internal_outfiles
)
}
)
# Inverse view: dump all slots of a SYSargs2 object into a named list
# (the element names match the slot names used by the setAs above).
setMethod(f = "sysargs2", signature = "SYSargs2", definition = function(x) {
sysargs2 <- list(
targets = x@targets, targetsheader = x@targetsheader, modules = x@modules, wf = x@wf,
clt = x@clt, yamlinput = x@yamlinput, cmdlist = x@cmdlist, input = x@input, output = x@output,
files = x@files, inputvars = x@inputvars, cmdToCwl = x@cmdToCwl,
status = x@status, internal_outfiles = x@internal_outfiles
)
return(sysargs2)
})
## SYSargs2 to list with: as(SYSargs2, "list")
setAs(from = "SYSargs2", to = "list", def = function(from) {
sysargs2(from)
})
## Define print behavior for SYSargs2
# Prints a compact summary of the object: slot sizes, sample-name range,
# and the per-sub-step rendering status. Uses the crayon package for the
# colored header. Output format is relied on interactively; do not alter
# the strings below.
setMethod(
f = "show", signature = "SYSargs2",
definition = function(object) {
cat(crayon::green$bold(paste0("Instance of '", class(object), "':")),
paste0(" Slot names/accessors: "),
paste0(
" targets: ", length(object@targets),
" (", head(names(object@targets), 1), "...",
tail(names(object@targets), 1), ")",
", targetsheader: ", length(unlist(object@targetsheader)), " (lines)"
),
paste0(" modules: ", length(object@modules)),
paste0(
" wf: ", length(object@wf$steps),
", clt: ", length(object@clt),
", yamlinput: ", length(object@yamlinput), " (inputs)"
),
paste0(
" input: ", length(object@input),
", output: ", length(object@output)
),
paste0(" cmdlist: ", length(object@cmdlist)),
" Sub Steps:",
# "rendered" is TRUE once cmdlist has been populated for sample 1.
paste0(
" ", seq_along(object@clt), ". ", object@files$steps,
" (rendered: ", length(object@cmdlist[[1]]) != 0, ")"
),
"\n",
sep = "\n"
)
}
)
## Extend names() method: names() of a SYSargs2 lists its slot names,
## which double as the accessor function names.
setMethod(
f = "names", signature = "SYSargs2",
definition = function(x) slotNames(x)
)
## Extend infile1() method
## Returns, for every sample, the first input-file path stored under one of
## the requested field names (default "FileName" or "FileName1"); samples
## without that field yield "".
## Fix: mirror the guard already present in infile2() -- previously zero
## matches of 'input' among the field names crashed the `[[` extraction,
## and two matches triggered silent recursive `[[` indexing; now the first
## matching field is used and no match falls back to "".
setMethod(f = "infile1", signature = "SYSargs2", definition = function(x, input=c("FileName", "FileName1")) {
subset_input <- input(x)
input_sub <- input[input %in% names(input(x)[[1]])]
if (length(input_sub) == 0) input_sub <- ""
input_sub <- input_sub[1]
subset_sample <- sapply(names(subset_input), function(y) subset_input[[y]][[input_sub]])
subset_sample <- sapply(names(subset_sample), function(y) ifelse(is.null(subset_sample[[y]]), "", subset_sample[y]))
return(subset_sample)
})
## Extend infile2() method
# Returns, for every sample, the second input-file path (field "FileName2"
# by default); samples without that field yield "".
# NOTE(review): when 'input' matches no field name, input_sub becomes "";
# list extraction with an empty name in `[[input_sub]]` likely still
# errors rather than returning NULL -- confirm the intended fallback.
setMethod(f = "infile2", signature = "SYSargs2", definition = function(x, input="FileName2") {
subset_input <- input(x)
input_sub <- input[input %in% names(input(x)[[1]])]
if(length(input_sub)==0) input_sub <- ""
subset_sample <- sapply(names(subset_input), function(y) subset_input[[y]][[input_sub]])
subset_sample <- sapply(names(subset_sample), function(y) ifelse(is.null(subset_sample[[y]]), "", subset_sample[y]))
return(subset_sample)
})
## Extend length() method: the number of rendered command sets (one per sample).
setMethod(
f = "length", signature = "SYSargs2", definition = function(x) length(x@cmdlist)
)
# Behavior of "[" operator for SYSargs2: subset every per-sample slot by i
# (logical masks are converted to positional indices first).
setMethod(f = "[", signature = "SYSargs2", definition = function(x, i, ..., drop) {
if (is.logical(i)) i <- which(i)
for (sl in c("targets", "input", "output", "internal_outfiles", "cmdlist")) {
slot(x, sl) <- slot(x, sl)[i]
}
return(x)
})
## Behavior of "[[" operator for SYSargs2: extract one slot via the list view.
setMethod(
f = "[[", signature = c("SYSargs2", "ANY", "missing"),
definition = function(x, i, ..., drop) as(x, "list")[[i]]
)
## Behavior of "$" operator for SYSargs2: slot access by name.
setMethod("$",
signature = "SYSargs2",
definition = function(x, name) slot(x, name)
)
## Convert targets data.frame to list
## Convert a targets data.frame into a named list of row records
## (round-trips through YAML so each row becomes a named list).
## x  : targets data.frame.
## id : column whose values name the list elements.
## Fix: honor the 'id' argument -- it was previously ignored and
## x$SampleName was hard-coded; the default preserves the old behavior.
targets.as.list <- function(x, id="SampleName") {
targetslist <- yaml::yaml.load(yaml::as.yaml(x, column.major = FALSE))
names(targetslist) <- x[[id]]
return(targetslist)
}
## Usage:
# targets <- read.delim("targets.txt", comment.char = "#")
# targetslist <- targets.as.list(x=targets)
## Convert targets list to data.frame
## Convert a named list of row records (as produced by targets.as.list)
## back into a targets data.frame: one row per list element, columns
## named after the fields of the first record.
targets.as.df <- function(x) {
row_chunks <- lapply(x, as.character)
out <- as.data.frame(do.call(rbind, row_chunks))
rownames(out) <- NULL
colnames(out) <- names(x[[1]])
out
}
## Usage:
# targets.as.df(x=targetslist)
## targets slot from a SYSargs2 obj to df with: as(SYSargs2, "DataFrame")
setAs(from = "SYSargs2", to = "DataFrame", def = function(from) S4Vectors::DataFrame(targets.as.df(targets(from))))
## First element of the CWL baseCommand of the first command-line tool.
setMethod("baseCommand", signature = "SYSargs2", definition = function(x) x@clt[[1]]$baseCommand[[1]])
## Sample names taken from the targets slot; when the step has no targets
## a message is emitted and NULL is returned invisibly.
setMethod("SampleName", signature = "SYSargs2", definition = function(x) {
if (length(targets(x)) == 0) {
message("This step doesn't contain multiple samples.")
return(invisible(NULL))
}
as(x, "DataFrame")$SampleName
})
## Replacement method for SYSargs2 using "[" operator
# Assigns into one slot selected either by position (1..12) or by name.
# NOTE(review): the numeric and character mappings are not symmetric --
# numeric 11/12 map to status/internal_outfiles and there is no numeric
# index for cmdToCwl, while the character branch does accept "cmdToCwl".
# Confirm whether numeric indexing is still meant to be supported.
setReplaceMethod(f = "[[", signature = "SYSargs2", definition = function(x, i, j, value) {
if (i == 1) x@targets <- value
if (i == 2) x@targetsheader <- value
if (i == 3) x@modules <- value
if (i == 4) x@wf <- value
if (i == 5) x@clt <- value
if (i == 6) x@yamlinput <- value
if (i == 7) x@cmdlist <- value
if (i == 8) x@input <- value
if (i == 9) x@output <- value
if (i == 10) x@files <- value
if (i == 11) x@status <- value
if (i == 12) x@internal_outfiles <- value
if (i == "targets") x@targets <- value
if (i == "targetsheader") x@targetsheader <- value
if (i == "modules") x@modules <- value
if (i == "wf") x@wf <- value
if (i == "clt") x@clt <- value
if (i == "yamlinput") x@yamlinput <- value
if (i == "cmdlist") x@cmdlist <- value
if (i == "input") x@input <- value
if (i == "output") x@output <- value
if (i == "files") x@files <- value
if (i == "cmdToCwl") x@cmdToCwl <- value
if (i == "status") x@status <- value
if (i == "internal_outfiles") x@internal_outfiles <- value
return(x)
})
## Replacement method
# yamlinput(x, paramName) <- value: replace one yaml input parameter and
# re-render the workflow (updateWF) so cmdlist/output reflect the change.
# The replacement value must have the same class as the value it replaces.
setReplaceMethod("yamlinput", c("SYSargs2"), function(x, paramName, value) {
x <- as(x, "list")
## Check paramName
if (!paramName %in% names(x$yamlinput)) stop(paste0("'paramName' argument must be one of the following:", "\n",
paste0(names(x$yamlinput), collapse = ", ")))
## Check class of value
if (!identical(class(x$yamlinput[[paramName]]), class(value)))
stop(paste0("'value' argument must be the same class of the 'paramName':", "\n",
class(x$yamlinput[[paramName]])))
x$yamlinput[[paramName]] <- value
x <- as(x, "SYSargs2")
x <- updateWF(x)
x
})
# cmdToCwl(x) <- value: direct slot replacement, no re-rendering.
setReplaceMethod("cmdToCwl", c("SYSargs2"), function(x, ..., value) {
x@cmdToCwl <- value
x
})
|
# BNP Random Evolver
# Made available under Apache 2.0 License
# By Andy White
# This script evolves Generation X-1 into Generation X
# It saves each set of predictions as a CSV
# and the parameters and error scores in a metadata CSV file
# Leave running overnight and then run all features through
# a master model in the morning!
# Load required libraries
library(data.table)
library(Matrix)
library(xgboost)
######################################################################################
## SETUP PARAMETERS (Set these for each session)
######################################################################################
# Session configuration for the genetic-algorithm feature evolver.
# NOTE(review): setwd() with an absolute path is machine-specific.
setwd("C:/Users/Andrew.000/Desktop/MSc Data Science and Analytics/KAGGLE/BNP Paribas")
# Prefix for this generation's per-model prediction CSVs.
predictions_file_prefix = "base_GEN010_"
# CSV accumulating parameters and error scores across all generations.
metadata_file = "base_metadata_all_gens.csv"
# GA settings: parent pool size, children per generation, per-gene mutation rate.
number_of_parents = 100
number_of_children = 100
mutation_probability = 0.125
######################################################################################
# Function for calculating logloss
# Binary cross-entropy between predicted probabilities and 0/1 outcomes.
# Predictions of exactly 1 or 0 are nudged to 0.999999 / 0.000001 so
# log() never produces -Inf.
quicklogloss = function(preds, actual) {
capped = preds
capped[capped == 1] = 0.999999
capped[capped == 0] = 0.000001
per_obs = actual * log(capped) + (1 - actual) * log(1 - capped)
sum(per_obs) / -length(per_obs)
}
# Load data and mark trainA and trainB splits
# trainA/trainB is a random 50/50 split of the training rows, used later
# for leakage-free target-probability encoding.
# NOTE(review): the split is drawn before set.seed(), so it differs per run.
train_dt = fread("train.csv", na.strings=c(NA, "", "NA"))
test_dt = fread("test.csv", na.strings=c(NA, "", "NA"))
train_dt[,train_test := "train"]
test_dt[,train_test := "test"]
all_dt = rbind(train_dt, test_dt, fill=T)
rm(list=c("train_dt", "test_dt"))
setkey(all_dt, ID)
n = nrow(all_dt)
trainA = sample(all_dt[train_test=="train",ID], floor(length(all_dt[train_test=="train",ID]) / 2))
all_dt[train_test=="train" & ID %in% trainA, basesplit := "trainA"]
all_dt[train_test=="train" & !(ID %in% trainA), basesplit := "trainB"]
all_dt[train_test=="test", basesplit := "test"]
# Classify each feature column: default "num", override character columns
# to "cat" and integer columns to "int" (integer wins if both apply).
variable_names = setdiff(names(all_dt), c("ID", "target", "train_test", "basesplit"))
variable_types = rep("num", length(variable_names))
variable_types[sapply(all_dt, is.character)[variable_names]] = "cat"
variable_types[sapply(all_dt, is.integer)[variable_names]] = "int"
names(variable_types) = variable_names
# Load metadata
# Parents = the number_of_parents models with the lowest max_logloss so far.
metadata = fread(metadata_file)
parent_max = sort(metadata[,max_logloss])[number_of_parents]
parent_rows = which(metadata[,max_logloss] <= parent_max)
random_seeds = sample(1:5000, number_of_children)
for (i in 1:number_of_children) {
set.seed(random_seeds[i])
print.noquote(paste("Random model:", i))
# Set up file names
file_suffix = formatC(i, width=3, flag="0")
predictions_filename = paste(predictions_file_prefix, file_suffix, ".csv", sep="")
# Choose random parameters
# These are: -variable choice (always 8 variables)
# -treat integers as categoricals?
# -include 2-way products of numerics?
# -include 2-way sums of numerics?
# -include 2-way differences of numerics?
# -include 2-way quotients of numerics? (N.B. order matters so this is calculated in both directions)
# -rare value cut off [0:1000]
# -include probabilities of categoricals?
# -include probabilities of binned numerics?
# -XGB params:
# - max_depth [4:30]
# - subsample [0.4:1.0]
# - colsample_bytree [0.4:1.0]
# - eta [0.01:0.20]
# - rounds [50:500]
# - seed[0:2000]
# Set up parents
parents = sample(parent_rows, 2)
parent_vars = c(as.character(subset(metadata[parents[1],], select=paste("Variable", 1:10))),
as.character(subset(metadata[parents[2],], select=paste("Variable", 1:10))))
parent_vars = parent_vars[parent_vars != "NA" & parent_vars != ""]
parent_inclusions = as.logical(subset(metadata[parents[1],],
select=c("ints_as_cats", "prods", "sums", "diffs", "quots", "probs", "num_probs"))) |
as.logical(subset(metadata[parents[2],], select=c("ints_as_cats", "prods", "sums", "diffs", "quots", "probs", "num_probs")))
inclusion_mutations = sample(c(TRUE, FALSE), 7, c(mutation_probability, 1-mutation_probability), replace=T)
parent_inclusions[inclusion_mutations] = !parent_inclusions[inclusion_mutations] # Random switch of inherited gene
selected_vars = sample(c(parent_vars, variable_names), 10, prob=c(rep((1-mutation_probability) / length(parent_vars), length(parent_vars)),
rep(mutation_probability / 131, 131)))
while(length(unique(selected_vars)) < 10) {
selected_vars = unique(selected_vars)
selected_vars = c(selected_vars, sample(c(parent_vars, variable_names), 1, prob=c(rep((1-mutation_probability) / length(parent_vars), length(parent_vars)),
rep(mutation_probability / 131, 131))))
}
selected_types = variable_types[selected_vars]
inclusions = parent_inclusions
rare_cutoff = sample(c(metadata[parents[1],rare_cutoff],
metadata[parents[2],rare_cutoff],
0:1000), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 1001, 1001)))
r_max_depth = sample(c(metadata[parents[1],xgb_depth],
metadata[parents[2],xgb_depth],
4:30), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 27, 27)))
r_subsample = sample(c(metadata[parents[1],xgb_subsample],
metadata[parents[2],xgb_subsample],
seq(0.4, 1, 0.05)), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 13, 13)))
r_colsample = sample(c(metadata[parents[1],xgb_colsample],
metadata[parents[2],xgb_colsample],
seq(0.4, 1, 0.05)), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 13, 13)))
r_eta = sample(c(metadata[parents[1],xgb_eta],
metadata[parents[2],xgb_eta],
seq(0.01, 0.3, 0.005)), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 59, 59)))
r_rounds = sample(c(metadata[parents[1],xgb_rounds],
metadata[parents[2],xgb_rounds],
50:500), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 451, 451)))
r_seed = sample(c(metadata[parents[1],xgb_seed],
metadata[parents[2],xgb_seed],
0:2000), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 2001, 2001)))
output_metadata = selected_vars
names(output_metadata) = paste("Variable", 1:10)
output_metadata = c(output_metadata,
ints_as_cats=inclusions[1],
prods=inclusions[2],
sums=inclusions[3],
diffs=inclusions[4],
quots=inclusions[5],
probs=inclusions[6],
num_probs=inclusions[7],
rare_cutoff=rare_cutoff,
xgb_depth=r_max_depth,
xgb_subsample=r_subsample,
xgb_colsample=r_colsample,
xgb_eta=r_eta,
xgb_rounds=r_rounds,
xgb_seed=r_seed)
print.noquote(output_metadata)
temp_dt = subset(all_dt, select=c("ID", "target", "train_test", "basesplit", selected_vars))
# Treat integers as categoricals?
if(inclusions[1]) {
selected_types[selected_types=="int"] = "cat"
} else {
selected_types[selected_types=="int"] = "num"
}
# Now count the number of categoricals and numerics
cats = sum(selected_types=="cat")
catvars = names(selected_types[selected_types=="cat"])
nums = sum(selected_types=="num")
numvars = names(selected_types[selected_types=="num"])
# 2-way products of numerics
if(nums > 1 & inclusions[2]) {
for (v in numvars) {
for (w in numvars) {
if (which(numvars==v) < which(numvars==w)) {
temp_dt[,paste(v, "x", w, sep="") := temp_dt[,get(v)] * temp_dt[,get(w)]]
}
}
}
}
# 2-way sums of numerics
if(nums > 1 & inclusions[3]) {
for (v in numvars) {
for (w in numvars) {
if (which(numvars==v) < which(numvars==w)) {
temp_dt[,paste(v, "plus", w, sep="") := temp_dt[,get(v)] + temp_dt[,get(w)]]
}
}
}
}
# 2-way differences of numerics
if(nums > 1 & inclusions[4]) {
for (v in numvars) {
for (w in numvars) {
if (which(numvars==v) < which(numvars==w)) {
temp_dt[,paste(v, "minus", w, sep="") := temp_dt[,get(v)] - temp_dt[,get(w)]]
}
}
}
}
# 2-way quotients of numerics
if(nums > 1 & inclusions[5]) {
for (v in numvars) {
for (w in numvars) {
if(v!=w) {
quotvar = paste(v, "by", w, sep="")
temp_dt[,(quotvar) := temp_dt[,get(v)] / temp_dt[,get(w)]]
temp_dt[get(quotvar)==Inf, (quotvar) := NA]
}
}
}
}
# Rare value cut off on categoricals
if(cats > 0) {
for (v in catvars) {
raretable = table(temp_dt[,get(v)])
raretable = raretable[raretable < rare_cutoff]
temp_dt[as.character(get(v)) %in% names(raretable), (v) := NA]
}
}
# y probabilities of categoricals (we do this on splits to avoid leakage)
if(cats > 0 & inclusions[6]) {
for (v in catvars) {
probvar = paste("prob_", v, sep="")
# fill trainB and test with the trainA probabilities
probtab = table(temp_dt[basesplit=="trainA",get(v)], temp_dt[basesplit=="trainA",target])
probtab = (probtab / cbind(rowSums(probtab), rowSums(probtab)))[,2]
temp_dt[basesplit=="trainB" | basesplit=="test", (probvar) := probtab[as.character(all_dt[basesplit=="trainB" | basesplit=="test",get(v)])]]
# fill trainA with the trainB probabilities
probtab = table(temp_dt[basesplit=="trainB",get(v)], temp_dt[basesplit=="trainB",target])
probtab = (probtab / cbind(rowSums(probtab), rowSums(probtab)))[,2]
temp_dt[basesplit=="trainA", (probvar) := probtab[as.character(temp_dt[basesplit=="trainA",get(v)])]]
}
}
# bin numerics and take y probabilities of numeric bins (on splits)
if(nums > 0 & inclusions[7]) {
howmanynumvars = length(numvars)
i = 0
for (v in numvars) {
i = i + 1
cat(paste("Calculating num probs for", v, "(", i, "of", howmanynumvars, ") ..."))
probvar = paste("prob_", v, sep="")
probs_dt[,(v) := round(get(v), 1)]
# fill trainB and test with the trainA probabilities
probtab = table(probs_dt[basesplit=="trainA",get(v)])
probtab = probtab / nrow(probs_dt[basesplit=="trainA",])
probs_dt[basesplit=="trainB" | basesplit=="test", (probvar) := probtab[as.character(probs_dt[basesplit=="trainB" | basesplit=="test",get(v)])]]
# fill trainA with the trainB probabilities
probtab = table(probs_dt[basesplit=="trainB",get(v)])
probtab = probtab / nrow(probs_dt[basesplit=="trainB",])
probs_dt[basesplit=="trainA", (probvar) := probtab[as.character(probs_dt[basesplit=="trainA",get(v)])]]
}
}
# Convert all categoricals to integer values (XGBoost should be able to handle this okay, especially with rare value cutoff)
if(cats > 0) {
for (v in catvars) {
set(temp_dt, j=v, value=as.numeric(as.factor(temp_dt[,get(v)])))
}
}
# Now set up our DMatrix objects
trainingvars = setdiff(names(temp_dt), c("ID", "target", "train_test", "basesplit"))
dtrainA = xgb.DMatrix(as.matrix(subset(temp_dt, select=trainingvars, subset=basesplit=="trainA")), label=temp_dt[basesplit=="trainA",target], missing=NA)
dtrainB = xgb.DMatrix(as.matrix(subset(temp_dt, select=trainingvars, subset=basesplit=="trainB")), label=temp_dt[basesplit=="trainB",target], missing=NA)
dtest = xgb.DMatrix(as.matrix(subset(temp_dt, select=trainingvars, subset=basesplit=="test")), missing=NA)
# Set XGB parameters
param = list(objective = "binary:logistic", eta = r_eta,
max_depth = r_max_depth, subsample = r_subsample, colsample_bytree=r_colsample,
metrics = "logloss")
# Train model A and make predictions on B and test
set.seed(r_seed)
bstA = xgboost(metrics = "logloss", nrounds=r_rounds, params=param, verbose=0, data=dtrainA)
temp_dt[basesplit=="trainB", predictions := predict(bstA, dtrainB)]
temp_dt[basesplit=="test", predictions := predict(bstA, dtest)]
# Train model B and make predictions on A
set.seed(r_seed)
bstB = xgboost(metrics = "logloss", nrounds=r_rounds, params=param, verbose=0, data=dtrainB)
temp_dt[basesplit=="trainA", predictions := predict(bstB, dtrainA)]
# Estimate trainA and trainB error
trainA_logloss = quicklogloss(temp_dt[basesplit=="trainA", predictions], temp_dt[basesplit=="trainA", target])
trainB_logloss = quicklogloss(temp_dt[basesplit=="trainB", predictions], temp_dt[basesplit=="trainB", target])
print.noquote(paste("Mean loss:", round(mean(c(trainA_logloss, trainB_logloss)), 4),
"| trainA loss:", round(trainA_logloss, 4),
"| trainB loss:", round(trainB_logloss, 4)))
# Output information to file
output_metadata = c(filename=predictions_filename, max_logloss = max(c(trainA_logloss, trainB_logloss)),
A_logloss=trainA_logloss, B_logloss=trainB_logloss, output_metadata)
write.table(t(output_metadata), metadata_file, quote=F, row.names=F, sep=",", append=T, col.names=F)
# Output predictions to file
write.csv(temp_dt[,predictions], predictions_filename, quote=F, row.names=F)
# Tidy up the bigger objects before going again
rm(list=c("temp_dt", "dtrainA", "dtrainB", "dtest"))
gc()
}
| /BNP_Evolver.R | permissive | ajrwhite/KaggleBNP | R | false | false | 14,293 | r | # BNP Random Evolver
# Made available under Apache 2.0 License
# By Andy White
# This script evolves Generation X-1 into Generation X
# It saves each set of predictions as a CSV
# and the parameters and error scores in a metadata CSV file
# Leave running overnight and then run all features through
# a master model in the morning!
# Load required libraries
library(data.table)
library(Matrix)
library(xgboost)
######################################################################################
## SETUP PARAMETERS (Set these for each session)
######################################################################################
setwd("C:/Users/Andrew.000/Desktop/MSc Data Science and Analytics/KAGGLE/BNP Paribas")
predictions_file_prefix = "base_GEN010_"
metadata_file = "base_metadata_all_gens.csv"
number_of_parents = 100
number_of_children = 100
mutation_probability = 0.125
######################################################################################
# Function for calculating logloss
# Compute the mean binomial log-loss of predicted probabilities
# against 0/1 outcomes.
#
# Args:
#   preds:  numeric vector of predicted probabilities in [0, 1].
#   actual: numeric 0/1 vector of observed outcomes, same length as preds.
#
# Returns: a single number, the mean negative log-likelihood.
quicklogloss <- function(preds, actual) {
  # Clamp probabilities away from exactly 0 and 1 so log() stays finite.
  # pmin/pmax also guards any value that strays outside [0, 1], which the
  # original equality checks (preds==0 / preds==1) did not.
  preds <- pmax(pmin(preds, 0.999999), 0.000001)
  -sum(actual * log(preds) + (1 - actual) * log(1 - preds)) / length(preds)
}
# Load data and mark trainA and trainB splits
# (fread keeps data.table semantics; empty strings are read as NA)
train_dt = fread("train.csv", na.strings=c(NA, "", "NA"))
test_dt = fread("test.csv", na.strings=c(NA, "", "NA"))
train_dt[,train_test := "train"]
test_dt[,train_test := "test"]
# Stack train and test; fill=T pads columns missing from either table
all_dt = rbind(train_dt, test_dt, fill=T)
rm(list=c("train_dt", "test_dt"))
setkey(all_dt, ID)
n = nrow(all_dt)
# Randomly assign half the training IDs to split A, the rest to split B
trainA = sample(all_dt[train_test=="train",ID], floor(length(all_dt[train_test=="train",ID]) / 2))
all_dt[train_test=="train" & ID %in% trainA, basesplit := "trainA"]
all_dt[train_test=="train" & !(ID %in% trainA), basesplit := "trainB"]
all_dt[train_test=="test", basesplit := "test"]
# Classify every feature column: default "num", character -> "cat",
# integer -> "int" (ints may later be treated as either)
variable_names = setdiff(names(all_dt), c("ID", "target", "train_test", "basesplit"))
variable_types = rep("num", length(variable_names))
variable_types[sapply(all_dt, is.character)[variable_names]] = "cat"
variable_types[sapply(all_dt, is.integer)[variable_names]] = "int"
names(variable_types) = variable_names
# Load metadata
# Parents are the number_of_parents rows with the lowest max_logloss
metadata = fread(metadata_file)
parent_max = sort(metadata[,max_logloss])[number_of_parents]
parent_rows = which(metadata[,max_logloss] <= parent_max)
random_seeds = sample(1:5000, number_of_children)
for (i in 1:number_of_children) {
set.seed(random_seeds[i])
print.noquote(paste("Random model:", i))
# Set up file names
file_suffix = formatC(i, width=3, flag="0")
predictions_filename = paste(predictions_file_prefix, file_suffix, ".csv", sep="")
# Choose random parameters
# These are: -variable choice (always 8 variables)
# -treat integers as categoricals?
# -include 2-way products of numerics?
# -include 2-way sums of numerics?
# -include 2-way differences of numerics?
# -include 2-way quotients of numerics? (N.B. order matters so this is calculated in both directions)
# -rare value cut off [0:1000]
# -include probabilities of categoricals?
# -include probabilities of binned numerics?
# -XGB params:
# - max_depth [4:30]
# - subsample [0.4:1.0]
# - colsample_bytree [0.4:1.0]
# - eta [0.01:0.20]
# - rounds [50:500]
# - seed[0:2000]
# Set up parents
parents = sample(parent_rows, 2)
parent_vars = c(as.character(subset(metadata[parents[1],], select=paste("Variable", 1:10))),
as.character(subset(metadata[parents[2],], select=paste("Variable", 1:10))))
parent_vars = parent_vars[parent_vars != "NA" & parent_vars != ""]
parent_inclusions = as.logical(subset(metadata[parents[1],],
select=c("ints_as_cats", "prods", "sums", "diffs", "quots", "probs", "num_probs"))) |
as.logical(subset(metadata[parents[2],], select=c("ints_as_cats", "prods", "sums", "diffs", "quots", "probs", "num_probs")))
inclusion_mutations = sample(c(TRUE, FALSE), 7, c(mutation_probability, 1-mutation_probability), replace=T)
parent_inclusions[inclusion_mutations] = !parent_inclusions[inclusion_mutations] # Random switch of inherited gene
selected_vars = sample(c(parent_vars, variable_names), 10, prob=c(rep((1-mutation_probability) / length(parent_vars), length(parent_vars)),
rep(mutation_probability / 131, 131)))
while(length(unique(selected_vars)) < 10) {
selected_vars = unique(selected_vars)
selected_vars = c(selected_vars, sample(c(parent_vars, variable_names), 1, prob=c(rep((1-mutation_probability) / length(parent_vars), length(parent_vars)),
rep(mutation_probability / 131, 131))))
}
selected_types = variable_types[selected_vars]
inclusions = parent_inclusions
rare_cutoff = sample(c(metadata[parents[1],rare_cutoff],
metadata[parents[2],rare_cutoff],
0:1000), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 1001, 1001)))
r_max_depth = sample(c(metadata[parents[1],xgb_depth],
metadata[parents[2],xgb_depth],
4:30), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 27, 27)))
r_subsample = sample(c(metadata[parents[1],xgb_subsample],
metadata[parents[2],xgb_subsample],
seq(0.4, 1, 0.05)), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 13, 13)))
r_colsample = sample(c(metadata[parents[1],xgb_colsample],
metadata[parents[2],xgb_colsample],
seq(0.4, 1, 0.05)), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 13, 13)))
r_eta = sample(c(metadata[parents[1],xgb_eta],
metadata[parents[2],xgb_eta],
seq(0.01, 0.3, 0.005)), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 59, 59)))
r_rounds = sample(c(metadata[parents[1],xgb_rounds],
metadata[parents[2],xgb_rounds],
50:500), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 451, 451)))
r_seed = sample(c(metadata[parents[1],xgb_seed],
metadata[parents[2],xgb_seed],
0:2000), 1, prob=c((1-mutation_probability)/2, (1-mutation_probability)/2, rep(mutation_probability / 2001, 2001)))
output_metadata = selected_vars
names(output_metadata) = paste("Variable", 1:10)
output_metadata = c(output_metadata,
ints_as_cats=inclusions[1],
prods=inclusions[2],
sums=inclusions[3],
diffs=inclusions[4],
quots=inclusions[5],
probs=inclusions[6],
num_probs=inclusions[7],
rare_cutoff=rare_cutoff,
xgb_depth=r_max_depth,
xgb_subsample=r_subsample,
xgb_colsample=r_colsample,
xgb_eta=r_eta,
xgb_rounds=r_rounds,
xgb_seed=r_seed)
print.noquote(output_metadata)
temp_dt = subset(all_dt, select=c("ID", "target", "train_test", "basesplit", selected_vars))
# Treat integers as categoricals?
if(inclusions[1]) {
selected_types[selected_types=="int"] = "cat"
} else {
selected_types[selected_types=="int"] = "num"
}
# Now count the number of categoricals and numerics
cats = sum(selected_types=="cat")
catvars = names(selected_types[selected_types=="cat"])
nums = sum(selected_types=="num")
numvars = names(selected_types[selected_types=="num"])
# 2-way products of numerics
if(nums > 1 & inclusions[2]) {
for (v in numvars) {
for (w in numvars) {
if (which(numvars==v) < which(numvars==w)) {
temp_dt[,paste(v, "x", w, sep="") := temp_dt[,get(v)] * temp_dt[,get(w)]]
}
}
}
}
# 2-way sums of numerics
if(nums > 1 & inclusions[3]) {
for (v in numvars) {
for (w in numvars) {
if (which(numvars==v) < which(numvars==w)) {
temp_dt[,paste(v, "plus", w, sep="") := temp_dt[,get(v)] + temp_dt[,get(w)]]
}
}
}
}
# 2-way differences of numerics
if(nums > 1 & inclusions[4]) {
for (v in numvars) {
for (w in numvars) {
if (which(numvars==v) < which(numvars==w)) {
temp_dt[,paste(v, "minus", w, sep="") := temp_dt[,get(v)] - temp_dt[,get(w)]]
}
}
}
}
# 2-way quotients of numerics
if(nums > 1 & inclusions[5]) {
for (v in numvars) {
for (w in numvars) {
if(v!=w) {
quotvar = paste(v, "by", w, sep="")
temp_dt[,(quotvar) := temp_dt[,get(v)] / temp_dt[,get(w)]]
temp_dt[get(quotvar)==Inf, (quotvar) := NA]
}
}
}
}
# Rare value cut off on categoricals
if(cats > 0) {
for (v in catvars) {
raretable = table(temp_dt[,get(v)])
raretable = raretable[raretable < rare_cutoff]
temp_dt[as.character(get(v)) %in% names(raretable), (v) := NA]
}
}
# y probabilities of categoricals (we do this on splits to avoid leakage)
if(cats > 0 & inclusions[6]) {
for (v in catvars) {
probvar = paste("prob_", v, sep="")
# fill trainB and test with the trainA probabilities
probtab = table(temp_dt[basesplit=="trainA",get(v)], temp_dt[basesplit=="trainA",target])
probtab = (probtab / cbind(rowSums(probtab), rowSums(probtab)))[,2]
temp_dt[basesplit=="trainB" | basesplit=="test", (probvar) := probtab[as.character(all_dt[basesplit=="trainB" | basesplit=="test",get(v)])]]
# fill trainA with the trainB probabilities
probtab = table(temp_dt[basesplit=="trainB",get(v)], temp_dt[basesplit=="trainB",target])
probtab = (probtab / cbind(rowSums(probtab), rowSums(probtab)))[,2]
temp_dt[basesplit=="trainA", (probvar) := probtab[as.character(temp_dt[basesplit=="trainA",get(v)])]]
}
}
# bin numerics and take y probabilities of numeric bins (on splits)
if(nums > 0 & inclusions[7]) {
howmanynumvars = length(numvars)
i = 0
for (v in numvars) {
i = i + 1
cat(paste("Calculating num probs for", v, "(", i, "of", howmanynumvars, ") ..."))
probvar = paste("prob_", v, sep="")
probs_dt[,(v) := round(get(v), 1)]
# fill trainB and test with the trainA probabilities
probtab = table(probs_dt[basesplit=="trainA",get(v)])
probtab = probtab / nrow(probs_dt[basesplit=="trainA",])
probs_dt[basesplit=="trainB" | basesplit=="test", (probvar) := probtab[as.character(probs_dt[basesplit=="trainB" | basesplit=="test",get(v)])]]
# fill trainA with the trainB probabilities
probtab = table(probs_dt[basesplit=="trainB",get(v)])
probtab = probtab / nrow(probs_dt[basesplit=="trainB",])
probs_dt[basesplit=="trainA", (probvar) := probtab[as.character(probs_dt[basesplit=="trainA",get(v)])]]
}
}
# Convert all categoricals to integer values (XGBoost should be able to handle this okay, especially with rare value cutoff)
if(cats > 0) {
for (v in catvars) {
set(temp_dt, j=v, value=as.numeric(as.factor(temp_dt[,get(v)])))
}
}
# Now set up our DMatrix objects
trainingvars = setdiff(names(temp_dt), c("ID", "target", "train_test", "basesplit"))
dtrainA = xgb.DMatrix(as.matrix(subset(temp_dt, select=trainingvars, subset=basesplit=="trainA")), label=temp_dt[basesplit=="trainA",target], missing=NA)
dtrainB = xgb.DMatrix(as.matrix(subset(temp_dt, select=trainingvars, subset=basesplit=="trainB")), label=temp_dt[basesplit=="trainB",target], missing=NA)
dtest = xgb.DMatrix(as.matrix(subset(temp_dt, select=trainingvars, subset=basesplit=="test")), missing=NA)
# Set XGB parameters
param = list(objective = "binary:logistic", eta = r_eta,
max_depth = r_max_depth, subsample = r_subsample, colsample_bytree=r_colsample,
metrics = "logloss")
# Train model A and make predictions on B and test
set.seed(r_seed)
bstA = xgboost(metrics = "logloss", nrounds=r_rounds, params=param, verbose=0, data=dtrainA)
temp_dt[basesplit=="trainB", predictions := predict(bstA, dtrainB)]
temp_dt[basesplit=="test", predictions := predict(bstA, dtest)]
# Train model B and make predictions on A
set.seed(r_seed)
bstB = xgboost(metrics = "logloss", nrounds=r_rounds, params=param, verbose=0, data=dtrainB)
temp_dt[basesplit=="trainA", predictions := predict(bstB, dtrainA)]
# Estimate trainA and trainB error
trainA_logloss = quicklogloss(temp_dt[basesplit=="trainA", predictions], temp_dt[basesplit=="trainA", target])
trainB_logloss = quicklogloss(temp_dt[basesplit=="trainB", predictions], temp_dt[basesplit=="trainB", target])
print.noquote(paste("Mean loss:", round(mean(c(trainA_logloss, trainB_logloss)), 4),
"| trainA loss:", round(trainA_logloss, 4),
"| trainB loss:", round(trainB_logloss, 4)))
# Output information to file
output_metadata = c(filename=predictions_filename, max_logloss = max(c(trainA_logloss, trainB_logloss)),
A_logloss=trainA_logloss, B_logloss=trainB_logloss, output_metadata)
write.table(t(output_metadata), metadata_file, quote=F, row.names=F, sep=",", append=T, col.names=F)
# Output predictions to file
write.csv(temp_dt[,predictions], predictions_filename, quote=F, row.names=F)
# Tidy up the bigger objects before going again
rm(list=c("temp_dt", "dtrainA", "dtrainB", "dtest"))
gc()
}
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.5) on the breast
# training set and append the fitted glmnet path to a text log file.
library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/breast.csv", header = TRUE, sep = ",")
# Column 1 is the response; columns 4+ are the predictors.
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# Named `cvfit` rather than `glm`: the original name masked stats::glm().
cvfit = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.5,
                  family = "gaussian", standardize = FALSE)
# Append the fitted path to the model log, then restore console output.
sink('./Model/EN/Lasso/breast/breast_058.txt', append = TRUE)
print(cvfit$glmnet.fit)
sink()
| /Model/EN/Lasso/breast/breast_058.R | no_license | leon1003/QSMART | R | false | false | 351 | r | library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.5,family="gaussian",standardize=FALSE)
sink('./Model/EN/Lasso/breast/breast_058.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{FiktivDelelinje}
\alias{FiktivDelelinje}
\title{FiktivDelelinje}
\format{
\if{html}{\out{<div class="sourceCode">}}\preformatted{Simple feature collection with 1956 features and 1 field
Geometry type: LINESTRING
Dimension: XY
Bounding box: xmin: -94276.32 ymin: 6436015 xmax: 1119065 ymax: 7956336
Projected CRS: ETRS89 / UTM zone 33N
# A tibble: 1,956 × 2
oppdateringsdato geometry
* <date> <LINESTRING [m]>
1 2017-01-05 (1e+06 7866606, 1e+06 7869714)
2 2017-01-05 (1e+06 7866387, 1e+06 7866606)
3 2017-01-05 (1e+06 7865264, 1e+06 7866387)
4 2017-01-05 (1e+06 7865261, 1e+06 7865264)
5 2017-01-05 (8e+05 7875160, 8e+05 7877324)
6 2017-01-05 (1e+06 7874608, 1e+06 7874640, 1e+06 7874930, 1e+06 7876809)
7 2017-01-05 (8e+05 7874601, 8e+05 7875160)
8 2017-01-05 (8e+05 7874095, 8e+05 7874601)
9 2017-01-05 (1e+06 7873838, 1e+06 7874608)
10 2017-01-05 (8e+05 7873663, 8e+05 7874095)
# ℹ 1,946 more rows
# ℹ Use `print(n = ...)` to see more rows
}\if{html}{\out{</div>}}
}
\source{
\code{Basisdata_0000_Norge_25833_N1000Arealdekke_GML.gml}
}
\usage{
FiktivDelelinje
}
\description{
FiktivDelelinje
}
\author{
© \href{https://kartverket.no/}{Kartverket}
}
\keyword{datasets}
| /man/FiktivDelelinje.Rd | permissive | hmalmedal/N1000 | R | false | true | 1,739 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{FiktivDelelinje}
\alias{FiktivDelelinje}
\title{FiktivDelelinje}
\format{
\if{html}{\out{<div class="sourceCode">}}\preformatted{Simple feature collection with 1956 features and 1 field
Geometry type: LINESTRING
Dimension: XY
Bounding box: xmin: -94276.32 ymin: 6436015 xmax: 1119065 ymax: 7956336
Projected CRS: ETRS89 / UTM zone 33N
# A tibble: 1,956 × 2
oppdateringsdato geometry
* <date> <LINESTRING [m]>
1 2017-01-05 (1e+06 7866606, 1e+06 7869714)
2 2017-01-05 (1e+06 7866387, 1e+06 7866606)
3 2017-01-05 (1e+06 7865264, 1e+06 7866387)
4 2017-01-05 (1e+06 7865261, 1e+06 7865264)
5 2017-01-05 (8e+05 7875160, 8e+05 7877324)
6 2017-01-05 (1e+06 7874608, 1e+06 7874640, 1e+06 7874930, 1e+06 7876809)
7 2017-01-05 (8e+05 7874601, 8e+05 7875160)
8 2017-01-05 (8e+05 7874095, 8e+05 7874601)
9 2017-01-05 (1e+06 7873838, 1e+06 7874608)
10 2017-01-05 (8e+05 7873663, 8e+05 7874095)
# ℹ 1,946 more rows
# ℹ Use `print(n = ...)` to see more rows
}\if{html}{\out{</div>}}
}
\source{
\code{Basisdata_0000_Norge_25833_N1000Arealdekke_GML.gml}
}
\usage{
FiktivDelelinje
}
\description{
FiktivDelelinje
}
\author{
© \href{https://kartverket.no/}{Kartverket}
}
\keyword{datasets}
|
# Fit a robust variance meta-regression (robumeta::robu) and run Wald
# tests on its coefficients with two small-sample corrections.
#
# Bug fix: the original computed four Wald test results but discarded
# the first three (dead stores) and returned only the last assignment
# invisibly; all four results are now returned together.
#
# Args:
#   full_formula: character string / formula for the full model.
#   null_formula: currently unused; retained for interface
#                 compatibility with existing callers.
#   dat:          data frame containing the effect sizes, their
#                 variances (`v`), and a `study` id column.
#
# Returns: a named list of the four Wald test results.
estimate <- function(full_formula, null_formula, dat){
  full_mod <- robu(as.formula(full_formula),
                   studynum = study,
                   var.eff.size = v,
                   small = FALSE,
                   data = dat)
  # Cluster-robust covariance matrices: CR0 (naive) and CR2 (corrected)
  cov_mat_cr0 <- vcovCR(full_mod, type = "CR0")
  cov_mat_cr2 <- vcovCR(full_mod, type = "CR2")
  # Single coefficient ------------------------------------------------------
  res_s_naive_b2 <- Wald_test(full_mod,
                              constraints = constrain_zero(2),
                              vcov = cov_mat_cr0,
                              test = "Naive-F")
  res_s_htz_b2 <- Wald_test(full_mod,
                            constraints = constrain_zero(2),
                            vcov = cov_mat_cr2,
                            test = "HTZ")
  # multiple contrast hypothesis --------------------------------------------
  res_mch_naive <- Wald_test(full_mod,
                             constraints = constrain_zero(2:6),
                             vcov = cov_mat_cr0,
                             test = "Naive-F")
  res_mch_htz <- Wald_test(full_mod,
                           constraints = constrain_zero(2:6),
                           vcov = cov_mat_cr2,
                           test = "HTZ")
  # Return everything that was computed so callers can use all results.
  list(single_naive = res_s_naive_b2,
       single_htz = res_s_htz_b2,
       mch_naive = res_mch_naive,
       mch_htz = res_mch_htz)
}
| /prep_sim/estimation/scratch/1_estimation_functions.R | no_license | meghapsimatrix/dissertation_sim | R | false | false | 1,315 | r |
estimate <- function(full_formula, null_formula, dat){
full_mod <- robu(as.formula(full_formula),
studynum = study,
var.eff.size = v,
small = FALSE,
data = dat)
# Single coefficient ------------------------------------------------------
cov_mat_cr0 <- vcovCR(full_mod, type = "CR0")
cov_mat_cr2 <- vcovCR(full_mod, type = "CR2")
res_s_naive_b2 <- Wald_test(full_mod,
constraints = constrain_zero(2),
vcov = cov_mat_cr0,
test = "Naive-F")
res_s_htz_b2 <- Wald_test(full_mod,
constraints = constrain_zero(2),
vcov = cov_mat_cr2,
test = "HTZ")
# multiple contrast hypothesis --------------------------------------------
res_mch_naive <- Wald_test(full_mod,
constraints = constrain_zero(2:6),
vcov = cov_mat_cr0,
test = "Naive-F")
res_mch_htz <- Wald_test(full_mod,
constraints = constrain_zero(2:6),
vcov = cov_mat_cr2,
test = "HTZ")
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{pipeR-package}
\alias{pipeR-package}
\title{The pipeR package}
\description{
The pipeR package
}
\details{
pipeR provides various styles of function chaining methods: Pipe operator,
Pipe object, and pipeline function, each representing a distinct pipeline
model yet sharing almost a common set of features: A value can be piped to
the first unnamed argument of a function, to dot symbol in an enclosed expression,
by formula as lambda expression, for side-effect, and with assignment.
The set of syntax is designed to make the pipeline more readable and fluent to
a wide variety of operations.
pipeR Tutorial (\url{http://renkun.me/pipeR-tutorial}) is a highly recommended
complete guide to pipeR.
}
| /man/pipeR-package.Rd | permissive | BrianDiggs/pipeR | R | false | false | 779 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{pipeR-package}
\alias{pipeR-package}
\title{The pipeR package}
\description{
The pipeR package
}
\details{
pipeR provides various styles of function chaining methods: Pipe operator,
Pipe object, and pipeline function, each representing a distinct pipeline
model yet sharing almost a common set of features: A value can be piped to
the first unnamed argument of a function, to dot symbol in an enclosed expression,
by formula as lambda expression, for side-effect, and with assignment.
The set of syntax is designed to make the pipeline more readable and fluent to
a wide variety of operations.
pipeR Tutorial (\url{http://renkun.me/pipeR-tutorial}) is a highly recommended
complete guide to pipeR.
}
|
train_data_corpus = corpus(train_data)
# Tokenize a quanteda corpus into words.
#
# Bug fix: the original body ignored its `data` argument and always
# tokenized the global `train_data_corpus`; it now tokenizes `data`.
#
# Args:
#   data: a quanteda corpus object.
#
# Returns: a quanteda tokens object with numbers and URLs removed and
# hyphenated words split.
tokenizer <- function(data) {
  tokens(data,
         what = "fastestword",
         remove_numbers = TRUE,
         remove_url = TRUE,
         split_hyphens = TRUE)
}
# Tokenize the corpus once, then derive 1/2/3-gram token sets from it.
train_data_corpus = tokenizer(train_data_corpus)
print("tokenization DONE")
end3 = Sys.time()
# Free the raw text; only the tokens are needed from here on.
rm(train_data)
unigrams_var = tokens_ngrams(train_data_corpus, n = 1, concatenator = " ")
print("tokens 1 DONE")
bigrams_var = tokens_ngrams(train_data_corpus, n = 2, concatenator = " ")
print("tokens 2 DONE")
trigrams_var = tokens_ngrams(train_data_corpus, n = 3, concatenator = " ")
print("tokens 3 DONE")
end4 = Sys.time()
# Flatten each tokens object to a character vector, wrapped in a list.
Unigram_Words = as.character(unigrams_var) %>% list()
Bigram_Words = as.character(bigrams_var) %>% list()
Trigram_Words = as.character(trigrams_var) %>% list()
print("converting to list DONE")
end5 = Sys.time()
# Drop the large intermediates to free memory.
rm(train_data_corpus)
rm(unigrams_var)
rm(bigrams_var)
rm(trigrams_var) | /tokenization.R | no_license | Gomathy-Sankar-K-11101998/Shiny-Text-Prediction-App | R | false | false | 932 | r | train_data_corpus = corpus(train_data)
tokenizer <- function(data) {
tokens(train_data_corpus,
what = "fastestword",
remove_numbers = TRUE,
remove_url = TRUE,
split_hyphens = TRUE)
}
train_data_corpus = tokenizer(train_data_corpus)
print("tokenization DONE")
end3 = Sys.time()
rm(train_data)
unigrams_var = tokens_ngrams(train_data_corpus, n = 1, concatenator = " ")
print("tokens 1 DONE")
bigrams_var = tokens_ngrams(train_data_corpus, n = 2, concatenator = " ")
print("tokens 2 DONE")
trigrams_var = tokens_ngrams(train_data_corpus, n = 3, concatenator = " ")
print("tokens 3 DONE")
end4 = Sys.time()
Unigram_Words = as.character(unigrams_var) %>% list()
Bigram_Words = as.character(bigrams_var) %>% list()
Trigram_Words = as.character(trigrams_var) %>% list()
print("converting to list DONE")
end5 = Sys.time()
rm(train_data_corpus)
rm(unigrams_var)
rm(bigrams_var)
rm(trigrams_var) |
#' Test whether every range in a GRanges object lies exactly on
#' bin boundaries
#'
#' @param x GRanges object
#' @param binSize Integer (numeric) bin width the ranges should
#'  be snapped to.
#'
#' @return Logical
#'
#' @importFrom GenomicRanges start end
#'
#' @noRd
.checkSnappedRanges <- function(x, binSize) {
    ## Both endpoints of every range must be whole multiples of binSize.
    bounds <- c(start(x), end(x))
    all((bounds / binSize) %% 1 == 0)
}
#' Test whether both anchors of a GInteractions object have been
#' snapped to bins
#'
#' @param x GInteractions object
#' @param binSize Integer (numeric) bin width the anchors should
#'  be snapped to.
#'
#' @return Logical
#'
#' @importFrom S4Vectors first second
#'
#' @noRd
.checkSnappedPairs <- function(x, binSize) {
    ## Snapped only when both anchor sets are snapped.
    .checkSnappedRanges(x = first(x), binSize = binSize) &
        .checkSnappedRanges(x = second(x), binSize = binSize)
}
#' Check that a GRanges object has been binned
#'
#' Starts are 0-based for interfacing with the
#' `strawr` package. Therefore, widths of correctly
#' binned objects will be `binSize+1`.
#'
#' @param x GRanges object
#' @param binSize Integer (numeric) describing
#'  the new size of each range.
#'
#' @return Logical
#'
#' @importFrom GenomicRanges width
#'
#' @noRd
.checkBinnedRanges <- function(x, binSize) {
    ## Idiomatic replacement for `length(which(...)) == 0`:
    ## TRUE iff no range deviates from the expected width
    ## (vacuously TRUE for an empty object, as before).
    !any(width(x) != binSize + 1)
}
#' Check that a GInteractions object has been binned
#'
#' Starts are 0-based for interfacing with the
#' `strawr` package. Therefore, widths of correctly
#' binned objects will be `binSize+1`.
#'
#' @param x GInteractions object
#' @param binSize Integer (numeric) describing
#'  the new size of each range.
#'
#' @return Logical
#'
#' @importFrom S4Vectors first second
#'
#' @noRd
.checkBinnedPairs <- function(x, binSize) {
    ## Binned only when both anchor sets are binned.
    .checkBinnedRanges(x = first(x), binSize = binSize) &
        .checkBinnedRanges(x = second(x), binSize = binSize)
}
#' Return the mode(s)
#' @param x numeric vector
#' @returns A vector of the mode(s)
#' @noRd
.modes <- function(x) {
    ## Count occurrences of each distinct value, then keep every
    ## value that reaches the maximum count (ties are all returned).
    vals <- unique(x)
    counts <- tabulate(match(x, vals))
    vals[counts == max(counts)]
}
#' Check input types
#'
#' Derived types:
#' string - length one character vector
#' number - length one numeric vector
#' boolean - a length one logical vector that is not NA
#'
#' @param types Named vector or list of arguments and their types
#' @param env Environment in which the checked arguments are looked
#'  up (defaults to the caller's frame).
#' @importFrom rlang abort
#' @importFrom glue glue
#' @importFrom assertthat is.string is.number is.flag
#' @returns NULL or an error message incorrect types.
#' @noRd
.checkTypes <- function(types, env=parent.frame()) {
    args <- names(types)
    for (i in seq_along(types)) {
        ## Fetch the argument value once per iteration (the original
        ## re-evaluated get() inside every predicate).
        value <- get(args[i], envir=env)
        if (types[i] == "string") {
            ## is.string() is already scalar; no any() wrapper needed.
            if (!is.string(value)) {
                abort(glue(
                    "{args[i]} is not a string \\
                    (a length one character vector)."
                ))
            }
        }
        if (types[i] == "number") {
            if (!is.number(value)) {
                abort(glue(
                    "{args[i]} is not a number \\
                    (a length one numeric vector)."
                ))
            }
        }
        if (types[i] == "boolean") {
            ## is.flag() guarantees length one, so the NA test is a
            ## scalar check and short-circuiting `||` is safe.
            if (!is.flag(value) || is.na(value)) {
                abort(glue(
                    "{args[i]} is not a boolean \\
                    (a length one logical vector that is not NA)."
                ))
            }
        }
    }
}
## TODO: Write method for combining
## HDF5 data into a single file in blocks
#' Check that a list of objects contains
#' the same data in a slot.
#' @param x List of objects.
#' @param FUN Slot accessor function.
#' @returns Logical that all objects contain the same
#' data or not.
#' @noRd
.checkEqualSlot <- function(x, FUN) {
    ## Extract the slot from every object, then compare each
    ## extraction against the first (an empty list passes trivially).
    slots <- lapply(x, FUN)
    matches <- vapply(slots, function(s) identical(s, slots[[1L]]), logical(1L))
    all(matches)
}
#' Internal rbind method for InteractionMatrix/InteractionArray
#'
#' Row-binds objects of the same derived class, refusing to combine
#' objects whose metadata or colData differ, and re-attaching the
#' shared metadata/colData to the result.
#' @param ... InteractionMatrix objects to be combined row-wise.
#' All objects must be the same class.
#' @param deparse.level An integer scalar; see `?base::rbind` for
#' a description of this argument.
#' @importFrom S4Vectors metadata `metadata<-`
#' @importFrom SummarizedExperiment colData `colData<-`
#' @importFrom rlang abort
#' @importFrom glue glue
#' @noRd
.rbindIsetDerived <- function(..., deparse.level=1) {
    args <- unname(list(...))
    type <- class(args[[1L]]) # get class name
    ## Check equivalent metadata before binding
    if (!.checkEqualSlot(args, metadata)) {
        abort(glue("Can't rbind {type} \\
                   objects with different metadata."))
    }
    ## Check equivalent colData before binding
    if (!.checkEqualSlot(args, colData)) {
        abort(glue("Can't rbind {type} \\
                   objects with different colData."))
    }
    ## Delegate the actual binding to the parent class's method via
    ## callNextMethod(), then restore the subclass and the (shared)
    ## metadata and colData taken from the first argument.
    ## NOTE(review): unlike .cbindIsetDerived, errors raised by
    ## callNextMethod() are not caught here -- confirm whether that
    ## asymmetry is intended.
    ans <- new(type, callNextMethod())
    metadata(ans) <- metadata(args[[1L]])
    colData(ans) <- colData(args[[1L]])
    ans
}
#' Internal cbind method for InteractionMatrix/InteractionArray
#'
#' Column-binds objects of the same derived class, refusing to combine
#' objects whose metadata differ, and re-attaching the shared metadata
#' to the result. (colData is not compared here, since columns are
#' being concatenated rather than matched.)
#' @param ... InteractionMatrix objects to be combined column-wise.
#' All objects must be the same class.
#' @param deparse.level An integer scalar; see `?base::cbind` for
#' a description of this argument.
#' @importFrom S4Vectors metadata `metadata<-`
#' @importFrom rlang abort
#' @importFrom glue glue
#' @noRd
.cbindIsetDerived <- function(..., deparse.level=1) {
    args <- unname(list(...))
    type <- class(args[[1L]]) # get class name
    ## Check equivalent metadata before binding
    if (!.checkEqualSlot(args, metadata)) {
        abort(glue("Can't cbind {type} \\
                   objects with different metadata."))
    }
    ## Delegate binding to the parent class's method; any failure is
    ## re-raised from a caller frame so the error points at user code.
    ## NOTE(review): parent.frame(4L) hard-codes the handler's call
    ## depth -- verify it still points at the intended frame if the
    ## surrounding dispatch changes.
    tryCatch({
        ans <- new(type, callNextMethod())
    }, error=\(e) {
        abort(e$message, call=parent.frame(4L))
    })
    metadata(ans) <- metadata(args[[1L]])
    ans
}
#' Stop if buffer is not the same
#' @param b1 buffer (numeric) from first object
#' @param b2 buffer (numeric) from second object
#' @importFrom rlang abort
#' @return NULL when the buffers match; otherwise an error.
#' @noRd
.checkBuffer <- function(b1, b2) {
    buffersMatch <- b1 == b2
    if (!buffersMatch) {
        abort("`buffer` must be the same for both selections.")
    }
}
#' Get binSize or throw error
#'
#' Stored ranges use 0-based starts for interfacing with `strawr`, so
#' the recorded width is binSize + 1; subtracting 1 recovers the bin
#' size.
#'
#' @param x GInteractions object.
#' @importFrom S4Vectors first second
#' @importFrom IRanges width
#' @importFrom rlang abort
#' @noRd
.getBinSize <- function(x) {
    binSizes <- unique(width(regions(x))) - 1
    ## Exactly one unique width means the object is uniformly binned.
    if (length(binSizes) == 1L) {
        return(binSizes)
    }
    abort(c("All ranges in `x` must be equal widths.",
            "i"="Use `assignToBins()` to bin into equal widths."))
}
#' Function to extract Nindex from system call
#' Modified from S4Arrays/DelayedArray
#' "extract_Nindex_from_syscall"
#'
#' Reconstructs the list of subscripts the user supplied to an
#' `[`-style call (e.g. `x[i, j, k, l]`), evaluating each one in the
#' caller's frame.
#'
#' @param call sys.call()
#' @param eframe environment frame (i.e. `parent.frame()`)
#' @importFrom utils tail
#' @returns Nindex, a list of user supplied subscripts.
#' Missing subscripts are set to `NULL`.
#' @noRd
.getNindexFromSyscall <- function(call, eframe) {
    ## Positions 1 and 2 of the call are the function and the object
    ## being subset; everything after is a subscript.
    Nindex <- lapply(seq_len(length(call) - 2L), \(i) {
        subscript <- call[[2L + i]]
        ## Empty positions such as `x[, j]` arrive as the missing arg.
        if (missing(subscript))
            return(NULL)
        subscript <- eval(subscript, envir=eframe, enclos=eframe)
        ## An explicit NULL subscript selects nothing.
        if (is.null(subscript))
            return(integer(0))
        subscript
    })
    ## Drop named arguments that are not subscripts (e.g. `drop=`).
    argnames <- tail(names(call), n=-2L)
    if (!is.null(argnames))
        Nindex <- Nindex[!(argnames %in% c("drop", "exact", "value"))]
    ## A single empty subscript (`x[]`) expands to a full selection of
    ## 4 empty subscripts -- NOTE(review): 4L presumably matches the
    ## number of dimensions of the array class this supports; confirm.
    if (length(Nindex) == 1L && is.null(Nindex[[1L]]))
        Nindex <- vector("list", 4L)
    if (length(Nindex) < 4)
        stop("incorrect number of subscripts", call.=FALSE)
    Nindex
}
#' Stop if matrices are not odd and square
#'
#' Validates that the first two dimensions of `counts(x)` describe
#' square matrices with an odd side length, i.e. that a center pixel
#' exists.
#'
#' @param x InteractionArray
#' @importFrom rlang abort
#' @importFrom glue glue
#' @return NULL when the checks pass; otherwise an error.
#' @noRd
.checkOddSquareMatrices <- function(x){
    dims <- dim(counts(x))
    ## Check that input is a square matrix (rows == cols)
    if(dims[1] != dims[2]){
        abort(c("`x` must have square count matrices.",
            "i"="Dimensions of count matrices must be equal.",
            "x"=glue("`dim(counts(x))[1] != dim(counts(x))[2]`",
            ", {dims[1]} != {dims[2]}."),
            "i"="See `?pullHicMatrices` for more information."))
    }
    ## Check that the side length is odd; an even side has no single
    ## center pixel.
    if((dims[1] %% 2) == 0){
        abort(c(glue("Enrichment scores can only be calculated for matrices",
            " with a center pixel."),
            "i"="Dimensions of count matrices must be odd.",
            "i"=glue("Dimensions of count matrices are {dims[1]} x {dims[2]}."),
            "x"= glue("{dims[1]} is not odd."),
            "i"="See `?pixelsToMatrices` for help."))
    }
}
#' Return default buffer
#' If InteractionArray is supplied,
#' it uses the dimensions of counts matrices
#' to set the buffer dimensions.
#' @param x InteractionArray
#' @return 5 (set default),
#' the buffer of the provided InteractionArray,
#' or an error message if the InteractionArray
#' is not odd and square (no buffer)
#' @rdname defaultBuffer
#' @export
defaultBuffer <- function(x) {
    ## Without an input object, fall back to the package default of 5.
    if (missing(x)) {
        return(5)
    }
    ## A buffer is only defined for odd, square count matrices.
    .checkOddSquareMatrices(x)
    ## An n x n matrix with a center pixel has buffer (n - 1) / 2.
    (dim(counts(x))[1] - 1) / 2
}
#' Return non-conflicting variable name
#' @param x string (character vector of length 1)
#' of the variable name to check for and change
#' @param argNames character vector of arguments
#' to check against
#' @return a non-conflicting name for `x`. Either `x`
#' if `x` is not in the list of arguments, or `x`
#' followed by a number one greater than the largest
#' numeric suffix already in use (starting at 1).
#' @noRd
.reconcileArgs <- function(x, argNames){
    if(!x %in% argNames){
        return(x)
    }
    ## Find existing names of the form "<x><digits>" and pull out the
    ## numeric suffix with a capture group. The previous
    ## `gsub(x, "", xNums)` treated `x` itself as a regex pattern,
    ## which misbehaved when `x` contained metacharacters and removed
    ## every occurrence of `x`, not just the leading one.
    pattern <- paste0("^", x, "(\\d+)$")
    xNums <- grep(pattern, argNames, value=TRUE)
    nums <- as.numeric(sub(pattern, "\\1", xNums))
    ## Use one more than the largest existing suffix, or 1 if none.
    suffix <- if(length(nums) > 0) max(nums) + 1 else 1
    paste0(x, suffix)
}
| /R/utils.R | no_license | EricSDavis/mariner | R | false | false | 9,871 | r | #' Check that a GRanges object has been snapped
#' to bins
#'
#' @param x GRanges object
#' @param binSize Integer (numeric) describing
#' the new size of each range.
#'
#' @return Logical
#'
#' @importFrom GenomicRanges start end
#'
#' @noRd
.checkSnappedRanges <- function(x, binSize) {
    ## A range is snapped when both its start and end fall exactly on
    ## a multiple of binSize (division leaves no fractional part).
    startsAligned <- (start(x) / binSize) %% 1 == 0
    endsAligned <- (end(x) / binSize) %% 1 == 0
    all(startsAligned) & all(endsAligned)
}
#' Check that a GInteractions object has been
#' snapped to bins
#'
#' @param x GInteractions object
#' @param binSize Integer (numeric) describing
#' the new size of each range.
#'
#' @return Logical
#'
#' @importFrom S4Vectors first second
#'
#' @noRd
.checkSnappedPairs <- function(x, binSize) {
    ## Both anchors of every interaction must be snapped to bins.
    firstOk <- .checkSnappedRanges(x = first(x), binSize = binSize)
    secondOk <- .checkSnappedRanges(x = second(x), binSize = binSize)
    firstOk & secondOk
}
#' Check that a GRanges object has been binned
#'
#' Starts are 0-based for interfacing with the
#' `strawr` package. Therefore, widths of correctly
#' binned objects will be `binSize+1`.
#'
#' @param x GRanges object
#' @param binSize Integer (numeric) describing
#' the new size of each range.
#'
#' @return Logical
#'
#' @importFrom GenomicRanges width
#'
#' @noRd
.checkBinnedRanges <- function(x, binSize) {
    ## Starts are 0-based, so a correctly binned range has width
    ## binSize + 1; flag any range that deviates.
    offWidth <- which(width(x) != binSize + 1)
    length(offWidth) == 0
}
#' Check that a GInteractions object has been binned
#'
#' Starts are 0-based for interfacing with the
#' `strawr` package. Therefore, widths of correctly
#' binned objects will be `binSize+1`.
#'
#' @param x GInteractions object
#' @param binSize Integer (numeric) describing
#' the new size of each range.
#'
#' @return Logical
#'
#' @importFrom S4Vectors first second
#'
#' @noRd
.checkBinnedPairs <- function(x, binSize) {
    ## Both anchors of every interaction must have binned widths.
    firstOk <- .checkBinnedRanges(x = first(x), binSize = binSize)
    secondOk <- .checkBinnedRanges(x = second(x), binSize = binSize)
    firstOk & secondOk
}
#' Return the mode(s)
#' @param x numeric vector
#' @returns A vector of the value(s) occurring most often in `x`;
#' ties return every tied value, in order of first appearance.
#' @noRd
.modes <- function(x) {
    vals <- unique(x)
    ## Count occurrences of each unique value.
    counts <- tabulate(match(x, vals))
    vals[counts == max(counts)]
}
#' Check input types
#'
#' Validates, by name, that variables in the calling frame (or `env`)
#' match their declared derived types. Unrecognized type labels are
#' silently skipped.
#'
#' Derived types:
#' string - length one character vector
#' number - length one numeric vector
#' boolean - a length one logical vector that is not NA
#'
#' @param types Named vector or list of arguments and their types
#' @param env Environment in which the named variables are looked up;
#' defaults to the caller's frame.
#' @importFrom rlang abort
#' @importFrom glue glue
#' @importFrom assertthat is.string is.number is.flag
#' @returns NULL, or an error for incorrect types.
#' @noRd
.checkTypes <- function(types, env=parent.frame()) {
    args <- names(types)
    for(i in seq_along(types)) {
        if (types[i] == "string") {
            ## is.string() returns a single flag, so any() here is
            ## effectively a pass-through wrapper.
            if(any(!is.string(get(args[i], envir=env)))) {
                abort(glue(
                    "{args[i]} is not a string \\
                    (a length one character vector)."
                ))
            }
        }
        if (types[i] == "number") {
            if(any(!is.number(get(args[i], envir=env)))) {
                abort(glue(
                    "{args[i]} is not a number \\
                    (a length one numeric vector)."
                ))
            }
        }
        if (types[i] == "boolean") {
            arg <- get(args[i], envir=env)
            ## is.na() is vectorized; combined with is.flag() this
            ## rejects NA flags as well as non-scalar logicals.
            if(any(!is.flag(arg) | is.na(arg))) {
                abort(glue(
                    "{args[i]} is not a boolean \\
                    (a length one logical vector that is not NA)."
                ))
            }
        }
    }
}
## TODO: Write method for combining
## HDF5 data into a single file in blocks
#' Check that a list of objects contains
#' the same data in a slot.
#' @param x List of objects.
#' @param FUN Slot accessor function.
#' @returns Logical that all objects contain the same
#' data or not.
#' @noRd
.checkEqualSlot <- function(x, FUN) {
d <- lapply(x, FUN)
all(vapply(seq_along(d), \(i) identical(d[[1L]], d[[i]]), logical(1L)))
}
#' Internal rbind method for InteractionMatrix/InteractionArray
#' @param ... InteractionMatrix objects to be combined row-wise.
#' All objects must be the same class.
#' @param deparse.level An integer scalar; see `?base::rbind` for
#' a description of this argument.
#' @importFrom S4Vectors metadata `metadata<-`
#' @importFrom SummarizedExperiment colData `colData<-`
#' @importFrom rlang abort
#' @importFrom glue glue
#' @noRd
.rbindIsetDerived <- function(..., deparse.level=1) {
args <- unname(list(...))
type <- class(args[[1L]]) # get class name
## Check equivalent metadata before binding
if (!.checkEqualSlot(args, metadata)) {
abort(glue("Can't rbind {type} \\
objects with different metadata."))
}
## Check equivalent colData before binding
if (!.checkEqualSlot(args, colData)) {
abort(glue("Can't rbind {type} \\
objects with different colData."))
}
ans <- new(type, callNextMethod())
metadata(ans) <- metadata(args[[1L]])
colData(ans) <- colData(args[[1L]])
ans
}
#' Internal cbind method for InteractionMatrix/InteractionArray
#' @param ... InteractionMatrix objects to be combined column-wise.
#' All objects must be the same class.
#' @param deparse.level An integer scalar; see `?base::cbind` for
#' a description of this argument.
#' @importFrom S4Vectors metadata `metadata<-`
#' @importFrom rlang abort
#' @importFrom glue glue
#' @noRd
.cbindIsetDerived <- function(..., deparse.level=1) {
args <- unname(list(...))
type <- class(args[[1L]]) # get class name
## Check equivalent metadata before binding
if (!.checkEqualSlot(args, metadata)) {
abort(glue("Can't cbind {type} \\
objects with different metadata."))
}
tryCatch({
ans <- new(type, callNextMethod())
}, error=\(e) {
abort(e$message, call=parent.frame(4L))
})
metadata(ans) <- metadata(args[[1L]])
ans
}
#' Stop if buffer is not the same
#' @param b1 buffer (numeric) from first object
#' @param b2 buffer (numeric) from second object
#' @importFrom rlang abort
#' @return NULL or error message if not the same.
#' @noRd
.checkBuffer <- function(b1, b2) {
if (b1 != b2) {
abort("`buffer` must be the same for both selections.")
}
}
#' Get binSize or throw error
#' @param x GInteractions object.
#' @importFrom S4Vectors first second
#' @importFrom IRanges width
#' @importFrom rlang abort
#' @noRd
.getBinSize <- function(x) {
widths <- unique(width(regions(x))) - 1
if (length(widths) != 1L) {
abort(c("All ranges in `x` must be equal widths.",
"i"="Use `assignToBins()` to bin into equal widths."))
}
return(widths)
}
#' Function to extract Nindex from system call
#' Modified from S4Arrays/DelayedArray
#' "extract_Nindex_from_syscall"
#' @param call sys.call()
#' @param eframe environment frame (i.e. `parent.frame()`)
#' @importFrom utils tail
#' @returns Nindex, a list of user supplied subscripts.
#' Missing subscripts are set to `NULL`.
#' @noRd
.getNindexFromSyscall <- function(call, eframe) {
Nindex <- lapply(seq_len(length(call) - 2L), \(i) {
subscript <- call[[2L + i]]
if (missing(subscript))
return(NULL)
subscript <- eval(subscript, envir=eframe, enclos=eframe)
if (is.null(subscript))
return(integer(0))
subscript
})
argnames <- tail(names(call), n=-2L)
if (!is.null(argnames))
Nindex <- Nindex[!(argnames %in% c("drop", "exact", "value"))]
if (length(Nindex) == 1L && is.null(Nindex[[1L]]))
Nindex <- vector("list", 4L)
if (length(Nindex) < 4)
stop("incorrect number of subscripts", call.=FALSE)
Nindex
}
#' Stop if matrices are not odd and square
#' @param x InteractionArray
#' @importFrom rlang abort
#' @importFrom glue glue
#' @return NULL or error message if not odd and square.
#' @noRd
.checkOddSquareMatrices <- function(x){
dims <- dim(counts(x))
## Check that input is a square matrix
if(dims[1] != dims[2]){
abort(c("`x` must have square count matrices.",
"i"="Dimensions of count matrices must be equal.",
"x"=glue("`dim(counts(x))[1] != dim(counts(x))[2]`",
", {dims[1]} != {dims[2]}."),
"i"="See `?pullHicMatrices` for more information."))
}
## Check that buffer for InteractionArray is odd
if((dims[1] %% 2) == 0){
abort(c(glue("Enrichment scores can only be calculated for matrices",
" with a center pixel."),
"i"="Dimensions of count matrices must be odd.",
"i"=glue("Dimensions of count matrices are {dims[1]} x {dims[2]}."),
"x"= glue("{dims[1]} is not odd."),
"i"="See `?pixelsToMatrices` for help."))
}
}
#' Return default buffer
#' If InteractionArray is supplied,
#' it uses the dimensions of counts matrices
#' to set the buffer dimensions.
#' @param x InteractionArray
#' @return 5 (set default),
#' the buffer of the provided InteractionArray,
#' or an error message if the InteractionArray
#' is not odd and square (no buffer)
#' @rdname defaultBuffer
#' @export
defaultBuffer <- function(x) {
if (missing(x)) {
return(5)
}
.checkOddSquareMatrices(x)
buffer <- (dim(counts(x))[1] - 1) /2
buffer
}
#' Return non-conflicting variable name
#' @param x string (character vector of length 1)
#' of the variable name to check for and change
#' @param argNames character vector of arguments
#' to check against
#' @return a non-conflicting name for `x`. Either `x`
#' if `x` is not in the list of arguments, or `x`
#' followed by a number
#' @noRd
.reconcileArgs <- function(x, argNames){
if(x %in% argNames){
xNums <- grep(paste0("^",x,"\\d+$"), argNames, value=T)
nums <- as.numeric(gsub(x, "", xNums))
xNew <- paste0(x, ifelse(length(nums)>0, max(nums)+1, 1))
return(xNew)
}
return(x)
}
|
library(xlsx)

# Create standard plate numbering (row-major well IDs A1..H12)
Z <- t(array(1:12, dim=c(12,8)))
X <- array(c("A","B","C","D","E","F","G","H"), dim=c(8,12))
Y <- paste(X, Z, sep="")
IDs <- array(Y, c(8,12)) # Writes 12 Ax-Hx columns into one.
IDs <- array(t(IDs),c(96,1))

# Import plate layouts and qPCR result files
rowname <- letters[1:8]
colname <- as.character(1:12)
plates <- list.files("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/samples/")
qPCRs <- list.files("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/qPCRs/")

# Collect one (sample, primer, Cq) block per plate in a list and bind
# once at the end. The previous version grew the matrix with rbind()
# inside the loop (quadratic) and seeded it with a 1x3 NA row that
# leaked into the output CSV.
n_plates <- 10
plate_blocks <- vector("list", n_plates)
for (i in seq_len(n_plates)) {
  # Top 8 rows of the layout file hold the sample layout...
  samples <- read.csv(paste0("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/samples/", plates[i]), header=FALSE, stringsAsFactors=FALSE, row.names = rowname, col.names = colname, nrows = 8)
  # ...rows after line 9 hold the primer layout.
  primers <- read.csv(paste0("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/samples/", plates[i]), header=FALSE, stringsAsFactors=FALSE, row.names = rowname, col.names = colname, skip = 9, nrows = 8)
  # Flatten each 8x12 plate row-wise into a 96x1 column (A1..A12, B1..).
  samples <- array(t(as.matrix(samples)), c(96,1))
  primers <- array(t(as.matrix(primers)), c(96,1))
  # First sheet of the qPCR workbook holds the measured values.
  Cts <- read.xlsx(paste0("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/qPCRs/", qPCRs[i]), 1)
  colnames(Cts) <- c("well","type","threshold","Cq")
  # Drop reference rows; keep the Cq column as character.
  Cts <- as.character(Cts[Cts$Cq != "Reference", 4])
  plate_blocks[[i]] <- cbind(samples, primers, Cts)
}
tableqPCR <- do.call(rbind, plate_blocks)
colnames(tableqPCR) <- c("sample","primer","Cq")
write.csv(tableqPCR, "tableqPCRs.csv")
| /R scripts/qPCRs/better way to join sample primer and ct data.R | no_license | jucapitanio/Software | R | false | false | 1,739 | r | library(xlsx)
# Create standard plate numbering
Z <- t(array(1:12, dim=c(12,8)));
X <- array(c("A","B","C","D","E","F","G","H"), dim=c(8,12));
Y <- paste(X, Z, sep="");
IDs <- array(Y, c(8,12)) # Writes 12 Ax-Hx columns into one.
IDs <- array(t(IDs),c(96,1))
# Import plates and make list with Ct values
rowname <- letters[1:8]
colname <- as.character(1:12)
plates <- list.files("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/samples/")
qPCRs <- list.files("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/qPCRs/")
tableqPCR <- matrix(data = NA, nrow = 1, ncol = 3, byrow = TRUE, dimnames = NULL)
#tableqPCR[,1] <- IDs
for (i in c(1:10)) {
samples <- read.csv(paste0("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/samples/", plates[i]), header=FALSE, stringsAsFactors=FALSE, row.names = rowname, col.names = colname, nrows = 8)
primers <- read.csv(paste0("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/samples/", plates[i]), header=FALSE, stringsAsFactors=FALSE, row.names = rowname, col.names = colname, skip = 9, nrows = 8)
samples <- as.array(as.matrix(samples))
primers <- as.array(as.matrix(primers))
samples <- array(t(samples), c(96,1))
primers <- array(t(primers), c(96,1))
Cts <- read.xlsx(paste0("~/Lab Stuff 2015 2nd sem/Experiments/qPCR HeLa 293T KD results/plates for R/qPCRs/", qPCRs[i]), 1)
colnames(Cts) <- c("well","type","threshold","Cq")
Cts <- Cts[Cts$Cq != "Reference", 4]
Cts <- as.character(Cts)
newtable <- cbind(samples, primers, Cts)
tableqPCR <- rbind(tableqPCR, newtable)
}
colnames(tableqPCR)<- c("sample","primer","Cq")
write.csv(tableqPCR, "tableqPCRs.csv")
|
\name{shape}
\alias{shape}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to estimate eccentricity, variance on Gmax and volume.
}
\description{
Estimates the eccentricity, the variance along Gmax, and the volume of
a set of G-matrices (genetic variance-covariance matrices) for one or
more populations.
}
\usage{
shape(name, names_pop)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name}{
prefix of the Gmatrices
}
\item{names_pop}{
names of the populations used for the comparison
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(animal_results)
Gmat_1=ani_1$VCV #extract variance part of the model to the first MCMCglmm result
Gmat_1=Gmat_1[,grep(".animal",colnames(Gmat_1))] # extract the genetic variances and covariances
Gmat_2=ani_2$VCV
Gmat_2=Gmat_2[,grep(".animal",colnames(Gmat_1))]
Gmat_3=ani_3$VCV #extract variance part of the model
Gmat_3=Gmat_3[,grep(".animal",colnames(Gmat_1))]
shape(name = "Gmat_",names_pop = c("pop1","pop2","pop3"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/shape.Rd | no_license | schantepie/Gmatools | R | false | false | 1,605 | rd | \name{shape}
\alias{shape}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to estimate eccentricity, variance on Gmax and volume.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
shape(name, names_pop)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name}{
prefix of the Gmatrices
}
\item{names_pop}{
names of the populations used for the comparison
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(animal_results)
Gmat_1=ani_1$VCV #extract variance part of the model to the first MCMCglmm result
Gmat_1=Gmat_1[,grep(".animal",colnames(Gmat_1))] # extract the genetic variances and covariances
Gmat_2=ani_2$VCV
Gmat_2=Gmat_2[,grep(".animal",colnames(Gmat_1))]
Gmat_3=ani_3$VCV #extract variance part of the model
Gmat_3=Gmat_3[,grep(".animal",colnames(Gmat_1))]
shape(name = "Gmat_",names_pop = c("pop1","pop2","pop3"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# stl関数による要因分解
plot(stl(AirPassengers, "periodic")) | /chapter15/stl.R | no_license | AnguillaJaponica/RProgramming | R | false | false | 69 | r | # stl関数による要因分解
plot(stl(AirPassengers, "periodic")) |
## Render the Generalized Extreme Value (GEV) distribution as a plotly
## bar chart for a Shiny app.
##
## plotrange: numeric length-2 vector; x-axis limits (values rounded
##            to integers, lower bound clamped to at most 0 and upper
##            bound to at least 10).
## input:     Shiny `input` object; reads FunctionType and the GEV
##            parameters GEVMiu, GEVSigma, GEVEpsilon.
## distType:  display name for the plotly trace.
## probrange: numeric length-2 vector; bars inside this x-interval are
##            highlighted in orange.
##
## NOTE(review): dgev()/pgev() are not base R (presumably from the
## `evd`/`evir` family) -- confirm which package is attached upstream.
## NOTE(review): the GEV is continuous, yet the labels read "PMF"/"CMF"
## and the values are binned at integers; this appears to mirror the
## app-wide convention, but strictly these are PDF/CDF values.
plotlyGeneralizedExtremeValueGEVDistribution <- function(plotrange, input, distType, probrange) {
    ## Integer grid spanning at least [0, 10] plus the requested range.
    xseq<-seq(round(min(0,as.numeric(plotrange[1])),0),round(max(as.numeric(plotrange[2]),10),0),1)
    f29 <- 0
    graphtype<-""
    if(input$FunctionType == "PDF/PMF"){
        ## Density values at the integer grid points.
        f29<-dgev(xseq, as.numeric(input$GEVMiu), as.numeric(input$GEVSigma), as.numeric(input$GEVEpsilon) )
        graphtype<-"PMF"
    }
    else if(input$FunctionType == "CDF/CMF"){
        ## Cumulative probabilities at the integer grid points.
        f29<-pgev(xseq, as.numeric(input$GEVMiu), as.numeric(input$GEVSigma), as.numeric(input$GEVEpsilon) )
        graphtype<-"CMF"
    }
    else{
        graphtype<-""
    }
    ## Unknown FunctionType: graphtype stays "" and nothing is drawn.
    if(graphtype != ""){
        xsize = length(xseq)
        ## Default bar color (blue); bars inside probrange get orange.
        colors = c(rep('rgb(31, 119, 180)', xsize))
        for (index in 1:xsize){
            if (xseq[index] >= round(probrange[1],0) && xseq[index] <= round(probrange[2],0)){
                colors[index] = 'rgb(255, 127, 14)'
            }
        }
        fig<-plot_ly(x = xseq,
                     y = f29,
                     name = distType,
                     type = 'bar',
                     marker = list(color = colors),
                     text = f29,
                     ##text = c(rep( ppois(round(as.numeric(probrange[2]),0),as.numeric(input$PoiLambda))-ppois(round(as.numeric(probrange[1]),0)-1,as.numeric(input$PoiLambda)),xsize)),
                     hovertemplate = paste('<br><b>Prob. </b>: %{y}</br>',
                                           '<b>X</b>: %{x}',
                                           '<b>Y</b>: %{y}'
                     ),
        )
        ## `distributions[29]` is a global lookup table of display
        ## names defined elsewhere in the app.
        fig<-fig %>% plotly::layout(title = paste(distributions[29],' - ',graphtype,sep = ""),
                       hovermode = 'x',
                       hoverlabel = list(
                         namelength = 100
                       ),
                       yaxis = list(fixedrange = TRUE,
                                    zeroline = TRUE,
                                    range = c(min(f29),max(f29)),
                                    type = 'linear'
                       ),
                       xaxis=list(showticklabels=TRUE,
                                  title = "* All x values rounded to nearest integers",
                                  zeroline = TRUE,
                                  showline=TRUE,
                                  showgrid=TRUE,
                                  linecolor='rgb(204, 204, 204)',
                                  linewidth=2,
                                  mirror=TRUE,
                                  fixedrange = TRUE,
                                  range = c(plotrange[1],plotrange[2])
                       ),
                       showlegend = FALSE
        )
        ## Lock the rendered chart against in-browser editing.
        fig<-fig %>% config(editable=FALSE)
        fig
    }
}
| /plotlyFunctions/GeneralizedExtremeValueGEV.R | no_license | SOCR/ProbDistCalc_RShiny | R | false | false | 3,033 | r | plotlyGeneralizedExtremeValueGEVDistribution <- function(plotrange, input, distType, probrange) {
xseq<-seq(round(min(0,as.numeric(plotrange[1])),0),round(max(as.numeric(plotrange[2]),10),0),1)
f29 <- 0
graphtype<-""
if(input$FunctionType == "PDF/PMF"){
f29<-dgev(xseq, as.numeric(input$GEVMiu), as.numeric(input$GEVSigma), as.numeric(input$GEVEpsilon) )
graphtype<-"PMF"
}
else if(input$FunctionType == "CDF/CMF"){
f29<-pgev(xseq, as.numeric(input$GEVMiu), as.numeric(input$GEVSigma), as.numeric(input$GEVEpsilon) )
graphtype<-"CMF"
}
else{
graphtype<-""
}
if(graphtype != ""){
xsize = length(xseq)
colors = c(rep('rgb(31, 119, 180)', xsize))
for (index in 1:xsize){
if (xseq[index] >= round(probrange[1],0) && xseq[index] <= round(probrange[2],0)){
colors[index] = 'rgb(255, 127, 14)'
}
}
fig<-plot_ly(x = xseq,
y = f29,
name = distType,
type = 'bar',
marker = list(color = colors),
text = f29,
##text = c(rep( ppois(round(as.numeric(probrange[2]),0),as.numeric(input$PoiLambda))-ppois(round(as.numeric(probrange[1]),0)-1,as.numeric(input$PoiLambda)),xsize)),
hovertemplate = paste('<br><b>Prob. </b>: %{y}</br>',
'<b>X</b>: %{x}',
'<b>Y</b>: %{y}'
),
)
fig<-fig %>% plotly::layout(title = paste(distributions[29],' - ',graphtype,sep = ""),
hovermode = 'x',
hoverlabel = list(
namelength = 100
),
yaxis = list(fixedrange = TRUE,
zeroline = TRUE,
range = c(min(f29),max(f29)),
type = 'linear'
),
xaxis=list(showticklabels=TRUE,
title = "* All x values rounded to nearest integers",
zeroline = TRUE,
showline=TRUE,
showgrid=TRUE,
linecolor='rgb(204, 204, 204)',
linewidth=2,
mirror=TRUE,
fixedrange = TRUE,
range = c(plotrange[1],plotrange[2])
),
showlegend = FALSE
)
fig<-fig %>% config(editable=FALSE)
fig
}
}
|
# Date   : 2021/01/28
# Author : Kim Eun-pyo
# Topic  : Ch16. Unsupervised learning - clustering practice (textbook p543)
library(ggplot2)
# K-means clustering of the diamonds data
data(diamonds)
View(diamonds)
# Sample 1000 rows from the population
# NOTE(review): `sample` shadows base::sample and no set.seed() is
# called, so the draw is not reproducible -- confirm if intended.
sample <- sample(1:nrow(diamonds), 1000)
df_test_sample <- diamonds[sample,]
View(df_test_sample)
# Build the data frame used for clustering (numeric columns only)
df_test_diamond <- df_test_sample[c('price', 'carat', 'depth', 'table')]
View(df_test_diamond)
# Run k-means with 3 clusters
result_kmeans <- kmeans(df_test_diamond, 3)
result_kmeans
df_test_diamond['cluster'] <- result_kmeans$cluster
View(df_test_diamond)
# Visualization: carat vs. price colored by cluster assignment
plot(df_test_diamond$carat,
     df_test_diamond$price,
     col=df_test_diamond$cluster)
| /Ch16/16_1_Cluster.R | no_license | kepchef/R | R | false | false | 707 | r | #날짜 : 2021/01/28
#이름 : 김은표
#내용 : Ch16.비지도학습 - 군집분석 실습 교재 p543
library(ggplot2)
#다이몬드 군집분석
data(diamonds)
View(diamonds)
#모집단에서 1000개 샘플링
sample <- sample(1:nrow(diamonds), 1000)
df_test_sample <- diamonds[sample,]
View(df_test_sample)
#군집분석용 데이터프레임 생성
df_test_diamond <- df_test_sample[c('price', 'carat', 'depth', 'table')]
View(df_test_diamond)
#군집분석
result_kmeans <- kmeans(df_test_diamond, 3)
result_kmeans
df_test_diamond['cluster'] <- result_kmeans$cluster
View(df_test_diamond)
#시각화
plot(df_test_diamond$carat,
df_test_diamond$price,
col=df_test_diamond$cluster)
|
# Install gapminder on first use. require() returns FALSE instead of
# erroring when the package is missing, so load it after installing.
if (!require("gapminder")) {
  install.packages("gapminder")
  library(gapminder)
}
library(ggplot2)
library(gridExtra)
library(dplyr)

# Overview panel: population histogram, life expectancy by continent,
# continent frequencies, and trends for four selected countries.
p <- ggplot(gapminder)
plot1 <- p + aes(x = pop) + geom_histogram()
plot2 <- p + aes(x = continent, y = lifeExp) + geom_violin() + stat_summary(color = "blue")
continent_freq <- gapminder %>% count(continent)
plot3 <- ggplot(continent_freq, aes(x = continent, y = n)) + geom_bar(stat = "identity")
jCountries <- c("Canada", "Rwanda", "Cambodia", "Mexico")
plot4 <- gapminder %>% filter(country %in% jCountries) %>%
  ggplot(aes(x = year, y = lifeExp, color = country)) +
  geom_line() + geom_point()
grid.arrange(plot1, plot2, plot3, plot4, nrow = 2, ncol = 2)
dir.create("./ggsave", showWarnings = FALSE)
ggsave("./ggsave/gapminder.png")
#####################################################################
# One life-expectancy trend plot per country.
# Fixes the original indexing bug: the loop ran over the length of
# unique(country) but subscripted the full country column, so some
# countries were saved repeatedly and others never.
countries <- unique(gapminder$country)
for (i in seq_along(countries)) {
  country_plot <- gapminder %>%
    filter(country == countries[i]) %>%
    ggplot(aes(x = year, y = lifeExp, color = country)) +
    geom_line() + geom_point()
  # Pass the plot explicitly: chaining `+ ggsave(...)` relied on
  # last_plot(), which is only set when a plot is printed.
  ggsave(paste0("./ggsave/", countries[i], ".png"), plot = country_plot)
  # print(paste0(i, " / ", length(countries)))
}
| /Basic_ggplot2.R | no_license | Seonwhee-Genome/Data_Visualisation | R | false | false | 1,128 | r | if (!require("gapminder")){install.packages("gapminder")}
library(ggplot2)
library(gridExtra)
library(dplyr)
p <- ggplot(gapminder)
plot1 <- p + aes(x=pop) + geom_histogram()
plot2 <- p + aes(x=continent, y=lifeExp) + geom_violin() + stat_summary(color="blue")
continent_freq <- gapminder %>% count(continent)
plot3 <- ggplot(continent_freq, aes(x = continent, y = n)) + geom_bar(stat = "identity")
jCountries <- c("Canada", "Rwanda", "Cambodia", "Mexico")
plot4 <- gapminder %>% filter(country %in% jCountries) %>%
ggplot(aes(x = year, y = lifeExp, color = country)) +
geom_line() + geom_point()
grid.arrange(plot1, plot2, plot3, plot4, nrow=2, ncol=2)
dir.create("./ggsave", showWarnings = F)
ggsave("./ggsave/gapminder.png")
#####################################################################33
for(i in 1:length(unique(gapminder$country))){
gapminder %>% filter(country == gapminder$country[i]) %>%
ggplot(aes(x = year, y = lifeExp, color = country)) +
geom_line() + geom_point() +
ggsave(paste0("./ggsave/",gapminder$country[i],".png"))
# print(paste0(i," / ",length(unique(gapminder$country))))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peptable.R
\name{mergePep}
\alias{mergePep}
\title{Merge peptide table(s) into one}
\usage{
mergePep(
plot_log2FC_cv = TRUE,
use_duppeps = TRUE,
cut_points = Inf,
omit_single_lfq = TRUE,
parallel = TRUE,
...
)
}
\arguments{
\item{plot_log2FC_cv}{Logical; if TRUE, the distributions of the CV of peptide
\code{log2FC} will be plotted. The default is TRUE.}
\item{use_duppeps}{Logical; if TRUE, re-assigns double/multiple dipping
peptide sequences to the most likely proteins by majority votes.}
\item{cut_points}{A named, numeric vector defines the cut points (knots) for
the median-centering of \code{log2FC} by sections. For example, at
\code{cut_points = c(mean_lint = seq(4, 7, .5))}, \code{log2FC} will be
binned according to the intervals of \eqn{-Inf, 4, 4.5, ..., 7, Inf} under
column \code{mean_lint} (mean log10 intensity) in the input data. The
default is \code{cut_points = Inf}, or equivalently \code{-Inf}, where the
\code{log2FC} under each sample will be median-centered as one piece. See
also \code{\link{prnHist}} for data binning in histogram visualization.}
\item{omit_single_lfq}{Logical for MaxQuant LFQ; if TRUE, omits LFQ entries
with single measured values across all samples. The default is TRUE.}
\item{parallel}{Logical; if TRUE, performs parallel computation. The default
is TRUE.}
\item{...}{\code{filter_}: Variable argument statements for the row filtration
of data against the column keys in individual peptide tables of
\code{TMTset1_LCMSinj1_Peptide_N.txt, TMTset1_LCMSinj2_Peptide_N.txt}, etc.
\cr \cr The variable argument statements should be in the following format:
each statement contains to a list of logical expression(s). The \code{lhs}
needs to start with \code{filter_}. The logical condition(s) at the
\code{rhs} needs to be enclosed in \code{exprs} with round parenthesis. For
example, \code{pep_len} is a column key present in \code{Mascot} peptide
tables of \code{TMTset1_LCMSinj1_Peptide_N.txt},
\code{TMTset1_LCMSinj2_Peptide_N.txt} etc. The statement
\code{filter_peps_at = exprs(pep_len <= 50)} will remove peptide entries
with \code{pep_len > 50}. See also \code{\link{normPSM}}.}
}
\value{
The primary output is in \code{.../Peptide/Peptide.txt}.
}
\description{
\code{mergePep} merges individual peptide table(s),
\code{TMTset1_LCMSinj1_Peptide_N.txt, TMTset1_LCMSinj2_Peptide_N.txt} etc.,
into one interim \code{Peptide.txt}. The \code{log2FC} values in the interim
result are centered with the medians at zero (median centering). The utility
is typically applied after the conversion of PSMs to peptides via
\code{\link{PSM2Pep}} and is required even for an experiment with a single
multiplex TMT set and one LC/MS series.
}
\details{
In the interim output file, "\code{Peptide.txt}", values under columns
\code{log2_R...} are logarithmic ratios at base 2 in relative to the
\code{reference(s)} within each multiplex TMT set, or to the row means within
each plex if no \code{reference(s)} are present. Values under columns
\code{N_log2_R...} are median-centered \code{log2_R...} without scaling
normalization. Values under columns \code{Z_log2_R...} are \code{N_log2_R...}
with additional scaling normalization. Values under columns \code{I...} are
reporter-ion or LFQ intensity before normalization. Values under columns
\code{N_I...} are normalized \code{I...}. Values under columns
\code{sd_log2_R...} are the standard deviation of the \code{log2FC} of
proteins from ascribing peptides.
Description of the column keys in the output: \cr
\code{system.file("extdata", "mascot_peptide_keys.txt", package = "proteoQ")}
The peptide counts in individual peptide tables,
\code{TMTset1_LCMSinj1_Peptide_N.txt} etc., may be fewer than the entries
indicated under the \code{prot_n_pep} column after the peptide
removals/cleanups using \code{purgePSM}.
}
\examples{
\donttest{
# ===================================
# Merge peptide data
# ===================================
## !!!require the brief working example in `?load_expts`
# everything included
mergePep()
# row filtrations against column keys in `TMTset1_LCMSinj1_Peptide_N.txt`...
mergePep(
filter_peps_by_sp = exprs(species == "human", pep_len <= 50),
)
# alignment of data by segments
mergePep(cut_points = c(mean_lint = seq(4, 7, .5)))
# alignment of data by empirical protein abundance
# `10^prot_icover - 1` comparable to emPAI
mergePep(cut_points = c(prot_icover = seq(0, 1, .25)))
}
}
\seealso{
\emph{Metadata} \cr
\code{\link{load_expts}} for metadata preparation and a reduced working example in data normalization \cr
\emph{Data normalization} \cr
\code{\link{normPSM}} for extended examples in PSM data normalization \cr
\code{\link{PSM2Pep}} for extended examples in PSM to peptide summarization \cr
\code{\link{mergePep}} for extended examples in peptide data merging \cr
\code{\link{standPep}} for extended examples in peptide data normalization \cr
\code{\link{Pep2Prn}} for extended examples in peptide to protein summarization \cr
\code{\link{standPrn}} for extended examples in protein data normalization. \cr
\code{\link{purgePSM}} and \code{\link{purgePep}} for extended examples in data purging \cr
\code{\link{pepHist}} and \code{\link{prnHist}} for extended examples in histogram visualization. \cr
\code{\link{extract_raws}} and \code{\link{extract_psm_raws}} for extracting MS file names \cr
\emph{Variable arguments of `filter_...`} \cr
\code{\link{contain_str}}, \code{\link{contain_chars_in}}, \code{\link{not_contain_str}},
\code{\link{not_contain_chars_in}}, \code{\link{start_with_str}},
\code{\link{end_with_str}}, \code{\link{start_with_chars_in}} and
\code{\link{ends_with_chars_in}} for data subsetting by character strings \cr
\emph{Missing values} \cr
\code{\link{pepImp}} and \code{\link{prnImp}} for missing value imputation \cr
\emph{Informatics} \cr
\code{\link{pepSig}} and \code{\link{prnSig}} for significance tests \cr
\code{\link{pepVol}} and \code{\link{prnVol}} for volcano plot visualization \cr
\code{\link{prnGSPA}} for gene set enrichment analysis by protein significance pVals \cr
\code{\link{gspaMap}} for mapping GSPA to volcano plot visualization \cr
\code{\link{prnGSPAHM}} for heat map and network visualization of GSPA results \cr
\code{\link{prnGSVA}} for gene set variance analysis \cr
\code{\link{prnGSEA}} for data preparation for online GSEA. \cr
\code{\link{pepMDS}} and \code{\link{prnMDS}} for MDS visualization \cr
\code{\link{pepPCA}} and \code{\link{prnPCA}} for PCA visualization \cr
\code{\link{pepLDA}} and \code{\link{prnLDA}} for LDA visualization \cr
\code{\link{pepHM}} and \code{\link{prnHM}} for heat map visualization \cr
\code{\link{pepCorr_logFC}}, \code{\link{prnCorr_logFC}}, \code{\link{pepCorr_logInt}} and
\code{\link{prnCorr_logInt}} for correlation plots \cr
\code{\link{anal_prnTrend}} and \code{\link{plot_prnTrend}} for trend analysis and visualization \cr
\code{\link{anal_pepNMF}}, \code{\link{anal_prnNMF}}, \code{\link{plot_pepNMFCon}},
\code{\link{plot_prnNMFCon}}, \code{\link{plot_pepNMFCoef}}, \code{\link{plot_prnNMFCoef}} and
\code{\link{plot_metaNMF}} for NMF analysis and visualization \cr
\emph{Custom databases} \cr
\code{\link{Uni2Entrez}} for lookups between UniProt accessions and Entrez IDs \cr
\code{\link{Ref2Entrez}} for lookups among RefSeq accessions, gene names and Entrez IDs \cr
\code{\link{prepGO}} for \code{\href{http://current.geneontology.org/products/pages/downloads.html}{gene
ontology}} \cr
\code{\link{prepMSig}} for \href{https://data.broadinstitute.org/gsea-msigdb/msigdb/release/7.0/}{molecular
signatures} \cr
\code{\link{prepString}} and \code{\link{anal_prnString}} for STRING-DB \cr
\emph{Column keys in PSM, peptide and protein outputs} \cr
system.file("extdata", "mascot_psm_keys.txt", package = "proteoQ") \cr
system.file("extdata", "mascot_peptide_keys.txt", package = "proteoQ") \cr
system.file("extdata", "mascot_protein_keys.txt", package = "proteoQ") \cr
}
| /man/mergePep.Rd | permissive | sailfish009/proteoQ | R | false | true | 8,103 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peptable.R
\name{mergePep}
\alias{mergePep}
\title{Merge peptide table(s) into one}
\usage{
mergePep(
plot_log2FC_cv = TRUE,
use_duppeps = TRUE,
cut_points = Inf,
omit_single_lfq = TRUE,
parallel = TRUE,
...
)
}
\arguments{
\item{plot_log2FC_cv}{Logical; if TRUE, the distributions of the CV of peptide
\code{log2FC} will be plotted. The default is TRUE.}
\item{use_duppeps}{Logical; if TRUE, re-assigns double/multiple dipping
peptide sequences to the most likely proteins by majority votes.}
\item{cut_points}{A named, numeric vector defines the cut points (knots) for
the median-centering of \code{log2FC} by sections. For example, at
\code{cut_points = c(mean_lint = seq(4, 7, .5))}, \code{log2FC} will be
binned according to the intervals of \eqn{-Inf, 4, 4.5, ..., 7, Inf} under
column \code{mean_lint} (mean log10 intensity) in the input data. The
default is \code{cut_points = Inf}, or equivalently \code{-Inf}, where the
\code{log2FC} under each sample will be median-centered as one piece. See
also \code{\link{prnHist}} for data binning in histogram visualization.}
\item{omit_single_lfq}{Logical for MaxQuant LFQ; if TRUE, omits LFQ entries
with single measured values across all samples. The default is TRUE.}
\item{parallel}{Logical; if TRUE, performs parallel computation. The default
is TRUE.}
\item{...}{\code{filter_}: Variable argument statements for the row filtration
of data against the column keys in individual peptide tables of
\code{TMTset1_LCMSinj1_Peptide_N.txt, TMTset1_LCMSinj2_Peptide_N.txt}, etc.
\cr \cr The variable argument statements should be in the following format:
each statement contains a list of logical expression(s). The \code{lhs}
needs to start with \code{filter_}. The logical condition(s) at the
\code{rhs} needs to be enclosed in \code{exprs} with round parenthesis. For
example, \code{pep_len} is a column key present in \code{Mascot} peptide
tables of \code{TMTset1_LCMSinj1_Peptide_N.txt},
\code{TMTset1_LCMSinj2_Peptide_N.txt} etc. The statement
\code{filter_peps_at = exprs(pep_len <= 50)} will remove peptide entries
with \code{pep_len > 50}. See also \code{\link{normPSM}}.}
}
\value{
The primary output is in \code{.../Peptide/Peptide.txt}.
}
\description{
\code{mergePep} merges individual peptide table(s),
\code{TMTset1_LCMSinj1_Peptide_N.txt, TMTset1_LCMSinj2_Peptide_N.txt} etc.,
into one interim \code{Peptide.txt}. The \code{log2FC} values in the interim
result are centered with the medians at zero (median centering). The utility
is typically applied after the conversion of PSMs to peptides via
\code{\link{PSM2Pep}} and is required even with an experiment at one multiplex
TMT and one LC/MS series.
}
\details{
In the interim output file, "\code{Peptide.txt}", values under columns
\code{log2_R...} are logarithmic ratios at base 2 in relative to the
\code{reference(s)} within each multiplex TMT set, or to the row means within
each plex if no \code{reference(s)} are present. Values under columns
\code{N_log2_R...} are median-centered \code{log2_R...} without scaling
normalization. Values under columns \code{Z_log2_R...} are \code{N_log2_R...}
with additional scaling normalization. Values under columns \code{I...} are
reporter-ion or LFQ intensity before normalization. Values under columns
\code{N_I...} are normalized \code{I...}. Values under columns
\code{sd_log2_R...} are the standard deviation of the \code{log2FC} of
proteins from ascribing peptides.
Description of the column keys in the output: \cr
\code{system.file("extdata", "mascot_peptide_keys.txt", package = "proteoQ")}
The peptide counts in individual peptide tables,
\code{TMTset1_LCMSinj1_Peptide_N.txt} etc., may be fewer than the entries
indicated under the \code{prot_n_pep} column after the peptide
removals/cleanups using \code{purgePSM}.
}
\examples{
\donttest{
# ===================================
# Merge peptide data
# ===================================
## !!!require the brief working example in `?load_expts`
# everything included
mergePep()
# row filtrations against column keys in `TMTset1_LCMSinj1_Peptide_N.txt`...
mergePep(
filter_peps_by_sp = exprs(species == "human", pep_len <= 50),
)
# alignment of data by segments
mergePep(cut_points = c(mean_lint = seq(4, 7, .5)))
# alignment of data by empirical protein abundance
# `10^prot_icover - 1` comparable to emPAI
mergePep(cut_points = c(prot_icover = seq(0, 1, .25)))
}
}
\seealso{
\emph{Metadata} \cr
\code{\link{load_expts}} for metadata preparation and a reduced working example in data normalization \cr
\emph{Data normalization} \cr
\code{\link{normPSM}} for extended examples in PSM data normalization \cr
\code{\link{PSM2Pep}} for extended examples in PSM to peptide summarization \cr
\code{\link{mergePep}} for extended examples in peptide data merging \cr
\code{\link{standPep}} for extended examples in peptide data normalization \cr
\code{\link{Pep2Prn}} for extended examples in peptide to protein summarization \cr
\code{\link{standPrn}} for extended examples in protein data normalization. \cr
\code{\link{purgePSM}} and \code{\link{purgePep}} for extended examples in data purging \cr
\code{\link{pepHist}} and \code{\link{prnHist}} for extended examples in histogram visualization. \cr
\code{\link{extract_raws}} and \code{\link{extract_psm_raws}} for extracting MS file names \cr
\emph{Variable arguments of `filter_...`} \cr
\code{\link{contain_str}}, \code{\link{contain_chars_in}}, \code{\link{not_contain_str}},
\code{\link{not_contain_chars_in}}, \code{\link{start_with_str}},
\code{\link{end_with_str}}, \code{\link{start_with_chars_in}} and
\code{\link{ends_with_chars_in}} for data subsetting by character strings \cr
\emph{Missing values} \cr
\code{\link{pepImp}} and \code{\link{prnImp}} for missing value imputation \cr
\emph{Informatics} \cr
\code{\link{pepSig}} and \code{\link{prnSig}} for significance tests \cr
\code{\link{pepVol}} and \code{\link{prnVol}} for volcano plot visualization \cr
\code{\link{prnGSPA}} for gene set enrichment analysis by protein significance pVals \cr
\code{\link{gspaMap}} for mapping GSPA to volcano plot visualization \cr
\code{\link{prnGSPAHM}} for heat map and network visualization of GSPA results \cr
\code{\link{prnGSVA}} for gene set variance analysis \cr
\code{\link{prnGSEA}} for data preparation for online GSEA. \cr
\code{\link{pepMDS}} and \code{\link{prnMDS}} for MDS visualization \cr
\code{\link{pepPCA}} and \code{\link{prnPCA}} for PCA visualization \cr
\code{\link{pepLDA}} and \code{\link{prnLDA}} for LDA visualization \cr
\code{\link{pepHM}} and \code{\link{prnHM}} for heat map visualization \cr
\code{\link{pepCorr_logFC}}, \code{\link{prnCorr_logFC}}, \code{\link{pepCorr_logInt}} and
\code{\link{prnCorr_logInt}} for correlation plots \cr
\code{\link{anal_prnTrend}} and \code{\link{plot_prnTrend}} for trend analysis and visualization \cr
\code{\link{anal_pepNMF}}, \code{\link{anal_prnNMF}}, \code{\link{plot_pepNMFCon}},
\code{\link{plot_prnNMFCon}}, \code{\link{plot_pepNMFCoef}}, \code{\link{plot_prnNMFCoef}} and
\code{\link{plot_metaNMF}} for NMF analysis and visualization \cr
\emph{Custom databases} \cr
\code{\link{Uni2Entrez}} for lookups between UniProt accessions and Entrez IDs \cr
\code{\link{Ref2Entrez}} for lookups among RefSeq accessions, gene names and Entrez IDs \cr
\code{\link{prepGO}} for \code{\href{http://current.geneontology.org/products/pages/downloads.html}{gene
ontology}} \cr
\code{\link{prepMSig}} for \href{https://data.broadinstitute.org/gsea-msigdb/msigdb/release/7.0/}{molecular
signatures} \cr
\code{\link{prepString}} and \code{\link{anal_prnString}} for STRING-DB \cr
\emph{Column keys in PSM, peptide and protein outputs} \cr
system.file("extdata", "mascot_psm_keys.txt", package = "proteoQ") \cr
system.file("extdata", "mascot_peptide_keys.txt", package = "proteoQ") \cr
system.file("extdata", "mascot_protein_keys.txt", package = "proteoQ") \cr
}
|
################################################################################################
## Forming the dataframe for plotting LC and computing the lme model
# Build the plotting/modelling data frame for one response variable.
#   cdat  - list with $data (long-format measurements: Id, Tp, response columns),
#           $dataM (per-animal metadata: Id, Grp, Use; rownames = Id) and $Resp
#   resp  - name of the response column to extract
#   lgrps - group levels to keep (empty -> all groups)
#   gcols - named vector mapping group level -> colour
#   trans - transformation suffix ('None', or e.g. 'Log' -> column "resp.log")
# Returns list(Df, Resp, Trans) where Df holds Id, Tp, Resp (possibly
# transformed), Resp_ori (untransformed), Grp and the per-group colour,
# ordered by group, animal and time.
getLGmat <- function(cdat, resp = cdat$Resp[1], lgrps = levels(cdat$dataM$Grp),
                     gcols = getCols(cdat$dataM$Grp), trans = 'None') {
  if (length(lgrps) == 0) lgrps <- levels(cdat$dataM$Grp)

  # Column holding the (possibly transformed) response values
  iresp <- if (trans != 'None') paste(resp, tolower(trans), sep = ".") else resp

  # Animals flagged as usable and belonging to one of the requested groups
  lmids <- cdat$dataM$Id[cdat$dataM$Use & cdat$dataM$Grp %in% lgrps]
  l <- which(cdat$data$Id %in% lmids & !is.na(cdat$data[, iresp]))

  df <- cdat$data[l, c("Id", "Tp", iresp, resp)]
  names(df) <- c("Id", "Tp", "Resp", "Resp_ori")
  df$Grp <- factor(cdat$dataM[df$Id, ]$Grp)

  # Order the Id factor levels by group so downstream plots keep animals grouped
  idlevs <- unlist(tapply(as.character(df$Id), df$Grp, unique))
  df$Id <- factor(df$Id, levels = idlevs)
  df$color <- gcols[as.character(df$Grp)]
  df <- df[order(df$Grp, df$Id, df$Tp), ]

  list(Df = df, Resp = resp, Trans = trans)
}
# plotLineC: render the longitudinal data prepared by getLGmat() as an
# interactive Highcharts line chart (rCharts).
#   lgdata     - list(Df, Resp, Trans) as returned by getLGmat()
#   type       - plot layers: 'tc' = per-animal time courses,
#                'mese' = group mean +/- SE band, 'mesd' = group mean +/- SD band
#   force2zero - if TRUE, pad each animal with an artificial point at t=0
#   defzero    - value used as the y minimum when padding (NA = data minimum)
#   miny, maxy - optional hard y-axis limits (NA = derive from the data)
# Returns a list: the chart object, the (possibly padded) plotting data frame,
# the per-group summary table, axis limits, response name and plot title.
plotLineC<-function(lgdata,type=c('tc','mese'),force2zero=FALSE,defzero=NA,miny=NA,maxy=NA){
#force2zero=FALSE;type='All';se=TRUE;defzero=NA;miny=NA;maxy=NA;gcols=getCols(cdat$dataM$Grp)
# Default to plain time courses; the error band uses SE when 'mese' is requested
if(length(type)==0) type='tc'
se=('mese'%in%type)
idf=lgdata$Df
resp=lgdata$Resp
# One colour per group (all of a group's animals share the group colour)
gcols=tapply(lgdata$Df$color,lgdata$Df$Grp,unique)
### set ylim and xlim
xlim=pretty(seq(ifelse(force2zero,0,min(idf$Tp)),max(idf$Tp),length=9))
if(is.na(miny)){
miny=round(min(idf$Resp,na.rm=T),3)
if(force2zero & all(idf$Tp>0,na.rm=T)) miny=ifelse(is.na(defzero),miny,round(defzero,3))
}
# NOTE(review): no na.rm here, unlike miny above -- relies on Resp having been
# NA-filtered upstream (getLGmat drops NA responses); confirm before reusing elsewhere
if(is.na(maxy)) maxy=max(idf$Resp)
ylim=pretty(sort(c(miny,maxy)))
#### pad for t=0
# idf=df
# Prepend one artificial t=0 observation per animal at the plot minimum, then
# clamp any value at/below the minimum so all curves start from a common baseline
if(force2zero & all(idf$Tp>0)){
aidf=idf[tapply(1:nrow(idf),idf$Id,function(x) x[1]),]
aidf$Tp=0
aidf$Resp=miny
idf=rbind(aidf,idf)
idf=idf[order(idf$Grp,idf$Id,idf$Tp),]
idf$Resp[which(idf$Resp<=miny)]=miny
}
###### compute sd/se/me data for plot
# Per group and time point: n, mean (y), sd and se of the response
tmpdata=do.call("rbind",lapply(levels(idf$Grp),function(i){
l=which(idf$Grp==i)
tmp=t(do.call("cbind",tapply(idf$Resp[l],idf$Tp[l],function(x)
c(n=length(x),y=mean(x),sd=sd(x),se=sd(x)/sqrt(length(x))))))
data.frame(cbind(tmp,x=tapply(idf$Tp[l],idf$Tp[l],unique)),Grp=i,color=idf$color[l][1],stringsAsFactors=F)
}))
##################################
title=paste("Response:",lgdata$Resp)
if(lgdata$Trans!="None") title=paste(title," (",lgdata$Trans,")",sep="")
levgrp=levels(idf$Grp)
# Highcharts expects plain character columns and x/y column names
idf$Grp=as.character(idf$Grp)
idf$Id=as.character(idf$Id)
names(idf)[which(names(idf)=="Resp")]="y"
names(idf)[which(names(idf)=="Tp")]="x"
a <- rCharts::Highcharts$new()
# Group mean line plus an SE (or SD) area band: one pair of series per group
if(any(c('mese','mesd')%in%type)){
for(i in levgrp){
tmp=tmpdata[tmpdata$Grp==i,]
# Band half-width: SE when se is TRUE, otherwise SD
tmp$ymin=round(tmp$y-ifelse(se,1,0)*tmp$se-ifelse(se,0,1)*tmp$sd,3)
tmp$ymax=round(tmp$y+ifelse(se,1,0)*tmp$se+ifelse(se,0,1)*tmp$sd,3)
# Collapse the band to the mean where the spread is undefined (single observation)
if(any(is.na(tmp$se))) tmp$ymin[is.na(tmp$se)]=tmp$ymax[is.na(tmp$se)]=tmp$y[is.na(tmp$se)]
tmp$y=round(tmp$y,3)
tmp$se=round(tmp$se,3)
a$series(data = lapply(1:nrow(tmp),function(j) as.list(tmp[j,c("x","y","Grp")])),
name=i,type = "line",color=tmp$color[1],lineWidth=4)
a$series(data = lapply(which(tmp$Grp==i),function(j)
unname(as.list(tmp[j,c("x","ymin","ymax")]))),type = "arearange",name=paste(i,": ",ifelse(se,"SE","SD"),sep=""),
fillOpacity = 0.3,lineWidth = 0,color=unname(tmp$color[which(tmp$Grp==i)][1]),zIndex = 0)
}
}
# Individual time course per animal, with an Id/Grp tooltip
if('tc'%in%type){
for(i in unique(idf$Id))
a$series(data = unname(lapply(which(idf$Id==i),function(j) as.list(idf[j,c("x","y","Grp","Id")]))), name=i,type = "line",
color=unname(idf$color[which(idf$Id==i)][1]))
a$tooltip( formatter = "#! function() { return this.point.Id + ' (' + this.point.Grp + ') at ' +
this.point.x + ': ' + this.point.y ; } !#")
}
a$yAxis(title = list(text = title), min = min(ylim), max = max(ylim), tickInterval = diff(ylim)[1])
a$xAxis(title = list(text = "Time"), min = min(xlim), max = max(xlim), tickInterval = diff(xlim)[1])
a$legend(verticalAlign = "right", align = "right", layout = "vertical", title = list(text = "Mice"))
return(list(plot=a,df=idf,sesd=tmpdata,xlim=xlim,ylim=ylim,Resp=resp,Title=title))
}
# ############################################################################
# ## Old
# getLGmat2<-function(cdat,resp=cdat$Resp[1],lgrps=levels(cdat$dataM$Grp),
# gcols=getCols(cdat$dataM$Grp)){
#
# if(length(lgrps)==0) lgrps=levels(cdat$dataM$Grp)
# lmids=cdat$dataM$Id[cdat$dataM$Use & cdat$dataM$Grp%in%lgrps]
#
# l=which(cdat$data$Id%in%lmids & !is.na(cdat$data[,resp]))
# df=cdat$data[l,c("Id","Tp",resp)]
# names(df)=c("Id","Tp","Resp")
#
# iresp=paste(resp,c("log","sqrt","curt"),sep=".")
# iresp= iresp[iresp%in%names(cdat$data)]
# nresp=gsub(paste("^",resp,"\\.",sep=""),"Resp_",iresp)
# df[,nresp]=cdat$data[l,iresp]
#
# df$Grp=factor(cdat$dataM[df$Id,]$Grp)
# idlevs=unlist(tapply(as.character(df$Id),df$Grp,unique))
# df$Id=factor(df$Id,levels=idlevs)
# df$color=gcols[as.character(df$Grp)]
# df=df[order(df$Grp,df$Id,df$Tp),]
#
# return(list(Df=df,Resp=resp,Trans=gsub(paste("^",resp,"\\.",sep=""),"",iresp)))
# }
| /Line_chart.R | no_license | kroemerlab/TumGrowth | R | false | false | 5,247 | r | ################################################################################################
## Forming the dataframe for plotting LC and computing the lme model
# Build the plotting/modelling data frame for one response variable.
#   cdat  - list with $data (long-format measurements: Id, Tp, response columns),
#           $dataM (per-animal metadata: Id, Grp, Use; rownames = Id) and $Resp
#   resp  - name of the response column to extract
#   lgrps - group levels to keep (empty -> all groups)
#   gcols - named vector mapping group level -> colour
#   trans - transformation suffix ('None', or e.g. 'Log' -> column "resp.log")
# Returns list(Df, Resp, Trans) where Df holds Id, Tp, Resp (possibly
# transformed), Resp_ori (untransformed), Grp and the per-group colour,
# ordered by group, animal and time.
getLGmat <- function(cdat, resp = cdat$Resp[1], lgrps = levels(cdat$dataM$Grp),
                     gcols = getCols(cdat$dataM$Grp), trans = 'None') {
  if (length(lgrps) == 0) lgrps <- levels(cdat$dataM$Grp)

  # Column holding the (possibly transformed) response values
  iresp <- if (trans != 'None') paste(resp, tolower(trans), sep = ".") else resp

  # Animals flagged as usable and belonging to one of the requested groups
  lmids <- cdat$dataM$Id[cdat$dataM$Use & cdat$dataM$Grp %in% lgrps]
  l <- which(cdat$data$Id %in% lmids & !is.na(cdat$data[, iresp]))

  df <- cdat$data[l, c("Id", "Tp", iresp, resp)]
  names(df) <- c("Id", "Tp", "Resp", "Resp_ori")
  df$Grp <- factor(cdat$dataM[df$Id, ]$Grp)

  # Order the Id factor levels by group so downstream plots keep animals grouped
  idlevs <- unlist(tapply(as.character(df$Id), df$Grp, unique))
  df$Id <- factor(df$Id, levels = idlevs)
  df$color <- gcols[as.character(df$Grp)]
  df <- df[order(df$Grp, df$Id, df$Tp), ]

  list(Df = df, Resp = resp, Trans = trans)
}
# plotLineC: render the longitudinal data prepared by getLGmat() as an
# interactive Highcharts line chart (rCharts).
#   lgdata     - list(Df, Resp, Trans) as returned by getLGmat()
#   type       - plot layers: 'tc' = per-animal time courses,
#                'mese' = group mean +/- SE band, 'mesd' = group mean +/- SD band
#   force2zero - if TRUE, pad each animal with an artificial point at t=0
#   defzero    - value used as the y minimum when padding (NA = data minimum)
#   miny, maxy - optional hard y-axis limits (NA = derive from the data)
# Returns a list: the chart object, the (possibly padded) plotting data frame,
# the per-group summary table, axis limits, response name and plot title.
plotLineC<-function(lgdata,type=c('tc','mese'),force2zero=FALSE,defzero=NA,miny=NA,maxy=NA){
#force2zero=FALSE;type='All';se=TRUE;defzero=NA;miny=NA;maxy=NA;gcols=getCols(cdat$dataM$Grp)
# Default to plain time courses; the error band uses SE when 'mese' is requested
if(length(type)==0) type='tc'
se=('mese'%in%type)
idf=lgdata$Df
resp=lgdata$Resp
# One colour per group (all of a group's animals share the group colour)
gcols=tapply(lgdata$Df$color,lgdata$Df$Grp,unique)
### set ylim and xlim
xlim=pretty(seq(ifelse(force2zero,0,min(idf$Tp)),max(idf$Tp),length=9))
if(is.na(miny)){
miny=round(min(idf$Resp,na.rm=T),3)
if(force2zero & all(idf$Tp>0,na.rm=T)) miny=ifelse(is.na(defzero),miny,round(defzero,3))
}
# NOTE(review): no na.rm here, unlike miny above -- relies on Resp having been
# NA-filtered upstream (getLGmat drops NA responses); confirm before reusing elsewhere
if(is.na(maxy)) maxy=max(idf$Resp)
ylim=pretty(sort(c(miny,maxy)))
#### pad for t=0
# idf=df
# Prepend one artificial t=0 observation per animal at the plot minimum, then
# clamp any value at/below the minimum so all curves start from a common baseline
if(force2zero & all(idf$Tp>0)){
aidf=idf[tapply(1:nrow(idf),idf$Id,function(x) x[1]),]
aidf$Tp=0
aidf$Resp=miny
idf=rbind(aidf,idf)
idf=idf[order(idf$Grp,idf$Id,idf$Tp),]
idf$Resp[which(idf$Resp<=miny)]=miny
}
###### compute sd/se/me data for plot
# Per group and time point: n, mean (y), sd and se of the response
tmpdata=do.call("rbind",lapply(levels(idf$Grp),function(i){
l=which(idf$Grp==i)
tmp=t(do.call("cbind",tapply(idf$Resp[l],idf$Tp[l],function(x)
c(n=length(x),y=mean(x),sd=sd(x),se=sd(x)/sqrt(length(x))))))
data.frame(cbind(tmp,x=tapply(idf$Tp[l],idf$Tp[l],unique)),Grp=i,color=idf$color[l][1],stringsAsFactors=F)
}))
##################################
title=paste("Response:",lgdata$Resp)
if(lgdata$Trans!="None") title=paste(title," (",lgdata$Trans,")",sep="")
levgrp=levels(idf$Grp)
# Highcharts expects plain character columns and x/y column names
idf$Grp=as.character(idf$Grp)
idf$Id=as.character(idf$Id)
names(idf)[which(names(idf)=="Resp")]="y"
names(idf)[which(names(idf)=="Tp")]="x"
a <- rCharts::Highcharts$new()
# Group mean line plus an SE (or SD) area band: one pair of series per group
if(any(c('mese','mesd')%in%type)){
for(i in levgrp){
tmp=tmpdata[tmpdata$Grp==i,]
# Band half-width: SE when se is TRUE, otherwise SD
tmp$ymin=round(tmp$y-ifelse(se,1,0)*tmp$se-ifelse(se,0,1)*tmp$sd,3)
tmp$ymax=round(tmp$y+ifelse(se,1,0)*tmp$se+ifelse(se,0,1)*tmp$sd,3)
# Collapse the band to the mean where the spread is undefined (single observation)
if(any(is.na(tmp$se))) tmp$ymin[is.na(tmp$se)]=tmp$ymax[is.na(tmp$se)]=tmp$y[is.na(tmp$se)]
tmp$y=round(tmp$y,3)
tmp$se=round(tmp$se,3)
a$series(data = lapply(1:nrow(tmp),function(j) as.list(tmp[j,c("x","y","Grp")])),
name=i,type = "line",color=tmp$color[1],lineWidth=4)
a$series(data = lapply(which(tmp$Grp==i),function(j)
unname(as.list(tmp[j,c("x","ymin","ymax")]))),type = "arearange",name=paste(i,": ",ifelse(se,"SE","SD"),sep=""),
fillOpacity = 0.3,lineWidth = 0,color=unname(tmp$color[which(tmp$Grp==i)][1]),zIndex = 0)
}
}
# Individual time course per animal, with an Id/Grp tooltip
if('tc'%in%type){
for(i in unique(idf$Id))
a$series(data = unname(lapply(which(idf$Id==i),function(j) as.list(idf[j,c("x","y","Grp","Id")]))), name=i,type = "line",
color=unname(idf$color[which(idf$Id==i)][1]))
a$tooltip( formatter = "#! function() { return this.point.Id + ' (' + this.point.Grp + ') at ' +
this.point.x + ': ' + this.point.y ; } !#")
}
a$yAxis(title = list(text = title), min = min(ylim), max = max(ylim), tickInterval = diff(ylim)[1])
a$xAxis(title = list(text = "Time"), min = min(xlim), max = max(xlim), tickInterval = diff(xlim)[1])
a$legend(verticalAlign = "right", align = "right", layout = "vertical", title = list(text = "Mice"))
return(list(plot=a,df=idf,sesd=tmpdata,xlim=xlim,ylim=ylim,Resp=resp,Title=title))
}
# ############################################################################
# ## Old
# getLGmat2<-function(cdat,resp=cdat$Resp[1],lgrps=levels(cdat$dataM$Grp),
# gcols=getCols(cdat$dataM$Grp)){
#
# if(length(lgrps)==0) lgrps=levels(cdat$dataM$Grp)
# lmids=cdat$dataM$Id[cdat$dataM$Use & cdat$dataM$Grp%in%lgrps]
#
# l=which(cdat$data$Id%in%lmids & !is.na(cdat$data[,resp]))
# df=cdat$data[l,c("Id","Tp",resp)]
# names(df)=c("Id","Tp","Resp")
#
# iresp=paste(resp,c("log","sqrt","curt"),sep=".")
# iresp= iresp[iresp%in%names(cdat$data)]
# nresp=gsub(paste("^",resp,"\\.",sep=""),"Resp_",iresp)
# df[,nresp]=cdat$data[l,iresp]
#
# df$Grp=factor(cdat$dataM[df$Id,]$Grp)
# idlevs=unlist(tapply(as.character(df$Id),df$Grp,unique))
# df$Id=factor(df$Id,levels=idlevs)
# df$color=gcols[as.character(df$Grp)]
# df=df[order(df$Grp,df$Id,df$Tp),]
#
# return(list(Df=df,Resp=resp,Trans=gsub(paste("^",resp,"\\.",sep=""),"",iresp)))
# }
|
#' @examples
#' \dontrun{
#' res <- dfp_getReportDownloadUrlWithOptions(request_data)
#' }
| /examples/examples-dfp_getReportDownloadUrlWithOptions.R | no_license | StevenMMortimer/rdfp | R | false | false | 92 | r | #' @examples
#' \dontrun{
#' res <- dfp_getReportDownloadUrlWithOptions(request_data)
#' }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap.R
\name{plot_na}
\alias{plot_na}
\title{Plot Heatmap for NA}
\usage{
plot_na(data, order_rows = FALSE, order_cols = FALSE, title = "Missing Data")
}
\arguments{
\item{data}{Tibble or Dataframe.}
\item{order_rows}{Logical. Reorder rows?}
\item{order_cols}{Logical. Reorder columns?}
\item{title}{Title of the graph.}
}
\value{
Returns a ggplot geom_tile heatmap.
}
\description{
Plot Heatmap for NA
}
\examples{
x <- tibble::tibble(
a = sample(c(1:10,NA), 100, replace = TRUE),
b = sample(c(1:10,NA), 100, replace = TRUE),
c = sample(c(1:10,NA), 100, replace = TRUE),
d = sample(c(1:10,NA), 100, replace = TRUE),
e = sample(c(1:10,NA), 100, replace = TRUE),
f = sample(c(1:10,NA), 100, replace = TRUE))
plot_na(x)
}
| /man/plot_na.Rd | no_license | edo91/ggfancy | R | false | true | 876 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heatmap.R
\name{plot_na}
\alias{plot_na}
\title{Plot Heatmap for NA}
\usage{
plot_na(data, order_rows = FALSE, order_cols = FALSE, title = "Missing Data")
}
\arguments{
\item{data}{Tibble or Dataframe.}
\item{order_rows}{Logical. Reorder rows?}
\item{order_cols}{Logical. Reorder columns?}
\item{title}{Title of the graph.}
}
\value{
Returns a ggplot geom_tile heatmap.
}
\description{
Plot Heatmap for NA
}
\examples{
x <- tibble::tibble(
a = sample(c(1:10,NA), 100, replace = TRUE),
b = sample(c(1:10,NA), 100, replace = TRUE),
c = sample(c(1:10,NA), 100, replace = TRUE),
d = sample(c(1:10,NA), 100, replace = TRUE),
e = sample(c(1:10,NA), 100, replace = TRUE),
f = sample(c(1:10,NA), 100, replace = TRUE))
plot_na(x)
}
|
#' Plotting output and parameters of inferential interest for IMIFA and related models
#'
#' @param x An object of class \code{"Results_IMIFA"} generated by \code{\link{get_IMIFA_results}}.
#' @param plot.meth The type of plot to be produced for the \code{param} of interest, where \code{correlation} refers to ACF/PACF plots, \code{means} refers to posterior means, \code{density}, \code{trace} and \code{parallel.coords} are self-explanatory. \code{"all"} in this case, the default, refers to {\code{"trace"}, \code{"density"}, \code{"means"}, and \code{"correlation"}}. \code{"parallel.coords"} is only available when \code{param} is one of \code{"means"}, \code{"loadings"} or \code{"uniquenesses"} - note that this method applies a small amount of horizontal jitter to avoid overplotting.
#'
#' Special types of plots which don't require a \code{param} are:
#' \describe{
#' \item{\code{"GQ"}}{for plotting the posterior summaries of the numbers of clusters/factors, if available.}
#' \item{\code{"zlabels"}}{for plotting clustering uncertainties - in four different ways (incl. the posterior confusion matrix) - if clustering has taken place, with or without the clustering labels being supplied via the \code{zlabels} argument. If available, the average similarity matrix, reordered according to the MAP labels, is shown as a 5-th plot.}
#' \item{\code{"errors"}}{for conducting posterior predictive checking of the appropriateness of the fitted model by visualising the posterior predictive reconstruction error (PPRE) &/or histograms comparing the data to replicate draws from the posterior distribution &/or error metrics quantifying the difference between the estimated and empirical covariance matrices. The type of plot(s) produced depends on how the \code{error.metrics} argument was supplied to \code{\link{get_IMIFA_results}} and what parameters were stored.}
#' }
#' The argument \code{g} can be used to cycle through the available plots in each case. \code{ind} can also be used to govern which variable is shown for the 2-nd plot.
#' @param param The parameter of interest for any of the following \code{plot.meth} options: \code{all}, \code{trace}, \code{density}, \code{means}, \code{correlation}. The \code{param} must have been stored when \code{\link{mcmc_IMIFA}} was initially ran. Includes \code{pis} for methods where clustering takes place, and allows posterior inference on \code{alpha} (for the \code{"IMFA"}, \code{"IMIFA"}, \code{"OMFA"}, and \code{"OMIFA"} methods) and \code{discount} (for the \code{"IMFA"} and \code{"IMIFA"} methods). Otherwise \code{"means"}, \code{"scores"}, \code{"loadings"}, and \code{"uniquenesses"} can be plotted.
#' @param g Optional argument that allows specification of exactly which cluster the plot of interest is to be produced for. If not supplied, the user will be prompted to cycle through plots for all clusters. Also functions as an index for which plot to return when \code{plot.meth} is \code{GQ}, \code{zlabels}, or \code{errors} in much the same way.
#' @param mat Logical indicating whether a \code{\link[graphics]{matplot}} is produced (defaults to \code{TRUE}). If given as \code{FALSE}, \code{ind} is invoked.
#' @param zlabels The true labels can be supplied if they are known. If this is not supplied, the function uses the labels that were supplied, if any, to \code{\link{get_IMIFA_results}}. Only relevant when \code{plot.meth = "zlabels"}. When explicitly supplied, misclassified observations are highlighted in the first type of uncertainty plot (otherwise observations whose uncertainty exceed the inverse of the number of clusters are highlighted). For the second type of uncertainty plot, when \code{zlabels} are explicitly supplied, the uncertainty of misclassified observations is marked by vertical lines on the profile plot.
#' @param heat.map A logical which controls plotting posterior mean loadings or posterior mean scores as a heatmap, or else as something akin to \code{link{plot(..., type="h")}}. Only relevant if \code{param = "loadings"} (in which case the default is \code{TRUE}) or \code{param = "scores"} (in which case the default is \code{FALSE}). Heatmaps are produced with the aid of \code{\link{mat2cols}} and \code{\link{plot_cols}}.
#' @param show.last A logical indicator which defaults to \code{FALSE}, but when \code{TRUE} replaces any instance of the posterior mean with the last valid sample. Only relevant when \code{param} is one of \code{"means"} \code{"scores"}, \code{"loadings"}, \code{"uniquenesses"}, or \code{"pis"} and \code{plot.meth} is one of \code{"all"} or \code{"means"}. Also relevant for \code{"means"}, \code{"loadings"} and \code{"uniquenesses"} when \code{plot.meth} is \code{"parallel.coords"}. When \code{TRUE}, this has the effect of forcing \code{intervals} to be \code{FALSE}.
#' @param palette An optional colour palette to be supplied if overwriting the default palette set inside the function by \code{\link[viridisLite]{viridis}} is desired. It makes little sense to a supply a \code{palette} when \code{plot.meth="all"} and \code{param} is one of \code{"scores"} or \code{"loadings"}.
#' @param ind Either a single number indicating which variable to plot when \code{param} is one of \code{means} or \code{uniquenesses} (or \code{plot.meth="errors"}), or which cluster to plot if \code{param} is \code{pis}. If \code{scores} are plotted, a vector of length two giving which observation and factor to plot; if \code{loadings} are plotted, a vector of length two giving which variable and factor to plot. Will be recycled to length 2 if necessary. Also governs which two factors are displayed on posterior mean plots of the \code{"scores"} when \code{heat.map} is \code{FALSE}; otherwise only relevant when \code{mat} is \code{FALSE}.
#' @param fac Optional argument that provides an alternative way to specify \code{ind[2]} when \code{mat} is \code{FALSE} and \code{param} is one of \code{scores} or \code{loadings}.
#' @param by.fac Optionally allows (mat)plotting of scores and loadings by factor - i.e. observation(s) (scores) or variable(s) (loadings) for a given factor, respectively, controlled by \code{ind} or \code{fac}) when set to \code{TRUE}. Otherwise all factor(s) are plotted for a given observation or variable when set to \code{FALSE} (the default), again controlled by \code{ind} or \code{fac}. Only relevant when \code{param} is one of \code{scores} or \code{loadings}.
#' @param type The manner in which the plot is to be drawn, as per the \code{type} argument to \code{\link{plot}}.
#' @param intervals Logical indicating whether credible intervals around the posterior mean(s) are to be plotted when \code{is.element(plot.meth, c("all", "means"))}. Defaults to \code{TRUE}, but can only be \code{TRUE} when \code{show.last} is \code{FALSE}.
#' @param common Logical indicating whether plots with \code{plot.meth="means"} (or the corresponding plots for \code{plot.meth="all"}) when \code{param} is one of \code{"means"}, \code{"scores"}, \code{"loadings"}, or \code{"uniquenesses"} are calibrated to a common scale based on the range of the \code{param} parameters across all clusters (defaults to \code{TRUE}, and only relevant when there are clusters). Otherwise, the only the range corresponding to the image being plotted is used to determine the scale.
#'
#' Note that this affects the \code{"loadings"} and \code{"scores"} plots regardless of the value of \code{heat.map}. An exception is the \code{"scores"} plots when \code{plot.meth="means"} and \code{heat.map} is \code{FALSE}, in which case \code{common} defaults to \code{FALSE}.
#' @param partial Logical indicating whether plots of type \code{"correlation"} use the PACF. The default, \code{FALSE}, ensures the ACF is used. Only relevant when \code{plot.meth = "all"}, otherwise both plots are produced when \code{plot.meth = "correlation"}.
#' @param titles Logical indicating whether default plot titles are to be used (\code{TRUE}), or suppressed (\code{FALSE}).
#' @param transparency A factor in [0, 1] modifying the opacity for overplotted lines. Defaults to 0.75, unless semi-transparency is not supported. Only relevant when \code{palette} is not supplied, otherwise the supplied \code{palette} must already be adjusted for transparency.
#' @param ... Other arguments typically passed to \code{\link{plot}} or the \code{breaks} argument to \code{\link{mat2cols}} and \code{\link{heat_legend}} when heatmaps are plotted.
#'
#' @return The desired plot with appropriate output and summary statistics printed to the console screen.
#' @export
#' @note Supplying the argument \code{zlabels} does \strong{not} have the same effect of reordering the sampled parameters as it does if supplied directly to \code{\link{get_IMIFA_results}}.
#'
#' When \code{mat} is \code{TRUE} and \code{by.fac} is \code{FALSE} (both defaults), the convention for dealing with overplotting for \code{trace} and \code{density} plots when \code{param} is either \code{scores} or \code{loadings} is to plot the last factor first, such that the first factor appears 'on top'.
#' @keywords plotting main
#' @method plot Results_IMIFA
#' @importFrom Rfast "colMedians" "Median"
#' @importFrom matrixStats "rowRanges"
#' @importFrom mclust "classError"
#' @importFrom viridisLite "viridis"
#' @seealso \code{\link{mcmc_IMIFA}}, \code{\link{get_IMIFA_results}}, \code{\link{mat2cols}}, \code{\link{plot_cols}}
#' @references Murphy, K., Viroli, C., and Gormley, I. C. (2020) Infinite mixtures of infinite factor analysers, \emph{Bayesian Analysis}, 15(3): 937-963. <\href{https://projecteuclid.org/euclid.ba/1570586978}{doi:10.1214/19-BA1179}>.
#'
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @usage
#' \method{plot}{Results_IMIFA}(x,
#' plot.meth = c("all", "correlation", "density", "errors", "GQ",
#' "means", "parallel.coords", "trace", "zlabels"),
#' param = c("means", "scores", "loadings", "uniquenesses",
#' "pis", "alpha", "discount"),
#' g = NULL,
#' mat = TRUE,
#' zlabels = NULL,
#' heat.map = TRUE,
#' show.last = FALSE,
#' palette = NULL,
#' ind = NULL,
#' fac = NULL,
#' by.fac = FALSE,
#' type = c("h", "n", "p", "l"),
#' intervals = TRUE,
#' common = TRUE,
#' partial = FALSE,
#' titles = TRUE,
#' transparency = 0.75,
#' ...)
#' @examples
#' \donttest{# See the vignette associated with the package for more graphical examples:
#' # vignette("IMIFA", package = "IMIFA")
#'
#' # data(olive)
#' # simIMIFA <- mcmc_IMIFA(olive, method="IMIFA")
#' # resIMIFA <- get_IMIFA_results(simIMIFA, z.avgsim=TRUE)
#'
#' # Examine the posterior distribution(s) of the number(s) of clusters (G) &/or latent factors (Q)
#' # For the IM(I)FA and OM(I)FA methods, this also plots the trace of the active/non-empty clusters
#' # plot(resIMIFA, plot.meth="GQ")
#' # plot(resIMIFA, plot.meth="GQ", g=2)
#'
#' # Plot clustering uncertainty (and, if available, the similarity matrix)
#' # plot(resIMIFA, plot.meth="zlabels", zlabels=olive$area)
#'
#' # Visualise the posterior predictive reconstruction error
#' # plot(resIMIFA, plot.meth="errors", g=1)
#'
#' # Compare histograms of the data vs. replicate draw from the posterior for the 1st variable
#' # plot(resIMIFA, plot.meth="errors", g=2, ind=1)
#'
#' # Visualise empirical vs. estimated covariance error metrics
#' # plot(resIMIFA, plot.meth="errors", g=3)
#'
#' # Look at the trace, density, posterior mean, and correlation of various parameters of interest
#' # plot(resIMIFA, plot.meth="all", param="means", g=1)
#' # plot(resIMIFA, plot.meth="all", param="means", g=1, ind=2)
#' # plot(resIMIFA, plot.meth="trace", param="scores")
#' # plot(resIMIFA, plot.meth="trace", param="scores", by.fac=TRUE)
#' # plot(resIMIFA, plot.meth="means", param="loadings", g=1)
#' # plot(resIMIFA, plot.meth="means", param="loadings", g=1, heat.map=FALSE)
#' # plot(resIMIFA, plot.meth="parallel.coords", param="uniquenesses")
#' # plot(resIMIFA, plot.meth="density", param="pis", intervals=FALSE, partial=TRUE)
#' # plot(resIMIFA, plot.meth="all", param="alpha")
#' # plot(resIMIFA, plot.meth="all", param="discount")}
plot.Results_IMIFA <- function(x, plot.meth = c("all", "correlation", "density", "errors", "GQ", "means", "parallel.coords", "trace", "zlabels"), param = c("means", "scores", "loadings", "uniquenesses", "pis", "alpha", "discount"), g = NULL, mat = TRUE,
zlabels = NULL, heat.map = TRUE, show.last = FALSE, palette = NULL, ind = NULL, fac = NULL, by.fac = FALSE, type = c("h", "n", "p", "l"), intervals = TRUE, common = TRUE, partial = FALSE, titles = TRUE, transparency = 0.75, ...) {
if(missing(x)) stop("'x' must be supplied", call.=FALSE)
if(!inherits(x, "Results_IMIFA")) stop("Results object of class 'Results_IMIFA' must be supplied", call.=FALSE)
GQ.res <- x$GQ.results
G <- GQ.res$G
Gseq <- seq_len(G)
Qs <- unname(GQ.res$Q)
Q.max <- max(Qs)
Qmseq <- seq_len(Q.max)
nLx <- Qs != 0
defpar <- suppressWarnings(graphics::par(no.readonly=TRUE))
defpar$new <- FALSE
suppressWarnings(graphics::par(pty="m"))
mispal <- missing(palette)
oldpal <- grDevices::palette()
if(mispal) palette <- viridis(min(10L, max(G, Q.max, 5L)), option="D")
if(!all(is.cols(cols=palette))) stop("Supplied colour palette contains invalid colours", call.=FALSE)
if(length(palette) < 5) warning("Palette should contain 5 or more colours\n", call.=FALSE)
trx <- grDevices::dev.capabilities()$semiTransparency
xtr <- missing(transparency)
if(length(transparency) != 1 &&
any(!is.numeric(transparency),
(transparency < 0 ||
transparency > 1))) stop("'transparency' must be a single number in [0, 1]", call.=FALSE)
if(transparency != 1 && !trx) {
if(!xtr) message("'transparency' not supported on this device\n")
transparency <- 1
}
tmp.pal <- palette
palette <- if(mispal) grDevices::adjustcolor(palette, alpha.f=transparency) else palette
grDevices::palette(palette)
grey <- ifelse(trx, grDevices::adjustcolor("grey50", alpha.f=transparency), "grey50")
defopt <- options()
options(warn=1)
suppressWarnings(graphics::par(cex.axis=0.8, new=FALSE))
on.exit(suppressWarnings(graphics::par(defpar)))
on.exit(do.call(graphics::clip, as.list(defpar$usr)), add=TRUE)
on.exit(grDevices::palette(oldpal), add=TRUE)
on.exit(suppressWarnings(options(defopt)), add=TRUE)
dots <- list(...)
dots <- dots[unique(names(dots))]
if(brX <- "breaks" %in% names(dots)) {
brXs <- dots[["breaks"]]
}
n.grp <- attr(GQ.res, "Clusters")
n.fac <- attr(GQ.res, "Factors")
G.supp <- attr(GQ.res, "Supplied")["G"]
Q.supp <- attr(GQ.res, "Supplied")["Q"]
method <- attr(x, "Method")
store <- attr(x, "Store")
n.var <- attr(x, "Vars")
var.pal <- max(min(n.var, 1024L), 2L)
n.obs <- attr(x, "Obs")
z.sim <- attr(x, "Z.sim")
plot.mx <- missing(plot.meth)
param.x <- missing(param)
type.x <- missing(type)
if(!all(is.character(plot.meth))) stop("'plot.meth' must be a character vector of length 1", call.=FALSE)
if(!all(is.character(param))) stop("'param' must be a character vector of length 1", call.=FALSE)
if(!all(is.character(type))) stop("'type' must be a character vector of length 1", call.=FALSE)
if(plot.mx) {
if(!param.x) { plot.meth <- "all"
} else stop("'plot.meth' not supplied:\nWhat type of plot would you like to produce?", call.=FALSE)
}
if(is.element(plot.meth,
c("G", "Q",
"QG"))) { plot.meth <- "GQ"
}
plot.meth <- match.arg(plot.meth)
param <- match.arg(param)
type <- match.arg(type)
if(!is.element(plot.meth, c("errors", "GQ", "zlabels")) &&
param.x) stop("'param' not supplied:\nWhat variable would you like to plot?", call.=FALSE)
m.sw <- c(G.sw = FALSE, Z.sw = FALSE, E.sw = FALSE, P.sw = FALSE, C.sw = FALSE, D.sw = FALSE, M.sw = FALSE, T.sw = FALSE)
v.sw <- attr(x, "Switch")
obs.names <- attr(x, "Obsnames")
var.names <- attr(x, "Varnames")
obs.names <- if(is.null(obs.names)) seq_len(n.obs) else obs.names
var.names <- if(is.null(var.names)) seq_len(n.var) else var.names
v.sw <- c(v.sw[-6L], v.sw[6L])
names(v.sw) <- c(as.character(formals()$param)[-1L], "u.sw")
ci.sw <- v.sw
uni.type <- unname(attr(x, "Uni.Meth")['Uni.Type'])
if((grp.ind <- !is.element(method, c("FA", "IFA")) && !(param == "uniquenesses" && is.element(uni.type, c("constrained", "single"))))) {
clust <- x$Clust
grp.size <- clust$post.sizes
labelmiss <- !is.null(attr(clust, "Label.Sup")) && !attr(clust, "Label.Sup")
} else grp.size <- n.obs
grp.ind <- all(G != 1, grp.ind)
if((all.ind <- plot.meth == "all")) {
if(v.sw[param]) {
m.sw[-seq_len(4L)] <- !m.sw[-seq_len(4L)]
graphics::layout(matrix(c(1, 2, 3, 4), nrow=2L, ncol=2L, byrow=TRUE))
graphics::par(cex=0.8, mai=c(0.5, 0.5, 0.5, 0.2), mgp=c(2, 1, 0), oma=c(0, 0.5, 2, 0.5))
}
} else {
graphics::layout(1)
sw.n <- paste0(toupper(substring(plot.meth, 1L, 1L)), ".sw")
m.sw[sw.n] <- TRUE
}
if(param == "uniquenesses") {
mat <- switch(EXPR=uni.type, constrained=, unconstrained=mat, FALSE)
}
mat <- n.var != 1 && mat
z.miss <- missing(zlabels)
if(!z.miss) {
if(all(!is.factor(zlabels), !is.logical(zlabels), !is.numeric(zlabels)) ||
length(zlabels) != n.obs) stop(paste0("'zlabels' must be a factor of length N=", n.obs), call.=FALSE)
}
if(m.sw["P.sw"]) {
if(!is.element(param, c("means",
"loadings", "uniquenesses"))) stop("Can only plot parallel coordinates for means, loadings or uniquenesses", call.=FALSE)
}
if(!grp.ind) {
if(m.sw["Z.sw"]) stop("Can't use 'zlabels' for 'plot.meth' as no clustering has taken place", call.=FALSE)
if(param == "pis") stop("Can't plot mixing proportions as no clustering has taken place", call.=FALSE)
}
if(m.sw["E.sw"]) {
errX <- attr(x, "Errors")
if(is.element(errX,
c("None", "Vars"))) { stop("Can't plot error metrics as they were not calculated within get_IMIFA_results()", call.=FALSE)
} else if(errX == "PPRE") { warning("Can only plot the posterior predictive reconstruction error, and not error metrics between covariance matrices\n", call.=FALSE)
} else if(errX == "Covs") { warning("Can only plot error metrics between covariance matrices, and not the posterior predictive reconstruction error\n", call.=FALSE)
} else if(errX == "Post") warning("Can only plot error metrics between covariance matrices evaluated at the posterior mean, as they were not calculated for every iteration within get_IMIFA_results\n", call.=FALSE)
}
if(all(any(m.sw["M.sw"], m.sw["P.sw"], all.ind),
is.element(param, c("means", "uniquenesses")),
!v.sw[param],
is.element(method, c("FA", "IFA")))) {
if(show.last) { stop(paste0("Can't plot last valid sample, as ", param, switch(EXPR=param, alpha=, discount=" wasn't", " weren't"), " stored"), call.=FALSE)
} else if(param == "means" &&
!v.sw["u.sw"]) { stop("Nothing to plot as means were not updated", call.=FALSE)
} else if(all.ind) { warning(paste0("Can only plot posterior mean, as ", param, switch(EXPR=param, alpha=, discount=" wasn't", " weren't"), " stored\n"), call.=FALSE)
all.ind <- FALSE
m.sw["M.sw"] <- TRUE
}
v.sw[param] <- !v.sw[param]
}
if(all(!v.sw[param], !m.sw["G.sw"],
!m.sw["Z.sw"], !m.sw["E.sw"])) stop(paste0("Nothing to plot: ", param, ifelse(is.element(param, c("alpha", "discount")), ifelse(any(all(param == "alpha", is.element(method, c("FA", "IFA"))),
all(param == "discount", !is.element(method, c("IMFA", "IMIFA")))), paste0(" not used for the ", method, " method"), paste0(" was fixed at ", ifelse(param == "alpha",
attr(x, "Alpha"), attr(x, "Discount")))), " weren't stored"), ifelse(param == "pis" && attr(x, "Equal.Pi"), " as mixing proportions were constrained to be equal across clusters", "")), call.=FALSE)
heat.map <- ifelse(missing(heat.map), param == "loadings", heat.map)
int.miss <- !missing(intervals)
if(any(!is.logical(heat.map),
length(heat.map) != 1)) stop("'heat.map' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(show.last),
length(show.last) != 1)) stop("'show.last' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(intervals),
length(intervals) != 1)) stop("'intervals' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(mat),
length(mat) != 1)) stop("'mat' must be a single logical indicator", call.=FALSE)
common <- !(missing(common) && all(grp.ind, !all.ind, m.sw["M.sw"], param == "scores", heat.map)) && (!grp.ind || common)
if(any(!is.logical(common),
length(common) != 1)) stop("'common' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(partial),
length(partial) != 1)) stop("'partial' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(titles),
length(titles) != 1)) stop("'titles' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(by.fac),
length(by.fac) != 1)) stop("'by.fac' must be a single logical indicator", call.=FALSE)
if(all(show.last, intervals)) {
if(int.miss) message("Forcing 'intervals' to FALSE as 'show.last' is TRUE\n")
intervals <- FALSE
}
post.last <- ifelse(show.last, "Last Valid Sample", "Posterior Mean")
indx <- missing(ind)
facx <- missing(fac)
gx <- missing(g)
if(!indx) {
ind <- as.integer(ind)
xind <- ind
}
if(!facx) {
fac <- as.integer(fac)
fl <- length(fac)
if(fl == 1) fac <- rep(fac, G)
fl <- length(fac)
if(fl != G && is.element(param,
c("loadings", "scores"))) stop(paste0("'fac' must be supplied for each of the ", G, " clusters"), call.=FALSE)
}
g.score <- param == "scores" && all(grp.ind, !all.ind, !common)
if(!gx) g <- as.integer(g)
if(!gx && any(length(g) != 1,
!is.numeric(g))) stop("If 'g' is supplied it must be of length 1", call.=FALSE)
if(any(all(is.element(method, c("IMFA", "OMFA")), m.sw["G.sw"]), m.sw["Z.sw"])) {
if(m.sw["G.sw"]) {
Gs <- if(gx) seq_len(2L) else ifelse(g <= 2, g,
stop("Invalid 'g' value", call.=FALSE))
} else if(m.sw["Z.sw"]) {
Gs <- if(gx) (if(z.sim) seq_len(5L) else seq_len(4L)) else ifelse(g <=
ifelse(z.sim, 5, 4), g, stop(paste0("Invalid 'g' value", ifelse(z.sim, ": similarity matrix not available", "")), call.=FALSE))
}
} else if(all(is.element(method, c("IMIFA", "OMIFA")), m.sw["G.sw"])) {
if(m.sw["G.sw"]) {
Gs <- if(gx) seq_len(3L) else ifelse(g <= 3, g,
stop("Invalid 'g' value", call.=FALSE))
} else if(m.sw["Z.sw"]) {
Gs <- if(gx) (if(z.sim) seq_len(5L) else seq_len(4L)) else ifelse(g <=
ifelse(z.sim, 5, 4), g, stop(paste0("Invalid 'g' value", ifelse(z.sim, ": similarity matrix not available", "")), call.=FALSE))
}
} else if(m.sw["E.sw"]) {
Gs <- if(gx) switch(EXPR=errX, All=seq_len(3L), PPRE=seq_len(2L), 1L) else ifelse(g <= switch(EXPR=errX, All=3L, PPRE=2L, 1L), g,
stop("Invalid 'g' value", call.=FALSE))
} else if(any(all(is.element(param, c("scores", "pis", "alpha", "discount")), any(all.ind, common, param != "scores", !m.sw["M.sw"])), m.sw["G.sw"],
all(m.sw["P.sw"], param != "loadings"), all(param == "uniquenesses", is.element(uni.type, c("constrained", "single"))))) {
Gs <- 1L
} else if(!gx) {
if(!is.element(method, c("FA", "IFA"))) {
if(!is.element(g, Gseq)) stop("This g value was not used during simulation", call.=FALSE)
Gs <- g
} else if(g > 1) { message(paste0("Forced g=1 for the ", method, " method\n"))
Gs <- 1L
}
} else if(!interactive()) { stop("g must be supplied for non-interactive sessions", call.=FALSE)
} else {
Gs <- Gseq
}
if(m.sw["Z.sw"] && !all(Gs == 5)) {
prf <- NULL
uncer <- attr(clust$uncertainty, "Obs")
if(any(!labelmiss, !z.miss)) {
if(all(!labelmiss, z.miss)) {
prf <- clust$perf
} else {
pzs <- factor(clust$MAP, levels=seq_len(G))
tab <- table(pzs, zlabels, dnn=list("Predicted", "Observed"))
prf <- c(.class_agreement(tab), classError(classification=pzs, class=zlabels))
if(nrow(tab) != ncol(tab)) {
prf <- prf[-seq_len(2L)]
names(prf)[4L] <- "error.rate"
} else {
names(prf)[6L] <- "error.rate"
}
if(prf$error.rate == 0) {
prf$misclassified <- NULL
}
prf <- c(list(confusion.matrix = tab), prf, if(!is.null(uncer)) list(uncertain = uncer))
}
prf$confusion.matrix <- if(!is.null(prf$confusion.matrix)) stats::addmargins(prf$confusion.matrix, quiet=TRUE)
prf$error.rate <- if(!is.null(prf$error.rate)) paste0(round(100L * prf$error.rate, 2L), "%")
} else {
prf <- if(!is.null(uncer)) list(uncertain = uncer)
prf <- if(!is.null(prf[[1L]])) prf
}
}
for(g in Gs) {
Q <- Qs[g]
ng <- ifelse(grp.ind, grp.size[g], n.obs)
g.ind <- which(Gs == g)
msgx <- all(interactive(), g != max(Gs))
if(any(all(Qs == 0, param == "scores"),
all(Q == 0, param == "loadings"),
all(ng == 0, param == "scores", m.sw["M.sw"] && !all.ind))) {
warning(paste0("Can't plot ", param, paste0(ifelse(any(all(param == "scores", ng == 0), all(param == "loadings", grp.ind)), paste0(" for cluster ", g), "")), " as they contain no ", ifelse(all(param == "scores", ng == 0), "rows/observations\n", "columns/factors\n")), call.=FALSE)
if(g == max(Gs)) {
break
} else {
if(isTRUE(msgx)) .ent_exit(opts = defopt)
next
}
}
if(any(is.element(param, c("alpha", "discount")),
all(is.element(param, c("means", "uniquenesses")), !indx),
all(param == "loadings", Q == 1), all(param == "scores",
Q.max == 1))) { matx <- FALSE
} else {
matx <- mat
}
if(!matx) {
iter <- switch(EXPR=param, scores=seq_len(attr(x$Score, "Eta.store")), loadings=seq_len(attr(x, "N.Loadstore")[g]), seq_along(store))
}
if(is.element(param, c("scores", "loadings"))) {
if(all((g == min(Gs)), m.sw["M.sw"], isTRUE(heat.map))) {
if(brX) {
hlen <- length(brXs)
if(!mispal && (hlen !=
length(palette))) warning("'breaks' and 'palette' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
} else hlen <- 30L
hcols <- if(mispal) viridis(hlen, option="B") else palette
}
if(indx) ind <- c(1L, 1L)
if(!facx) ind[2L] <- fac[g]
if(all(length(ind) == 1,
mat)) { ind <- rep(ind, 2L)
if(g == 1) xind <- rep(xind, 2L)
}
if(length(ind) != 2) stop(paste0("Length of plotting indices must be 2 for the ", param, " parameter when 'mat' is FALSE"), call.=FALSE)
if(param == "scores") {
if(ind[1L] > n.obs) stop(paste0("First index can't be greater than the number of observations: ", n.obs), call.=FALSE)
if(ind[2L] > Q.max) { warning(paste0("Second index can't be greater than ", Q.max, ", the total number of factors", ifelse(grp.ind, " in the widest loadings matrix\n", "\n")), call.=FALSE)
if(isTRUE(msgx)) .ent_exit(opts = defopt)
next
}
} else {
if(ind[1L] > n.var) stop(paste0("First index can't be greater than the number of variables: ", n.var), call.=FALSE)
if(ind[2L] > Q) { warning(paste0("Second index can't be greater than ", Q, ", the number of factors", if(grp.ind) paste0(" in cluster ", g), ".\nTry specifying a vector of fac values with maximum entries ", paste0(Qs, collapse=", "), "\n"), call.=FALSE)
if(isTRUE(msgx)) .ent_exit(opts = defopt)
next
}
}
} else {
if(any(is.element(param, c("alpha", "discount")),
indx)) ind <- 1L
if(length(ind) > 1) stop("Length of plotting indices can't be greater than 1", call.=FALSE)
if(param == "pis") {
if(ind > G) stop(paste0("Index can't be greater than the number of clusters: ", G), call.=FALSE)
} else {
if(ind > n.var) stop(paste0("Index can't be greater than the number of variables: ", n.var), call.=FALSE)
}
}
if(m.sw["T.sw"]) {
if(param == "means") {
plot.x <- x$Means$mus[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, ylim=if(is.element(method, c("FA", "IFA")) && attr(x, "Center")) c(-1, 1), col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nMeans", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
base::plot(x=iter, y=plot.x[ind,], type="l", ylab="", xlab="Iteration", ylim=if(is.element(method, c("FA", "IFA")) && attr(x, "Center")) c(-1, 1))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nMean - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind], " Variable")))
}
}
if(param == "scores") {
x.plot <- x$Scores$eta
if(by.fac) {
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.obs)), option="D", alpha=transparency))
} else {
plot.x <- if(Q.max > 1) x.plot[ind[1L],rev(Qmseq),] else t(x.plot[ind[1L],rev(Qmseq),])
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q.max)), option="D", alpha=transparency))
}
if(matx) {
scols <- seq_along(grDevices::palette())
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=if(by.fac) scols else rev(scols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", ":\nScores - "), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]])))
}
} else {
base::plot(x=iter, y=x.plot[ind[1L],ind[2L],], type="l", ylab="", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L])))
}
}
if(param == "loadings") {
x.plot <- x$Loadings$lmats[[g]]
if(by.fac) {
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.var)), option="D", alpha=transparency))
} else {
plot.x <- x.plot[ind[1L],rev(seq_len(Q)),]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q)), option="D", alpha=transparency))
}
if(matx) {
lcols <- seq_along(grDevices::palette())
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=if(by.fac) lcols else rev(lcols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable")))
}
} else {
base::plot(x=iter, y=x.plot[ind[1L],ind[2L],], type="l", ylab="", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable, Factor ", ind[2L])))
}
}
if(param == "uniquenesses") {
plot.x <- x$Uniquenesses$psis[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nUniquenesses", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
base::plot(x=iter, y=plot.x[ind,], ylab="", type="l", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n"), ""), paste0(":\nUniquenesses - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), switch(EXPR=uni.type, constrained=, unconstrained=paste0(var.names[ind], " Variable"), ""))))
}
}
if(param == "pis") {
plot.x <- clust$pi.prop
if(matx) {
if(mispal) grDevices::palette(viridis(max(G, 2L), option="D", alpha=transparency))
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=seq_along(grDevices::palette()), ylim=c(0, 1))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nMixing Proportions")))))
} else {
base::plot(x=iter, y=plot.x[ind,], ylab="", type="l", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nMixing Proportion - Cluster ", ind)))))
}
}
if(param == "alpha") {
plot.x <- clust$Alpha
base::plot(plot.x$alpha, ylab="", type="l", xlab="Iteration", main="")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nAlpha")))))
if(all(intervals, ci.sw[param])) {
ci.x <- plot.x$ci.alpha
graphics::abline(h=plot.x$post.alpha, col=2, lty=2)
graphics::abline(h=ci.x[1L], col=grey, lty=2)
graphics::abline(h=ci.x[2L], col=grey, lty=2)
}
}
if(param == "discount") {
plot.x <- clust$Discount
base::plot(as.vector(plot.x$discount), ylab="", type="l", xlab="Iteration", main="", ylim=c(0, 1))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nDiscount")))))
if(all(intervals, ci.sw[param])) {
ci.x <- plot.x$ci.disc
graphics::abline(h=plot.x$post.disc, col=2, lty=2)
graphics::abline(h=ci.x[1L], col=grey, lty=2)
graphics::abline(h=ci.x[2L], col=grey, lty=2)
}
}
if(!indx) { ind[1L] <- xind[1L]
if(all(facx, is.element(param, c("scores",
"loadings")))) ind[2L] <- xind[2L]
}
if(all.ind) xxind <- ind
}
if(m.sw["D.sw"]) {
if(param == "means") {
x.plot <- x$Means$mus[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
plot.x <- tryCatch(apply(x.plot, 1L, stats::density, bw="SJ"), error = function(e) apply(x.plot, 1L, stats::density))
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nMeans", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
plot.d <- tryCatch(stats::density(x.plot[ind,], bw="SJ"), error = function(e) stats::density(x.plot[ind,]))
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nMeans - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind], " Variable")))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "scores") {
x.plot <- x$Scores$eta
if(by.fac) {
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.obs)), option="D", alpha=transparency))
} else {
plot.x <- if(Q > 1) x.plot[ind[1],rev(Qmseq),] else t(x.plot[ind[1L],rev(Qmseq),])
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q.max)), option="D", alpha=transparency))
}
if(matx) {
scols <- seq_along(grDevices::palette())
plot.x <- tryCatch(apply(x.plot, 1L, stats::density, bw="SJ"), error = function(e) apply(x.plot, 1L, stats::density))
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=if(by.fac) scols else rev(scols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", ":\nScores - "), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]])))
}
} else {
plot.d <- tryCatch(stats::density(x.plot[ind[1L],ind[2L],], bw="SJ"), error = function(e) stats::density(x.plot[ind[1L],ind[2L],]))
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L])))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "loadings") {
x.plot <- x$Loadings$lmats[[g]]
if(by.fac) {
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.var)), option="D", alpha=transparency))
} else {
plot.x <- x.plot[ind[1L],rev(seq_len(Q)),]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q)), option="D", alpha=transparency))
}
if(matx) {
lcols <- seq_along(grDevices::palette())
plot.x <- tryCatch(apply(plot.x, 1L, stats::density, bw="SJ"), error = function(e) apply(plot.x, 1L, stats::density))
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=if(by.fac) lcols else rev(lcols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable")))
}
} else {
plot.d <- tryCatch(stats::density(x.plot[ind[1L],ind[2L],], bw="SJ"), error = function(e) stats::density(x.plot[ind[1L],ind[2L],]))
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable, Factor ", ind[2L])))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "uniquenesses") {
x.plot <- x$Uniquenesses$psis[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
plot.x <- apply(x.plot, 1L, .logdensity, bw="SJ")
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nUniquenesses", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
plot.d <- .logdensity(x.plot[ind,], bw="SJ")
plot.d$y[plot.d$x < 0] <- 0
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n"), ""), paste0(":\nUniquenesses - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), switch(EXPR=uni.type, constrained=, unconstrained=paste0(var.names[ind], " Variable"), ""))))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "pis") {
x.plot <- t(clust$pi.prop)
if(matx) {
if(mispal) grDevices::palette(viridis(max(G, 2L), option="D", alpha=transparency))
plot.x <- lapply(as.data.frame(x.plot), .logitdensity, bw="SJ")
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", ylab="", lty=1, col=seq_along(grDevices::palette()), xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nMixing Proportions")))))
} else {
x.plot <- x.plot[,ind]
fit <- .logitdensity(x.plot, bw="SJ")
fitx <- fit$x
fity <- fit$y
base::plot(fitx, fity, type="l", main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nMixing Proportions - Cluster ", ind)))))
graphics::polygon(c(min(fitx), fitx), c(0, fity), col=grey, border=NA)
}
}
if(param == "alpha") {
plot.x <- clust$Alpha
tr <- ifelse(attr(x, "Pitman"), - max(if(is.null(attr(x, "Discount"))) clust$Discount$discount else attr(x, "Discount"), 0), 0)
plot.d <- .logdensity(plot.x$alpha, left=tr, bw="SJ")
plot.d$y[plot.d$x < tr] <- 0L
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nAlpha")))))
graphics::polygon(plot.d, col=grey, border=NA)
if(intervals) {
avg <- plot.x$post.alpha
graphics::clip(avg, avg, 0, plot.d$y[which.min(abs(plot.d$x - avg))])
graphics::abline(v=avg, col=2, lty=2)
}
}
if(param == "discount") {
plot.x <- clust$Discount
x.plot <- as.vector(plot.x$discount)
fit <- try(.logitdensity(x.plot, bw="SJ"), silent = TRUE)
if(!inherits(fit, "try-error")) {
fitx <- fit$x
fity <- fit$y * (1 - plot.x$post.kappa)
base::plot(fitx, fity, type="l", main="", xlab="", ylab="", xlim=c(0, max(fitx)))
usr <- graphics::par("usr")
if(plot.x$post.kappa > 0) {
graphics::clip(usr[1L], usr[2L], 0, usr[4L])
graphics::abline(v=0, col=3, lwd=2)
graphics::clip(usr[1L], usr[2L], usr[3L], usr[4L])
}
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nDiscount")))))
graphics::polygon(c(min(fitx), fitx), c(0, fity), col=grey, border=NA)
if(intervals) {
D <- plot.x$post.disc
d2 <- fity[which.min(abs(fitx - D))]
if(is.finite(d2)) {
graphics::clip(D, D, 0, d2)
graphics::abline(v=D, col=2, lty=2)
graphics::clip(usr[1L], usr[2L], usr[3L], usr[4L])
}
}
} else { warning(paste0(ifelse(attr(x, "Kappa0"), "Acceptance", "Mutation"), " rate too low: can't plot density\n"), call.=FALSE)
if(all.ind) graphics::plot.new()
}
}
}
if(m.sw["M.sw"]) {
if(is.element(param, c("scores", "loadings"))) {
if(indx) {
ind <- switch(EXPR=param, scores=c(1L, min(Q.max, 2L)), c(1L, 1L))
}
if(!facx) {
ind[2L] <- fac[g]
}
if(param == "scores") {
if(any(ind[1L] > Q.max,
ind[2L] > Q.max)) stop(paste0("Only the first ", Q.max, " columns can be plotted"), call.=FALSE)
} else if(ind[2L] > Q) stop(paste0("Only the first ", Q, " columns can be plotted"), call.=FALSE)
}
if(param == "means") {
x.plot <- if(show.last) x$Means$last.mu else x$Means$post.mu
plot.x <- x.plot[,g]
if(ci.sw[param]) ci.x <- x$Means$ci.mu
if(g == min(Gs) && isTRUE(common)) {
pxx <- range(vapply(x.plot, range, numeric(2L)))
cixx <- if(all(intervals, ci.sw[param])) range(vapply(ci.x, range, numeric(2L)))
} else if(!common) {
pxx <- range(plot.x)
cixx <- if(all(intervals, ci.sw[param])) range(ci.x[[g]])
}
if(ci.sw[param]) ci.x <- ci.x[[g]]
base::plot(plot.x, type=type, ylab="", xlab="Variable", ylim=if(is.element(method, c("FA", "IFA")) && attr(x, "Center")) c(-1, 1) else if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x, li=ci.x[,1L], ui=ci.x[,2L], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nMeans", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
if(type == "n") graphics::text(x=seq_along(plot.x), y=plot.x, var.names, cex=0.5)
}
if(param == "scores") {
labs <- if(z.miss) { if(!grp.ind) 1L else if(show.last) clust$last.z else clust$MAP } else if(is.factor(zlabels)) as.integer(zlabels) else zlabels
p.eta <- if(show.last) x$Scores$last.eta else x$Scores$post.eta
eta1st <- if(plot.meth == "all" || !gx) 1L else which.min(grp.size > 0)
if(g.score) {
if(g.ind == eta1st) tmplab <- labs
z.ind <- tmplab %in% g
plot.x <- p.eta[z.ind,,drop=FALSE]
ind2 <- ifelse(any(!facx, Q <= 1), ind[2L], if(Q > 1) max(2L, ind[2L]))
if(ci.sw[param]) ci.x <- x$Scores$ci.eta[,z.ind,, drop=FALSE]
labs <- g
n.eta <- grp.size[g]
} else {
plot.x <- p.eta
ind2 <- ifelse(any(!facx, Q.max <= 1), ind[2L], if(Q.max > 1) max(2L, ind[2L]))
if(ci.sw[param]) ci.x <- x$Scores$ci.eta
n.eta <- n.obs
}
if(isTRUE(heat.map)) {
if(titles && !all.ind) graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
if(g.ind == eta1st) {
sxx <- mat2cols(p.eta, cols=hcols, na.col=graphics::par()$bg, ...)
sxx <- if(g.score) lapply(split(sxx, factor(clust$MAP, levels=Gseq)), matrix, ncol=ncol(sxx)) else sxx
pxx <- range(p.eta)
}
pxx <- if(g.score) range(plot.x) else pxx
plot_cols(if(g.score) sxx[[g]] else sxx, ...)
if(!is.element(Q.max, c(1, Q)) && all(plot.meth != "all", !common)) graphics::abline(v=Q + 0.5, lty=2, lwd=2)
if(titles) {
graphics::title(main=list(paste0(post.last, ifelse(!all.ind, " Scores ", " "), "Heatmap", ifelse(all(!all.ind, grp.ind, !common), paste0(" - Cluster ", g), ""))))
if(all.ind || common) {
graphics::axis(1, line=-0.5, tick=FALSE, at=Qmseq, labels=Qmseq)
} else {
graphics::axis(1, line=-0.5, tick=FALSE, at=Qmseq, labels=replace(Qmseq, Q, NA))
if(Q > 0) {
graphics::axis(1, line=-0, tick=FALSE, at=Q, labels=substitute(paste(hat(q)['g'], " = ", Q), list(Q=Q)), cex.axis=1.5)
} else message("Estimated number of columns in corresponding loadings matrix was zero\n")
}
suppressWarnings(heat_legend(data=pxx, cols=hcols, cex.lab=0.8, ...))
if(Q.max != 1) {
absq <- seq(from=graphics::par("usr")[1L], to=graphics::par("usr")[2L], length.out=Q.max + 1L)
graphics::abline(v=absq[-c(1L, length(absq))], lty=2, lwd=1, col=grey)
}
}
graphics::box(lwd=2)
graphics::mtext(ifelse(Q.max > 1, "Factors", "Factor"), side=1, line=2)
} else {
if((mispal && G >= 2) || !z.miss) grDevices::palette(viridis(ifelse(z.miss, max(G, 2L), length(unique(labs))), option="D", alpha=transparency))
col.s <- if(is.factor(labs)) as.integer(levels(labs))[labs] else labs
type.s <- ifelse(any(type.x, type == "l"), "p", type)
if(ind2 != 1) {
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(plot.x[,ind[1L]], plot.x[,ind2], li=ci.x[1L,,ind2], ui=ci.x[2L,,ind2], gap=TRUE, pch=NA, scol=grey, slty=3, xlab=paste0("Factor ", ind[1L]), ylab=paste0("Factor ", ind2)))
suppressWarnings(.plot_CI(plot.x[,ind[1L]], plot.x[,ind2], li=ci.x[1L,,ind[1L]], ui=ci.x[2L,,ind[1L]], add=TRUE, gap=TRUE, pch=NA, scol=grey, slty=3, err="x"))
if(type.s != "n") graphics::points(plot.x[,ind[1L]], plot.x[,ind2], type=type.s, col=col.s, pch=20)
} else {
base::plot(plot.x[,ind[1L]], plot.x[,ind2], type=type.s, col=col.s, pch=20, cex=0.8,
xlab=paste0("Factor ", ind[1L]), ylab=paste0("Factor ", ind2))
}
if(type.s == "n") graphics::text(plot.x[,ind[1L]], plot.x[,ind2], obs.names, col=col.s, cex=0.5)
} else {
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(if(g.score) seq_len(grp.size[g]) else seq_len(n.obs), plot.x[,ind[1L]], li=ci.x[1L,,ind[1L]], ui=ci.x[2L,,ind[1L]], gap=TRUE, pch=NA, scol=grey, slty=3, xlab="Observation", ylab=paste0("Factor ", ind[1L])))
graphics::points(plot.x[,ind[1L]], type=type.s, col=col.s, pch=20)
} else {
base::plot(plot.x[,ind[1L]], type=type.s, col=col.s, xlab="Observation", ylab=paste0("Factor ", ind[1L]), pch=20)
}
if(type.s == "n") graphics::text(plot.x[,ind[1L]], col=col.s, cex=0.5)
}
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", ":\nScores"), ifelse(g.score, paste0(" - Cluster ", g), ""))))
}
}
if(param == "loadings") {
plot.x <- if(show.last) x$Loadings$last.load else x$Loadings$post.load
if(ci.sw[param]) ci.x <- x$Loadings$ci.load
if(g == min(Gs[nLx[Gs]]) && isTRUE(common)) {
if(isTRUE(heat.map)) {
if(any(Qs == 0)) {
lxx <- vector("list", G)
lxx[nLx] <- mat2cols(Filter(Negate(is.null), plot.x), cols=hcols, compare=G > 1, na.col=graphics::par()$bg, ...)
} else {
lxx <- mat2cols(if(G > 1) plot.x else plot.x[[g]], cols=hcols, compare=G > 1, na.col=graphics::par()$bg, ...)
}
} else {
cixx <- if(all(intervals, ci.sw[param], !heat.map)) { if(by.fac) range(vapply(Filter(Negate(is.null), ci.x), function(x) range(x[,,ind[2L]]), numeric(2L))) else range(vapply(Filter(Negate(is.null), ci.x), function(x) range(x[,ind[1L],]), numeric(2L))) }
}
pxx <- range(vapply(Filter(Negate(is.null), plot.x), range, na.rm=TRUE, numeric(2L)))
}
if(!nLx[g]) { break
} else if(!common) {
if(isTRUE(heat.map)) {
lxx <- mat2cols(plot.x[[g]], cols=hcols, compare=FALSE, na.col=graphics::par()$bg, ...)
} else {
cixx <- if(all(intervals, ci.sw[param], !heat.map)) { if(by.fac) range(ci.x[[g]][,,ind[2L]]) else range(ci.x[[g]][,ind[1L],]) }
}
pxx <- range(plot.x[[g]])
}
if(isTRUE(heat.map)) {
if(titles && !all.ind) graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
plot_cols(if(G > 1 && isTRUE(common)) lxx[[g]] else lxx, ...)
if(titles) {
graphics::title(main=list(paste0(post.last, ifelse(!all.ind, " Loadings ", " "), "Heatmap", ifelse(all(!all.ind, grp.ind), paste0(" - Cluster ", g), ""))))
graphics::axis(1, line=-0.5, tick=FALSE, at=seq_len(Q), labels=seq_len(Q))
if(n.var < 100) {
graphics::axis(2, cex.axis=0.5, line=-0.5, tick=FALSE, las=1, at=seq_len(n.var), labels=substring(var.names[n.var:1L], 1L, 11L))
}
suppressWarnings(heat_legend(data=pxx, cols=hcols, cex.lab=0.8, ...))
}
graphics::box(lwd=2)
graphics::mtext(ifelse(Q > 1, "Factors", "Factor"), side=1, line=2, cex=0.8)
if(Q != 1 && titles) {
absq <- seq(from=graphics::par("usr")[1L], to=graphics::par("usr")[2L], length.out=Q + 1)
graphics::abline(v=absq[-c(1L, length(absq))], lty=2, lwd=1, col=grey)
}
} else {
plot.x <- plot.x[[g]]
if(ci.sw[param]) ci.x <- ci.x[[g]]
if(!by.fac) {
if(ci.sw[param]) ci.x <- as.matrix(ci.x[,ind[1L],])
base::plot(plot.x[ind[1L],], type=type, xaxt="n", xlab="", ylab="Loading", ylim=if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x[ind[1L],], li=ci.x[1L,], ui=ci.x[2L,], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
graphics::axis(1, line=-0.5, tick=FALSE, at=seq_len(Q), labels=seq_len(Q))
graphics::mtext("Factors", side=1, line=2)
if(titles) graphics::title(main=list(paste0(post.last, ":\n", ifelse(!all.ind, paste0("Loadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), "")), ""), var.names[ind[1L]], " Variable")))
if(type == "n") graphics::text(x=plot.x[ind[1L],], paste0("Factor ", seq_len(Q)), cex=0.5)
} else {
if(ci.sw[param]) ci.x <- as.matrix(ci.x[,,ind[2L]])
base::plot(plot.x[,ind[2L]], type=type, xaxt="n", xlab="", ylab="Loading", ylim=if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x[,ind[2L]], li=ci.x[1L,], ui=ci.x[2L,], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
graphics::axis(1, line=-0.5, tick=FALSE, at=seq_len(n.var), labels=seq_len(n.var))
graphics::mtext("Variable #", side=1, line=2, cex=0.8)
if(titles) graphics::title(main=list(paste0(post.last, ":\n", ifelse(!all.ind, paste0("Loadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), "")), ""), "Factor ", ind[2L])))
if(type == "n") graphics::text(x=plot.x, var.names, cex=0.5)
}
}
}
if(param == "uniquenesses") {
x.plot <- if(show.last) x$Uniquenesses$last.psi else x$Uniquenesses$post.psi
plot.x <- x.plot[,g]
if(ci.sw[param]) ci.x <- x$Uniquenesses$ci.psi
if(g == min(Gs) && isTRUE(common)) {
pxx <- c(0, max(vapply(x.plot, max, numeric(1L))))
cixx <- if(all(intervals, ci.sw[param])) c(0, max(vapply(ci.x, max, numeric(1L))))
} else if(!common) {
pxx <- c(0, max(plot.x))
cixx <- if(all(intervals, ci.sw[param])) c(0, max(ci.x[[g]]))
}
if(ci.sw[param]) ci.x <- ci.x[[g]]
base::plot(plot.x, type=type, ylab="", xlab="Variable", ylim=if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x, li=ci.x[,1L], ui=ci.x[,2L], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nUniquenesses", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
if(type == "n") graphics::text(seq_along(plot.x), plot.x, var.names, cex=0.5)
}
if(param == "pis") {
plot.x <- if(show.last) clust$last.pi else clust$post.pi
if(ci.sw[param]) ci.x <- clust$ci.pi
if(matx) {
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(graphics::barplot(plot.x, ylab="", xlab="", col=grey, ylim=c(0, 1), cex.names=0.7),
plot.x, li=ci.x[,1L], ui=ci.x[,2L], slty=3, scol="red", add=TRUE, gap=TRUE, pch=20))
} else {
graphics::barplot(plot.x, ylab="", xlab="", ylim=c(0, 1), cex.names=0.7)
}
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nMixing Proportions")))))
} else {
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(graphics::barplot(plot.x[ind], ylab="", xlab="", ylim=c(0, 1), cex.names=0.7),
plot.x[ind], li=ci.x[ind,1L], ui=ci.x[ind,2L], slty=3, scol="red", add=TRUE, gap=TRUE, pch=20))
} else {
graphics::barplot(plot.x[ind], ylab="", xlab="Variable", ylim=c(0, 1), cex.names=0.7)
}
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nMixing Proportions - Cluster ", ind)))))
}
}
if(is.element(param, c("alpha", "discount"))) {
if(param == "discount" &&
attr(x, "Kappa0")) message(paste0("Spike-and-slab prior not invoked as alpha was fixed <= 0 (alpha=", attr(x, "Alpha"), ")\n"))
base::plot(c(0, 1), c(0, 1), ann=FALSE, bty='n', type='n', xaxt='n', yaxt='n')
if(titles) graphics::title(main=list(paste0("Summary Statistics", ifelse(all.ind, "", paste0(":\n", switch(EXPR=param, alpha="Alpha", discount="Discount"))))))
plot.x <- switch(EXPR=param, alpha=clust$Alpha[-1L], discount=clust$Discount[-1L])
x.step <- switch(EXPR=param, alpha=attr(x, "Alph.step"), discount=attr(x, "Disc.step"))
conf <- attr(x, "Conf.Level")
digits <- options()$digits
MH <- switch(EXPR=param, alpha=is.element(method, c("OMFA", "OMIFA")) || plot.x$alpha.rate != 1, discount=plot.x$disc.rate != 1)
a.adj <- rep(0.5, 2)
a.cex <- graphics::par()$fin[2L]/ifelse(MH, 4, 3)
pen <- ifelse(MH, 0, 0.15)
tz <- isTRUE(attr(x, "TuneZeta"))
y1 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.9, 0.85), discount=0.9), 0.925) - pen/3
y2 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.725, 0.675), discount=0.725), 0.825) - pen/3
y3 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.6125, 0.55), discount=0.6125), 0.7625) - pen
y4 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.5375, 0.4875), discount=0.55), 0.7125) - pen * 5/4
y5 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.2, 0.1375), discount=0.2125), 0.1375)
y6 <- y5 + 0.0125
graphics::text(x=0.5, y=y1, cex=a.cex, col="black", adj=a.adj, expression(bold("Posterior Mean:\n")))
graphics::text(x=0.5, y=y1, cex=a.cex, col="black", adj=a.adj, bquote(.(round(switch(EXPR=param, alpha=plot.x$post.alpha, discount=plot.x$post.disc), digits))))
graphics::text(x=0.5, y=y2 - pen, cex=a.cex, col="black", adj=a.adj, expression(bold("\nVariance:\n")))
graphics::text(x=0.5, y=y2 - pen, cex=a.cex, col="black", adj=a.adj, bquote(.(round(switch(EXPR=param, alpha=plot.x$var.alpha, discount=plot.x$var.disc), digits))))
graphics::text(x=0.5, y=y3 - pen, cex=a.cex, col="black", adj=a.adj, bquote(bold(.(100 * conf)) * bold("% Credible Interval:")))
graphics::text(x=0.5, y=y4 - pen, cex=a.cex, col="black", adj=a.adj, bquote(paste("[", .(round(switch(EXPR=param, alpha=plot.x$ci.alpha[1L], discount=plot.x$ci.disc[1L]), digits)), ", ", .(round(switch(EXPR=param, alpha=plot.x$ci.alpha[2L], discount=plot.x$ci.disc[2L]), digits)), "]")))
graphics::text(x=0.5, y=y5, cex=a.cex, col="black", adj=a.adj, expression(bold("Last Valid Sample:\n")))
graphics::text(x=0.5, y=y6, cex=a.cex, col="black", adj=a.adj, bquote(.(round(switch(EXPR=param, alpha=plot.x$last.alpha, discount=plot.x$last.disc), digits))))
if(isTRUE(MH)) {
rate <- switch(EXPR=param, alpha="Acceptance Rate:", discount=paste0(ifelse(attr(x, "Kappa0"), "Acceptance", "Mutation"), " Rate:"))
y7 <- switch(EXPR=param, alpha=ifelse(tz, 0.4375, 0.3625), discount=0.4375)
y8 <- switch(EXPR=param, alpha=ifelse(tz, 0.375, 0.3125), discount=0.375)
graphics::text(x=0.5, y=y7, cex=a.cex, col="black", adj=a.adj, substitute(bold(rate)))
graphics::text(x=0.5, y=y8, cex=a.cex, col="black", adj=a.adj, bquote(paste(.(round(100 * switch(EXPR=param, alpha=plot.x$alpha.rate, discount=plot.x$disc.rate), 2L)), "%")))
}
if(param == "discount") {
graphics::text(x=0.5, y=0.1275, cex=a.cex, col="black", adj=a.adj, bquote(bold(hat(kappa)) * bold(" - Posterior Proportion of Zeros:")))
graphics::text(x=0.5, y=0.0575, cex=a.cex, col="black", adj=a.adj, bquote(.(round(plot.x$post.kappa, digits))))
}
if(param == "alpha" && tz) {
graphics::text(x=0.5, y=0.1275, cex=a.cex, col="black", adj=a.adj, bquote(bold(hat(zeta)) * bold(" - Posterior Mean Zeta:")))
graphics::text(x=0.5, y=0.0575, cex=a.cex, col="black", adj=a.adj, bquote(.(round(plot.x$avg.zeta, digits))))
}
}
if(!indx) { ind[1L] <- xind[1L]
if(all(facx, is.element(param, c("scores",
"loadings")))) ind[2L] <- xind[2L]
}
if(all.ind) ind <- xxind
}
if(m.sw["G.sw"]) {
plotG.ind <- is.element(method, c("IMIFA", "IMFA", "OMIFA", "OMFA"))
plotQ.ind <- adapt <- any(is.element(method, c("IFA", "MIFA")), all(is.element(method, c("IMIFA", "OMIFA")), g == 2))
plotT.ind <- any(all(g == 2, is.element(method, c("IMFA", "OMFA"))), all(is.element(method, c("IMIFA", "OMIFA")), g == 3))
if(!(critx <- is.null(crit <- GQ.res$Criteria))) {
aicm <- round(crit$AICMs, 2L)
bicm <- round(crit$BICMs, 2L)
dic <- round(crit$DICs, 2L)
if(is.element(method, c("FA", "MFA", "OMFA", "IMFA"))) {
aic.mcmc <- round(crit$AIC.mcmcs, 2L)
bic.mcmc <- round(crit$BIC.mcmcs, 2L)
}
}
if(all(plotG.ind, g == 1)) {
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
plot.G <- GQ.res$G.Counts
G.name <- names(plot.G)
rangeG <- as.numeric(G.name)
rangeG <- seq(from=min(rangeG), to=max(rangeG), by=1)
missG <- setdiff(rangeG, G.name)
missG <- stats::setNames(rep(NA, length(missG)), as.character(missG))
plot.G <- c(plot.G, missG)
plot.G <- plot.G[order(as.numeric(names(plot.G)))]
col.G <- c(1L, ceiling(length(palette)/2))[(rangeG == G) + 1L]
G.plot <- graphics::barplot(plot.G, ylab="Frequency", xaxt="n", col=col.G)
if(titles) graphics::title(main=list("Posterior Distribution of G"))
graphics::axis(1, at=G.plot, labels=names(plot.G), tick=FALSE)
graphics::axis(1, at=Median(G.plot), labels="G", tick=FALSE, line=1.5)
}
if(plotQ.ind) {
if(method == "IFA") {
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
plot.Q <- GQ.res$Q.Counts
Q.name <- names(plot.Q)
rangeQ <- as.numeric(Q.name)
rangeQ <- seq(from=min(rangeQ), to=max(rangeQ), by=1)
missQ <- setdiff(rangeQ, Q.name)
missQ <- stats::setNames(rep(NA, length(missQ)), as.character(missQ))
plot.Q <- c(plot.Q, missQ)
plot.Q <- plot.Q[order(as.numeric(names(plot.Q)))]
col.Q <- c(1L, ceiling(length(palette)/2))[(rangeQ == Q) + 1L]
Q.plot <- graphics::barplot(plot.Q, ylab="Frequency", xaxt="n", col=col.Q)
if(titles) graphics::title(main=list("Posterior Distribution of Q"))
graphics::axis(1, at=Q.plot, labels=names(plot.Q), tick=FALSE)
graphics::axis(1, at=Median(Q.plot), labels="Q", tick=FALSE, line=1.5)
} else {
if(mispal) grDevices::palette(viridis(max(G, 2L), option="D", alpha=transparency))
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
plot.Q <- GQ.res$Q.Counts
plot.Q <- if(inherits(plot.Q, "listof")) plot.Q else list(plot.Q)
Q.name <- lapply(plot.Q, names)
rangeQ <- as.numeric(unique(unlist(Q.name, use.names=FALSE)))
rangeQ <- seq(from=min(rangeQ), to=max(rangeQ), by=1)
missQ <- lapply(Gseq, function(g) setdiff(rangeQ, as.numeric(Q.name[[g]])))
missQ <- lapply(Gseq, function(g) stats::setNames(rep(NA, length(missQ[[g]])), as.character(missQ[[g]])))
plot.Q <- lapply(Gseq, function(g) c(plot.Q[[g]], missQ[[g]]))
plot.Q <- do.call(rbind, lapply(Gseq, function(g) plot.Q[[g]][order(as.numeric(names(plot.Q[[g]])))]))
if(titles) {
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
Q.plot <- graphics::barplot(plot.Q, beside=TRUE, ylab="Frequency", xaxt="n", col=Gseq, space=c(0, 2))
if(titles) graphics::title(main=list(expression('Posterior Distribution of Q'["g"])))
graphics::axis(1, at=Rfast::colMedians(Q.plot), labels=colnames(plot.Q), tick=FALSE)
graphics::axis(1, at=Median(Q.plot), labels="Q", tick=FALSE, line=1)
if(titles) {
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
tmp <- if(G > 5) unlist(lapply(Gseq, function(g) c(Gseq[g], Gseq[g + ceiling(G/2)])))[Gseq] else Gseq
ltxt <- paste0("Cluster ", tmp)
lcol <- Gseq[tmp]
graphics::legend("center", legend=ltxt, ncol=if(G > 5) ceiling(G/2) else G, bty="n", pch=15, col=lcol, cex=max(0.7, 1 - 0.03 * G))
}
}
adapt <- attr(x, "Adapt") && length(unique(plot.Q[!is.na(plot.Q)])) != 1
}
if(plotT.ind) {
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
col.G <- c(ceiling(length(palette)/2), 1)
x.plot <- GQ.res$Stored.G
plot.x <- if(is.element(method, c("IMFA", "IMIFA"))) t(x.plot) else cbind(as.vector(x.plot), rep(attr(x, "range.G"), ncol(x.plot)))
graphics::matplot(plot.x, type="l", col=palette[col.G], ylab="G", xlab="Iteration", main="", lty=if(is.element(method, c("IMFA", "IMIFA"))) 1 else seq_len(2L), ylim=c(0, max(plot.x)), las=1, yaxt="n")
g.axis <- pretty(c(0L, max(plot.x)))
g.axis <- unique(c(1L, g.axis[g.axis != 0]))
graphics::axis(2, at=g.axis, labels=g.axis, las=1)
if(titles) {
graphics::title(main=list("Trace: \n\n"))
graphics::title(expression("Active" * phantom(" and Non-empty Clusters")), col.main = palette[1L])
graphics::title(expression(phantom("Active ") * "and" * phantom(" Non-empty Clusters")), col.main="black")
graphics::title(expression(phantom("Active and ") * "Non-empty" * phantom(" Clusters")), col.main = palette[col.G[1L]])
graphics::title(expression(phantom("Active and Non-empty ") * "Clusters"), col.main="black")
if(length(unique(plot.x[,1L])) > 1) {
G.ci <- GQ.res$G.CI
graphics::lines(x=c(0, nrow(plot.x)), y=rep(G, 2), col=length(palette), lty=2, lwd=1)
if(G.ci[1L] != G) graphics::lines(x=c(0, nrow(plot.x)), y=rep(G.ci[1L], 2), lty=2, lwd=0.5, col=grey)
if(G.ci[2L] != G) graphics::lines(x=c(0, nrow(plot.x)), y=rep(G.ci[2L], 2), lty=2, lwd=0.5, col=grey)
}
}
}
if(!any(plotQ.ind,
plotG.ind, plotT.ind)) message(paste0("Nothing to plot", switch(EXPR=method, FA=paste0(": Q = ", Q, "\n"), "\n")))
gq.nam <- toupper(substring(names(GQ.res), 1L, 1L))
if(is.element(method, c("IMIFA", "OMIFA"))) {
if(g == 1) {
print(GQ.res[gq.nam == "G"])
} else if(g == 2) {
if(adapt) {
print(if(attr(x, "C.Shrink")) GQ.res[gq.nam == "Q" | gq.nam == "P"] else GQ.res[gq.nam == "Q"])
#print(if(attr(x, "C.Shrink") || attr(x, "G.shrink")) GQ.res[gq.nam == "Q" | gq.nam == "P"] else GQ.res[gq.nam == "Q"])
} else print(GQ.res[gq.nam == "P"])
} else if(g == 3 && !critx) {
print(GQ.res[gq.nam == "C"])
}
} else if(is.element(method, c("OMFA", "IMFA"))) {
if(g == 1) {
print(GQ.res[gq.nam == "G"])
} else if(!critx) {
print(GQ.res[gq.nam != "G" & gq.nam != "S"])
}
} else if(!critx) {
switch(EXPR=method, MFA= {
print(GQ.res[gq.nam != "S"])
}, MIFA={
if(adapt) {
print(GQ.res[gq.nam != "S"])
} else {
print(GQ.res[gq.nam != "Q" & gq.nam != "S"])
}
}, IFA= {
if(adapt) {
print(GQ.res[gq.nam != "S"][-1L])
} else {
print(GQ.res[gq.nam == "C"])
}
})
}
if(all(g == max(Gs), !critx && any(dim(bicm) > 1))) {
G.ind <- if(any(G.supp, !is.element(method, c("MFA", "MIFA")))) 1L else n.grp == G
Q.ind <- if(any(Q.supp, !is.element(method, c("FA", "MFA")))) 1L else n.fac == Q
if(!is.element(method, c("IFA", "MIFA"))) {
cat(paste0("AIC.mcmc = ", aic.mcmc[G.ind,Q.ind], "\n"))
cat(paste0("BIC.mcmc = ", bic.mcmc[G.ind,Q.ind], "\n"))
}
cat(paste0("AICM = ", aicm[G.ind,Q.ind], "\n"))
cat(paste0("BICM = ", bicm[G.ind,Q.ind], "\n"))
cat(paste0("DIC = ", dic[G.ind,Q.ind], "\n\n"))
}
if(!isTRUE(attr(x, "Nowarn.G"))) { cat("\n"); message(attr(x, "Nowarn.G"))
}
if(!isTRUE(attr(x, "Nowarn.Q"))) {
if(isTRUE(attr(x, "Nowarn.G"))) { cat("\n")}
if(!is.element(method,
c("OMFA", "IMFA")) || plotT.ind) message(attr(x, "Nowarn.Q"))
}
if(plotQ.ind) {
if(!adapt) message("No adaptation took place\n")
forceQg <- attr(x, "ForceQg")
if(attr(GQ.res, "Q.big")) warning(paste0("Q had to be prevented from exceeding its initial value", ifelse(forceQg, " (or exceeding the number of observations in one or more clusters)", ""), ".\nConsider re-running the model with a higher value for 'range.Q'", ifelse(forceQg, " or setting 'forceQg' to FALSE\n", "\n")), call.=FALSE)
}
}
if(m.sw["Z.sw"]) {
if(type == "l") stop("'type' cannot be 'l' for clustering uncertainty plots", call.=FALSE)
plot.x <- as.vector(clust$uncertainty)
if(g == 1 || g == 2) {
graphics::layout(1)
oneG <- 1/G
minG <- 1 - oneG
yax <- unique(c(0, pretty(c(0, minG))))
YAX <- which.min(abs(yax - minG))
yax[YAX] <- minG
yax <- abs(yax[yax < 1])
mind <- !is.null(prf) && !z.miss
}
if(g == 1) {
if(mispal) grDevices::palette(replace(viridis(8L, option="D", alpha=transparency), 2L, "red"))
col.x <- if(mind) replace(rep(5L, n.obs), prf$misclassified, 2L) else c(5L, 2L)[(plot.x >= oneG) + 1L]
if(type != "n") col.x[plot.x == 0] <- NA
graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
base::plot(plot.x, type=type, ylim=range(yax), col=col.x, yaxt="n", main="Clustering Uncertainty", ylab="Uncertainty", xlab="Observation", pch=ifelse(type == "n", NA, 16), lend=1)
graphics::lines(x=c(0, n.obs), y=c(oneG, oneG), lty=2, col=1)
graphics::axis(2, at=yax, labels=replace(yax, YAX, expression(1 - frac(1, hat(G)))), las=2, cex.axis=0.9, xpd=TRUE)
graphics::axis(2, at=oneG, labels=expression(frac(1, hat(G))), las=2, xpd=TRUE, side=4, xpd=TRUE)
if(type == "n") {
znam <- obs.names
znam[plot.x == 0] <- ""
graphics::text(x=seq_along(plot.x), y=plot.x, znam, col=col.x, cex=0.5)
}
} else if(g == 2) {
graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
x.ord <- order(plot.x)
x.plot <- plot.x[x.ord]
if(mind) mcO <- which(x.ord %in% prf$misclassified)
base::plot(x.plot, type="n", ylim=c(-max(x.plot)/32, max(yax)), main="Clustering Uncertainty Profile", ylab="Uncertainty", xaxt="n", yaxt="n", xlab="Observations in order of increasing uncertainty")
graphics::lines(x=c(0, n.obs), y=c(0, 0), lty=3, col=grey)
graphics::lines(x.plot)
graphics::points(x.plot, pch=15, cex=if(mind) replace(rep(0.5, n.obs), mcO, 0.75) else 0.5, col=if(mind) replace(rep(1, n.obs), mcO, 3) else 1)
graphics::lines(x=c(0, n.obs), y=c(oneG, oneG), lty=2, col=3)
graphics::axis(2, at=yax, labels=replace(yax, YAX, expression(1 - frac(1, hat(G)))), las=2, cex.axis=0.9, xpd=TRUE)
graphics::axis(2, at=oneG, labels=expression(frac(1, hat(G))), las=2, xpd=TRUE, side=4, xpd=TRUE)
if(mind) {
Nseq <- seq_len(n.obs)
for(i in prf$misclassified) {
x <- Nseq[x.ord == i]
graphics::lines(c(x, x), c(-max(plot.x)/32, plot.x[i]), lty=1, col=3, lend=1)
}
}
} else if(g == 3) {
if(titles) {
graphics::layout(rbind(1, 2), heights=c(1, 6))
graphics::par(mar=c(0, 4.1, 0.5, 2.1))
graphics::plot.new()
graphics::legend("center", legend=bquote({NA >= 1/hat(G)} == 1/.(G)), title="", pch=15, col=3, bty="n", y.intersp=graphics::par()$fin[2L] * 7/5)
graphics::legend("center", legend=c(" "," "), title=expression(bold("Clustering Uncertainty")), bty='n', y.intersp=graphics::par()$fin[2L] * 2/5, cex=graphics::par()$cex.main)
graphics::par(mar=c(5.1, 4.1, 0.5, 2.1))
}
x.plot <- graphics::hist(plot.x, plot=FALSE)
breaks <- if(sum(plot.x != 0)) x.plot$breaks else seq(from=0, to=max(plot.x, 1/G), by=1/G)
cols <- 2L + (breaks >= 1/G)
cols[cols == 2] <- grey
base::plot(x.plot, main="", xlab="Uncertainties", xlim=c(0, 1 - 1/G), col=cols, xaxt="n", ylim=c(0, max(x.plot$counts)), yaxt="n")
graphics::axis(1, at=c(breaks[round(breaks, 1) < min(0.8, 1 - 1/G)], 1 - 1/G), labels=(c(round(breaks[round(breaks, 1) < min(0.8, 1 - 1/G)], 3), expression(1 - frac(1, hat(G))))), las=2, pos=0, cex.axis=0.8)
graphics::axis(2, at=if(sum(plot.x) == 0) c(graphics::axTicks(2), max(x.plot$counts)) else graphics::axTicks(2), las=1, cex.axis=0.8)
} else if(g == 4) {
graphics::par(defpar)
if(titles) graphics::par(mar=c(4.1, 4.1, 4.1, 4.1))
plot.x <- clust$PCM
i.check <- any(!mispal, (!gx && !all(Gs == 4)))
if(brX) {
ilen <- length(brXs)
if(i.check &&
(length(palette) !=
length(brXs))) warning("'breaks' and 'palette' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
} else ilen <- 18L
i.cols <- if(i.check) palette else grDevices::heat.colors(ilen, rev=TRUE)
PCM <- mat2cols(plot.x, cols=i.cols, na.col=graphics::par()$bg, ...)
plot_cols(replace(PCM, plot.x == 0, NA), na.col=graphics::par()$bg, ...)
if(titles) {
graphics::title(main="Posterior Confusion Matrix")
graphics::mtext(side=1, at=Gseq, Gseq, line=1)
graphics::mtext(side=2, at=Gseq, rev(Gseq), line=1, las=1)
graphics::mtext(side=1, "Cluster", line=2)
graphics::mtext(side=2, "Allocation", line=2)
suppressWarnings(heat_legend(plot.x, cols=i.cols, cex.lab=0.8, ...))
}
graphics::box(lwd=2)
}
if(all(g == 5, z.sim)) {
plot.x <- as.matrix(clust$Z.avgsim$z.sim)
perm <- order(clust$MAP)
plot.x <- if((p.ind <- !identical(perm, clust$MAP))) plot.x[perm,perm] else plot.x
plot.x <- t(plot.x[,seq(from=ncol(plot.x), to=1L, by=-1L)])
graphics::par(defpar)
if(titles) graphics::par(mar=c(4.1, 4.1, 4.1, 4.1))
z.check <- any(!mispal, (!gx && !all(Gs == 5)))
if(brX) {
zlen <- length(brXs)
if(z.check &&
(length(palette) !=
length(brXs))) warning("'breaks' and 'palette' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
} else zlen <- 12L
z.col <- if(any(!mispal, (!gx && !all(Gs == 5)))) palette else grDevices::heat.colors(zlen, rev=TRUE)
col.mat <- mat2cols(plot.x, cols=z.col, na.col=graphics::par()$bg, ...)
col.mat[plot.x == 0] <- NA
plot_cols(col.mat, na.col=graphics::par()$bg, ...)
if(titles) {
graphics::title(main=list("Average Similarity Matrix"))
graphics::axis(1, at=n.obs/2, labels=paste0("Observation 1:N", if(p.ind) " (Reordered)"), tick=FALSE)
graphics::axis(2, at=n.obs/2, labels=paste0("Observation 1:N", if(p.ind) " (Reordered)"), tick=FALSE)
suppressWarnings(heat_legend(data=plot.x, cols = z.col, cex.lab=0.8, ...))
}
graphics::box(lwd=2)
if(p.ind) message("Rows and columns of similarity matrix reordered to correspond to MAP clustering\n")
}
if(g == min(Gs)) {
if(all(labelmiss,
z.miss)) {
cat("clustering table :")
print(table(clust$MAP), row.names=FALSE)
cat("\n")
}
if(g <= 3 &&
!is.null(prf)) {
class(prf) <- "listof"
print(prf)
}
}
}
if(m.sw["P.sw"]) {
plot.x <- switch(EXPR=param,
means= if(show.last) x$Means$last.mu else x$Means$post.mu,
uniquenesses= if(show.last) x$Uniquenesses$last.psi else x$Uniquenesses$post.psi,
loadings = if(show.last) x$Loadings$last.load[[g]] else x$Loadings$post.load[[g]])
plot.x <- switch(EXPR=param, loadings=plot.x[,rev(seq_len(Q)), drop=FALSE], plot.x)
x.plot <- rowRanges(plot.x, na.rm=TRUE)
plot.x <- if(param == "uniquenesses" && is.element(uni.type, c("isotropic", "single"))) plot.x else apply(plot.x, 2L, function(x) (x - min(x, na.rm=TRUE))/(max(x, na.rm=TRUE) - min(x, na.rm=TRUE)))
varnam <- paste0(toupper(substr(param, 1L, 1L)), substr(param, 2L, nchar(param)))
if(any(grp.ind, param == "loadings")) {
if(mispal) grDevices::palette(viridis(max(switch(EXPR=param, loadings=Q, G), 2L), option="D", alpha=transparency))
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
jitcol <- switch(EXPR=param, loadings=Q, G)
jit.x <- G == 1 || (param == "uniquenesses" && uni.type == "constrained")
type.u <- ifelse(type.x, switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, constrained=, unconstrained="p", single=, isotropic="l"), "p"), type)
if(!is.element(type.u,
c("l", "p"))) stop("Invalid 'type' for parallel coordinates plot", call.=FALSE)
graphics::matplot(seq_len(n.var) + if(!jit.x) switch(EXPR=type.u, p=matrix(stats::rnorm(jitcol * n.var, 0, min(0, max(1e-02, 1/n.var^2))), nrow=n.var, ncol=jitcol), 0) else 0,
plot.x, type=type.u, pch=15, col=switch(EXPR=param, loadings=rev(seq_len(Q)), seq_len(G)), xlab=switch(EXPR=uni.type, constrained=, unconstrained="Variable", ""),
lty=1, ylab=paste0(switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, constrained=, unconstrained="Standardised ", ""), "Standardised "), varnam),
xaxt="n", bty="n", main=paste0("Parallel Coordinates - ", post.last, ": ", varnam, ifelse(all(grp.ind, param == "loadings"), paste0("\nCluster ", g), "")))
graphics::axis(1, at=seq_len(n.var), labels=if(titles && n.var < 100) rownames(plot.x) else character(n.var), cex.axis=0.5, tick=FALSE, line=-0.5)
for(i in seq_len(n.var)) {
graphics::lines(c(i, i), c(0, 1), col=grey)
if(titles && n.var < 100) {
graphics::text(c(i, i), c(switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, single=, isotropic=graphics::par("usr")[3L], 0), 0),
switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, single=, isotropic=graphics::par("usr")[4L], 1), 1)),
labels=format(x.plot[i,], digits=3), xpd=NA, offset=0.3, pos=c(1, 3), cex=0.5)
}
}
if(any(grp.ind, param == "loadings")) {
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
Xp <- switch(EXPR=param, loadings=Q, G)
Xseq <- seq_len(Xp)
tmp <- if(Xp > 5) unlist(lapply(Xseq, function(x) c(Xseq[x], Xseq[x + ceiling(Xp/2)])))[Xseq] else Xseq
ltxt <- paste0(switch(EXPR=param, loadings="Factor", "Cluster"), tmp)
lcol <- Xseq[tmp]
graphics::legend("center", pch=15, col=lcol, legend=ltxt, ncol=if(Xp > 5) ceiling(Xp/2) else Xp, bty="n", cex=max(0.7, 1 - 0.03 * Xp))
}
}
if(m.sw["E.sw"]) {
Pind <- is.element(errX, c("All", "PPRE")) && g == 1
Hind <- is.element(errX, c("All", "PPRE")) && g == 2
Cind <- (errX == "All" && g == 3) || !any(Pind, Hind)
error <- x$Error
if(Pind) {
graphics::boxplot(error$PPRE, col=palette[length(palette)])
if(titles) {
graphics::title(main=list(paste0("Posterior Predictive Reconstruction Error\n(using the ", switch(EXPR=toupper(attr(error, "Norm")), O=, "1"="One", I="Infinity", "F"="Frobenius", M="Maximum", "2"="Spectral"), " norm)")))
graphics::mtext("PPRE", side=2, line=2)
graphics::mtext(method, side=1, line=1)
}
indp <- switch(EXPR=errX, All=7L, PPRE=1L)
print(c(error$CIs[indp,], Mean=unname(error$Avg[indp]), Median=unname(error$Median[indp]), "Last Valid Sample"=unname(error$Final[indp]))[c(1L, 3L:5L, 2L)])
}
if(Hind) {
if(titles) {
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
ci.x <- error$RepCounts[[ind]]
dat.x <- error$DatCounts[[ind]]
dat.x[dat.x == 0] <- NA
suppressWarnings(.plot_CI(PPRE <- graphics::barplot(dat.x, ylim=c(0L, max(ci.x[3L,], dat.x, na.rm=TRUE)), col=grey),
ci.x[2L,], li=ci.x[1L,], ui=ci.x[3L,], add=TRUE, gap=TRUE, slty=2, scol="red", pch=15))
if(titles) {
graphics::axis(1, at=c(PPRE[1L] - 0.5, PPRE[-1L] - 0.6, PPRE[length(PPRE)] + 0.5), round(error$Breaks[[ind]], 2))
graphics::title(main=list(paste0(var.names[ind], " Variable")))
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
ltxt <- c("Data Bin Counts", "Median Replicate Bin Counts")
temp <- graphics::legend("center", legend=character(2L), text.width=max(graphics::strwidth(ltxt)), ncol=1, bty="n", cex=0.75, pt.cex=1.25, fill=c(grey, "black"), xjust=0.5)
graphics::text(temp$rect$left + temp$rect$w * 0.55, temp$text$y, ltxt)
}
}
if(Cind && is.element(errX, c("All", "Covs"))) {
post.x <- error$Post
plot.x <- switch(EXPR=errX, All=error$Median[-7L], error$Median)
last.x <- switch(EXPR=errX, All=error$Final[-7L], error$Final)
if(titles) {
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
col.e <- seq_along(plot.x)
if(mispal) grDevices::palette(viridis(length(col.e) + 2L, option="D", alpha=transparency)[-seq_len(2L)])
ci.x <- switch(EXPR=errX, All=error$CIs[-7L,], error$CIs)
erange <- pretty(c(min(c(ci.x[,1L], plot.x, post.x)), max(c(ci.x[,2L], plot.x, post.x))))
x.plot <- graphics::barplot(plot.x, col=col.e, ylim=erange[c(1L, length(erange))], main="", yaxt="n", ylab="Error")
graphics::axis(2, at=erange, labels=erange)
e.col <- grDevices::adjustcolor(c("red", "darkorchid"), alpha.f=transparency)
graphics::points(x=x.plot, post.x, pch=15, col=e.col[1L], cex=2, xpd=TRUE)
graphics::points(x=x.plot, last.x, pch=18, col=e.col[2L], cex=2, xpd=TRUE)
suppressWarnings(.plot_CI(x.plot, plot.x, li=ci.x[,1L], ui=ci.x[,2L], add=TRUE, gap=TRUE, slty=3, lwd=3, scol=grey, pch=19))
if(titles) {
graphics::title(main=list("Covariance Error Metrics"))
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
ltxt <- c("Median Error Metrics", "Error Metrics Evaluated at Posterior Mean", "Error Metrics Evaluated at Last Valid Sample")
temp <- graphics::legend("center", legend=character(3L), text.width=max(graphics::strwidth(ltxt)), ncol=1, bty="n", cex=0.75, pt.cex=1.25, pch=c(19, 15, 18), col=c("black", e.col), xjust=0.5)
graphics::text(temp$rect$left + temp$rect$w * 0.55, temp$text$y, ltxt)
}
metric <- rbind(plot.x, post.x, last.x)
rownames(metric) <- c("Medians", "Evaluated at Posterior Mean", "Evaluated at Last Valid Sample")
print(metric)
} else if(Cind) {
graphics::par(defpar)
plot.x <- error$Post
col.e <- seq_along(plot.x)
if(mispal) grDevices::palette(viridis(length(col.e) + 2L, option="D", alpha=transparency)[-seq_len(2L)])
graphics::barplot(plot.x, col=col.e, main="", ylab="Error")
print(provideDimnames(matrix(plot.x, nrow=1), base=list("Evaluated at Posterior Mean", names(plot.x))))
}
}
if(m.sw["C.sw"]) {
grDevices::palette(tmp.pal)
if(!all.ind) {
partial <- FALSE
graphics::par(mai=c(1.25, 1, 0.75, 0.5), mfrow=c(1, 2), oma=c(0, 0, 2, 0))
}
if(param == "means") {
plot.x <- x$Means$mus[[g]]
if(!partial) {
stats::acf(plot.x[ind,], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, paste0(":\n", var.names[ind], " Variable"), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind,], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, paste0(":\n", var.names[ind], " Variable"), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Means - ", ifelse(grp.ind, paste0("Cluster ", g, ":\n"), ""), var.names[ind], " Variable")), outer=TRUE)
}
}
if(param == "scores") {
plot.x <- x$Scores$eta
if(!partial) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, paste0(":\n", "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L]), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, paste0(":\n", "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L]), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Scores - ", "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L])), outer=TRUE)
}
}
if(param == "loadings") {
plot.x <- x$Loadings$lmats[[g]]
if(!partial) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, paste0(":\n", var.names[ind[1L]], " Variable, Factor ", ind[2L]), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, paste0(":\n", var.names[ind[1L]], " Variable, Factor ", ind[2L]), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Loadings - ", ifelse(grp.ind, paste0("Cluster ", g, ":\n"), ""), var.names[ind[1L]], " Variable, Factor ", ind[2L])), outer=TRUE)
}
}
if(param == "uniquenesses") {
plot.x <- x$Uniquenesses$psis[[g]]
if(!partial) {
stats::acf(plot.x[ind,], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n", var.names[ind], " Variable"), ""), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind,], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n", var.names[ind], " Variable"), ""), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Uniquenesses - ", ifelse(grp.ind, paste0("Cluster ", g, ":\n"), ""), switch(EXPR=uni.type, constrained=, unconstrained=paste0(var.names[ind], " Variable"), ""))), outer=TRUE)
}
}
if(param == "pis") {
plot.x <- clust$pi.prop
if(!partial) {
stats::acf(plot.x[ind,], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all(all.ind, matx), paste0(" - Cluster ", ind), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind,], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(all(all.ind, matx), paste0(" - Cluster ", ind), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Mixing Proportions - Cluster ", ind)), outer=TRUE)
}
}
if(is.element(param, c("alpha", "discount"))) {
plot.x <- switch(EXPR=param, alpha=clust$Alpha$alpha, discount=as.vector(clust$Discount$discount))
if(switch(EXPR=param, alpha=clust$Alpha$alpha.rate, discount=clust$Discount$disc.rate) == 0 ||
((is.null(attr(x, "Discount")) || attr(x, "Discount") >= 0) && length(unique(round(plot.x, min(.ndeci(plot.x))))) == 1)) {
warning(paste0(switch(EXPR=param, alpha="Acceptance", discount=ifelse(attr(x, "Kappa0"), "Acceptance", "Mutation")), " rate too low: can't plot ", ifelse(all.ind, ifelse(partial, "partial-", "auto-"), ""), "correlation function", ifelse(all.ind, "\n", "s\n")), call.=FALSE)
next
}
if(!partial) {
stats::acf(plot.x, main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF")))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x, main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF")))
if(all(!all.ind, titles)) graphics::title(main=list(paste0(switch(EXPR=param, alpha="Alpha", discount="Discount"))), outer=TRUE)
}
}
}
if(all(all.ind, titles)) graphics::title(ifelse(param != "pis", paste0(toupper(substr(param, 1L, 1L)), substr(param, 2L, nchar(param)),
ifelse(all(grp.ind, !is.element(param, c("scores", "pis", "alpha", "discount"))), paste0(" - Cluster ", g), "")),
paste0("Mixing Proportions", ifelse(matx, "", paste0(" - Cluster ", ind)))), outer=TRUE)
if(isTRUE(msgx)) .ent_exit(opts = defopt)
}
}
# Loadings Heatmaps
#' Convert a numeric matrix to colours
#'
#' Converts a matrix to a hex colour code representation for plotting using \code{\link{plot_cols}}. Used internally by \code{\link{plot.Results_IMIFA}} for plotting posterior mean loadings heatmaps.
#' @param mat Either a matrix or, when \code{compare} is \code{TRUE}, a list of matrices.
#' @param cols The colour palette to be used. The default palette uses \code{\link[viridisLite]{viridis}}. Will be checked for validity by \code{\link{is.cols}}.
#' @param compare Logical switch used when desiring comparable colour representations (usually for comparable heat maps) across multiple matrices. Ensures plots will be calibrated to a common colour scale so that, for instance, the colour on the heat map of an entry valued at 0.7 in Matrix A corresponds exactly to the colour of a similar value in Matrix B. When \code{TRUE}, \code{mat} must be supplied as a list of matrices, which must have either the same number of rows, or the same number of columns.
#' @param byrank Logical indicating whether to convert the matrix itself or the sample ranks of the values therein. Defaults to \code{FALSE}.
#' @param breaks Number of gradations in colour to use. Defaults to \code{length(cols)}. Alternatively, a vector of breakpoints for use with \code{\link[base]{cut}}.
#' @param na.col Colour to be used to represent missing data. Will be checked for validity by \code{\link{is.cols}}.
#' @param transparency A factor in [0, 1] modifying the opacity for overplotted lines. Defaults to 1 (i.e. no transparency). Only relevant when \code{cols} is not supplied, otherwise the supplied \code{cols} must already be adjusted for transparency.
#' @param ... Catches unused arguments.
#'
#' @return A matrix of hex colour code representations, or a list of such matrices when \code{compare} is \code{TRUE}.
#' @export
#' @keywords plotting
#' @importFrom viridisLite "viridis"
#'
#' @seealso \code{\link{plot_cols}}, \code{\link{heat_legend}}, \code{\link{is.cols}}, \code{\link[base]{cut}}
#'
#' @usage
#' mat2cols(mat,
#' cols = NULL,
#' compare = FALSE,
#' byrank = FALSE,
#' breaks = NULL,
#' na.col = "#808080FF",
#' transparency = 1,
#' ...)
#' @examples
#' # Generate a colour matrix using mat2cols()
#' mat <- matrix(rnorm(100), nrow=10, ncol=10)
#' mat[2,3] <- NA
#' cols <- heat.colors(12)[12:1]
#' (matcol <- mat2cols(mat, cols=cols))
#'
#' # Use plot_cols() to visualise the colours matrix
#' par(mar=c(5.1, 4.1, 4.1, 3.1))
#' plot_cols(matcol)
#'
#' # Add a legend using heat_legend()
#' heat_legend(mat, cols=cols); box(lwd=2)
#'
#' # Try comparing heat maps of multiple matrices
#' mat1 <- cbind(matrix(rnorm(100, sd=c(4,2)), nr=50, nc=2, byrow=TRUE), 0.1)
#' mat2 <- cbind(matrix(rnorm(150, sd=c(7,5,3)), nr=50, nc=3, byrow=TRUE), 0.1)
#' mat3 <- cbind(matrix(rnorm(50, sd=1), nr=50, nc=1, byrow=TRUE), 0.1)
#' mats <- list(mat1, mat2, mat3)
#' colmats <- mat2cols(mats, cols=cols, compare=TRUE)
#' par(mfrow=c(2, 3), mar=c(1, 2, 1, 2))
#'
#' # Use common palettes (top row)
#' plot_cols(colmats[[1]]); heat_legend(range(mats), cols=cols); box(lwd=2)
#' plot_cols(colmats[[2]]); heat_legend(range(mats), cols=cols); box(lwd=2)
#' plot_cols(colmats[[3]]); heat_legend(range(mats), cols=cols); box(lwd=2)
#'
#' # Use uncommon palettes (bottom row)
#' plot_cols(mat2cols(mat1, cols=cols)); heat_legend(range(mat1), cols=cols); box(lwd=2)
#' plot_cols(mat2cols(mat2, cols=cols)); heat_legend(range(mat2), cols=cols); box(lwd=2)
#' plot_cols(mat2cols(mat3, cols=cols)); heat_legend(range(mat3), cols=cols); box(lwd=2)
mat2cols <- function(mat, cols = NULL, compare = FALSE, byrank = FALSE, breaks = NULL, na.col = "#808080FF", transparency = 1, ...) {
  if(isTRUE(compare)) {
    # Validate that 'mat' is a list whose elements are all numeric/logical
    # matrices or data.frames. BUG FIX: the original used '&&' here, so when
    # 'mat' WAS a list its elements were never actually validated; '||' runs
    # the element check whenever 'mat' is a list.
    .valid_elem <- function(x) {
      if(is.data.frame(x)) x <- data.matrix(x)
      is.matrix(x) && (is.numeric(x) || is.logical(x))
    }
    if(!inherits(mat, "list") ||
       !all(vapply(mat, .valid_elem,
                   logical(1L)))) stop("'mat' must be a list of numeric/logical matrices or data.frames when 'compare' is TRUE", call.=FALSE)
    # Coerce any data.frames to numeric matrices up front so the cbind/rbind
    # and the as.numeric() coercion further down behave as documented
    mat <- lapply(mat, function(x) if(is.data.frame(x)) data.matrix(x) else x)
    nc  <- vapply(mat, ncol, numeric(1L))
    nr  <- vapply(mat, nrow, numeric(1L))
    uc  <- unique(nc)
    ur  <- unique(nr)
    if(length(ur) == 1)        {
      # Same number of rows: bind side-by-side & remember each column's origin
      mat <- do.call(cbind, mat)
      spl <- matrix(rep(seq_along(nc), nc), nrow=ur, ncol=ncol(mat), byrow=TRUE)
    } else if(length(uc) == 1) {
      # Same number of columns: stack & remember each row's origin
      mat <- do.call(rbind, mat)
      spl <- matrix(rep(seq_along(nr), nr), nrow=nrow(mat), ncol=uc, byrow=FALSE)
    } else                       stop("Matrices must have either the same number of rows or the same number of columns", call.=FALSE)
  } else if(!is.matrix(mat)  ||
            (!is.numeric(mat) &&
             !is.logical(mat)))  stop("'mat' must be a numeric/logical matrix when 'compare' is FALSE", call.=FALSE)
  # is.null() rather than missing(): an explicitly supplied NULL now also
  # receives the default viridis palette instead of failing downstream
  if(is.null(cols)) {
    trx <- grDevices::dev.capabilities()$semiTransparency
    xtr <- missing(transparency)
    # BUG FIX: the original '&&' only validated 'transparency' when its length
    # differed from 1, so e.g. transparency=5 slipped through silently
    if(length(transparency) != 1 ||
       !is.numeric(transparency) ||
       transparency < 0          ||
       transparency > 1)          stop("'transparency' must be a single number in [0, 1]", call.=FALSE)
    if(transparency != 1 && !trx) {
      if(!xtr)                     message("'transparency' not supported on this device\n")
      transparency <- 1
    }
    cols <- viridis(30L, option="B", alpha=transparency)
  }
  if(!all(is.cols(cols)))        stop("Invalid 'cols' colour palette supplied", call.=FALSE)
  if(any(!is.logical(byrank),
         length(byrank) != 1))   stop("'byrank' must be a single logical indicator", call.=FALSE)
  # is.null() rather than missing(): explicit NULL gets the default too
  breaks <- if(is.null(breaks)) length(cols) else breaks
  m1     <- if(isTRUE(byrank))  rank(mat)    else mat
  # Bin the (possibly ranked) values and map each bin to a palette colour
  facs   <- cut(as.numeric(m1), breaks, include.lowest=TRUE)
  answer <- matrix(cols[as.numeric(facs)], nrow=nrow(mat), ncol=ncol(mat))
  if(any((NM <- is.na(mat)))) {
    # BUG FIX: 'length(na.col != 1)' tested the length of a comparison result;
    # the intended check is that 'na.col' is a single valid colour
    if(length(na.col) != 1 ||
       !all(is.cols(na.col)))    stop("'na.col' must be a valid colour in the presence of missing data", call.=FALSE)
    answer <- replace(answer, NM, na.col)
  }
  rownames(answer) <- rownames(mat)
  colnames(answer) <- colnames(mat)
  if(isTRUE(compare)) {
    # Split the combined colour matrix back into one matrix per input element.
    # FIX: use the scalar common dimension ur/uc; nr/nc are vectors and matrix()
    # expects a single nrow/ncol value
    splans <- split(answer, spl)
    answer <- if(length(ur) == 1) lapply(splans, matrix, nrow=ur) else lapply(splans, matrix, ncol=uc)
  }
  answer
}
# Colour Checker
#' Check for Valid Colours
#'
#' Checks if the supplied vector contains valid colours.
#' @param cols A vector of colours, usually as a character string.
#'
#' @return A logical vector of length \code{length(cols)} which is \code{TRUE} for entries which are valid colours and \code{FALSE} otherwise.
#' @keywords utility
#' @export
#'
#' @examples
#' all(is.cols(1:5))
#'
#' all(is.cols(heat.colors(30)))
#'
#' any(!is.cols(c("red", "green", "aquamarine")))
is.cols <- function(cols) {
  # A value is a valid colour iff grDevices::col2rgb() can convert it;
  # conversion failures are translated to FALSE rather than raising an error.
  valid.one <- function(col) {
    tryCatch(is.matrix(grDevices::col2rgb(col)), error = function(e) FALSE)
  }
  vapply(cols, valid.one, logical(1L))
}
# Heatmap Legends
#' Add a colour key legend to heatmap plots
#'
#' Using only base graphics, this function appends a colour key legend for heatmaps produced by, for instance, \code{\link{plot_cols}} or \code{\link[graphics]{image}}.
#' @param data Either the data with which the heatmap was created or a vector containing its minimum and maximum values. Missing values are ignored.
#' @param cols The colour palette used when the heatmap was created. By default, the same \code{\link[viridisLite]{viridis}} default as in \code{\link{mat2cols}} is used. Will be checked for validity by \code{\link{is.cols}}.
#' @param breaks Optional argument giving the break-points for the axis labels.
#' @param cex.lab Magnification of axis annotation, indicating the amount by which plotting text and symbols should be scaled relative to the default of 1.
#' @param ... Catches unused arguments.
#'
#' @return Modifies an existing plot by adding a colour key legend.
#' @export
#' @keywords plotting
#'
#' @seealso \code{\link[graphics]{image}}, \code{\link{plot_cols}}, \code{\link{mat2cols}}, \code{\link{is.cols}}
#' @usage
#' heat_legend(data,
#' cols = NULL,
#' breaks = NULL,
#' cex.lab = 1,
#' ...)
#' @examples
#' # Generate a matrix and plot it with a legend
#' data <- matrix(rnorm(50), nrow=10, ncol=5)
#' cols <- heat.colors(12)[12:1]
#' par(mar=c(5.1, 4.1, 4.1, 3.1))
#'
#' plot_cols(mat2cols(data, cols=cols))
#' heat_legend(data, cols); box(lwd=2)
heat_legend <- function(data, cols = NULL, breaks = NULL, cex.lab = 1, ...) {
  # Append a vertical colour-key strip just outside the right-hand edge of an
  # existing plot. 'data' supplies the value range for the axis annotation,
  # 'cols' the palette, optional 'breaks' give explicit labels, and 'cex.lab'
  # scales the annotation text.
  if(length(cex.lab) > 1 || (!is.numeric(cex.lab) ||
     cex.lab <= 0))              stop("Invalid 'cex.lab' supplied", call.=FALSE)
  if(!is.numeric(data))          stop("'data' must be numeric", call.=FALSE)
  if(missing(cols)) {
    cols <- viridis(30L, option="B", alpha=1L)
  } else if(!all(is.cols(cols))) stop("Invalid 'cols' colour palette supplied", call.=FALSE)
  usr     <- graphics::par("usr")
  old.xpd <- graphics::par()$xpd
  # Horizontal extent of the legend strip, just beyond the plot region
  x.gap   <- (usr[2L] - usr[1L]) / 1000
  strip.x <- c(usr[2L] + x.gap, usr[2L] + x.gap + (usr[2L] - usr[1L]) / 50)
  y.base  <- usr[3L]
  y.step  <- (usr[4L] - usr[3L]) / length(cols)
  poly.x  <- rep(strip.x, each = 2L)
  graphics::par(xpd = TRUE)
  # One filled rectangle per palette colour, stacked bottom-to-top
  for(k in seq_along(cols)) {
    poly.y <- y.base + y.step * c(k - 1L, k, k, k - 1L)
    graphics::polygon(poly.x, poly.y, col = cols[k], border = cols[k])
  }
  # Overlay an invisible plot scaled to the data range so the side-4 axis
  # annotation lines up with the colour strip
  graphics::par(new = TRUE)
  yrange <- range(data, na.rm = TRUE)
  base::plot(0, 0, type = "n", ylim = yrange, yaxt = "n", ylab = "", xaxt = "n", xlab = "", frame.plot = FALSE)
  if(is.null(breaks)) {
    graphics::axis(side = 4, las = 2, tick = FALSE, line = 0.1, cex.axis = cex.lab)
  } else {
    if(length(breaks) !=
       length(cols))             warning("'breaks' and 'cols' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
    graphics::axis(side = 4, las = 2, tick = FALSE, line = 0.1, cex.axis = cex.lab,
                   at=seq(min(yrange), max(yrange), length.out = length(breaks)), labels=round(breaks, 2))
  }
  suppressWarnings(graphics::par(xpd = old.xpd))
}
# Prior No. Clusters (DP & PY)
#' Plot Pitman-Yor / Dirichlet Process Priors
#'
#' Plots the prior distribution of the number of clusters under a Pitman-Yor / Dirichlet process prior, for a sample of size \code{N} at given values of the concentration parameter \code{alpha} and optionally also the \code{discount} parameter. Useful for soliciting sensible priors (or fixed values) for \code{alpha} or \code{discount} under the \code{"IMFA"} and \code{"IMIFA"} methods for \code{\link{mcmc_IMIFA}}.
#' @param N The sample size.
#' @param alpha The concentration parameter. Must be specified and must be strictly greater than \code{-discount}. The case \code{alpha=0} is accommodated. When \code{discount} is negative \code{alpha} must be a positive integer multiple of \code{abs(discount)}.
#' @param discount The discount parameter for the Pitman-Yor process. Must be less than 1, but typically lies in the interval [0, 1). Defaults to 0 (i.e. the Dirichlet process). When \code{discount} is negative \code{alpha} must be a positive integer multiple of \code{abs(discount)}.
#' @param show.plot Logical indicating whether the plot should be displayed (default = \code{TRUE}).
#' @param type The type of plot to be drawn, as per \code{\link{plot}}. Defaults to \code{"h"}: histogram-like vertical lines.
#'
#' @details All arguments are vectorised. Users can also consult \code{\link{G_expected}}, \code{\link{G_variance}}, and \code{\link{G_calibrate}} in order to solicit sensible priors.
#' @note The actual density values are returned invisibly. Therefore, they can be visualised as desired by the user even if \code{show.plot} is \code{FALSE}.
#'
#' @return A plot of the prior distribution if \code{show.plot} is \code{TRUE}. Density values are returned invisibly. Note that the density values may not strictly sum to one in certain cases, as values small enough to be represented as zero may well be returned.
#' @export
#' @keywords plotting
#' @seealso \code{\link{G_moments}}, \code{\link[Rmpfr]{Rmpfr}}
#'
#' @note Requires use of the \code{\link[Rmpfr]{Rmpfr}} and \code{gmp} libraries; may encounter difficulty and slowness for large \code{N}, especially with non-zero \code{discount} values. Despite the high precision arithmetic used, the functions can be unstable for small values of \code{discount}.
#'
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @references De Blasi, P., Favaro, S., Lijoi, A., Mena, R. H., Prunster, I., and Ruggiero, M. (2015) Are Gibbs-type priors the most natural generalization of the Dirichlet process?, \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence}, 37(2): 212-229.
#' @usage
#' G_priorDensity(N,
#' alpha,
#' discount = 0,
#' show.plot = TRUE,
#' type = "h")
#' @examples
#' # Plot Dirichlet process priors for different values of alpha
#' (DP <- G_priorDensity(N=50, alpha=c(3, 10, 25)))
#'
#' # Non-zero discount requires loading the "Rmpfr" library
#' # require("Rmpfr")
#'
#' # Verify that these alpha/discount values produce Pitman-Yor process priors with the same mean
#' # G_expected(N=50, alpha=c(19.23356, 6.47006, 1), discount=c(0, 0.47002, 0.7300045))
#'
#' # Now plot them to examine tail behaviour as discount increases
#' # alpha <- c(19.23356, 6.47006, 1)
#' # discount <- c(0, 0.47002, 0.7300045)
#' # (PY <- G_priorDensity(N=50, alpha=alpha, discount=discount, type="l"))
#'
#' # Other special cases of the PYP are also facilitated
#' # G_priorDensity(N=50, alpha=c(alpha, 27.1401, 0),
#' # discount=c(discount, -27.1401/100, 0.8054448), type="b")
G_priorDensity <- function(N, alpha, discount = 0, show.plot = TRUE, type = "h") {
# Prior distribution of the number of clusters under a Pitman-Yor / Dirichlet
# process for sample size N, at (vectorised) alpha/discount values.
# High-precision arithmetic via 'Rmpfr' + 'gmp' is required throughout;
# record whether 'Rmpfr' was already loaded so it is only detached on exit
# when this function loaded it itself.
igmp <- isNamespaceLoaded("Rmpfr")
mpfrind <- suppressMessages(requireNamespace("Rmpfr", quietly=TRUE)) && .version_above("gmp", "0.5-4")
if(isFALSE(mpfrind)) { stop("'Rmpfr' package not installed", call.=FALSE)
} else if(isFALSE(igmp)) {
on.exit(.detach_pkg("Rmpfr"))
on.exit(.detach_pkg("gmp"), add=TRUE)
}
# Save the palette, graphical parameters, and options; restore them on exit
oldpal <- grDevices::palette()
on.exit(grDevices::palette(oldpal), add=isFALSE(mpfrind))
defpar <- suppressWarnings(graphics::par(no.readonly=TRUE))
defpar$new <- FALSE
suppressWarnings(graphics::par(pty="m"))
on.exit(suppressWarnings(graphics::par(defpar)), add=TRUE)
defopt <- options()
# Raise the expression-nesting limit: the high-precision sums below can
# exceed the default limit for large N
options(expressions = 500000)
on.exit(suppressWarnings(options(defopt)), add=TRUE)
# --- Input checks: N and show.plot must be scalar; alpha/discount vectorised
if(any(c(length(N),
length(show.plot)) > 1)) stop("Arguments 'N' and 'show.plot' must be strictly of length 1", call.=FALSE)
if(!is.logical(show.plot)) stop("'show.plot' must be a single logical indicator", call.=FALSE)
if(isTRUE(show.plot)) {
# 'type' must be one of the single-character plot types accepted by plot()
if(length(type) > 1 ||
!is.character(type) ||
nchar(type) > 1) stop("'type' must be a single character", call.=FALSE)
if(!is.element(type,
c("p", "l", "b", "c", "o",
"h", "s", "S", "n"))) stop("Invalid 'type'", call.=FALSE)
}
max.len <- max(length(alpha), length(discount))
if(max.len > 10) stop("Can't plot more than ten distributions simultaneously", call.=FALSE)
if(!is.element(length(alpha),
c(1, max.len))) stop("'alpha' must be of length 1 or length(discount)", call.=FALSE)
if(!is.element(length(discount),
c(1, max.len))) stop("'discount' must be of length 1 or length(alpha)", call.=FALSE)
if(!all(is.numeric(discount), is.numeric(alpha),
is.numeric(N))) stop("'N', 'alpha', and 'discount' inputs must be numeric", call.=FALSE)
# --- Admissibility constraints for the Pitman-Yor process parameters
if(any(discount >= 1)) stop("'discount' must be less than 1", call.=FALSE)
if(any(discount > 0 &
alpha <= - discount)) stop("'alpha' must be strictly greater than -discount", call.=FALSE)
if(any(discount < 0 &
(alpha <= 0 |
!.IntMult(alpha, discount)))) stop("'alpha' must be a positive integer multiple of 'abs(discount)' when 'discount' is negative", call.=FALSE)
if(any(alpha == 0 &
discount <= 0)) stop("'discount' must be strictly positive when 'alpha'=0", call.=FALSE)
# Recycle alpha/discount to a common length so they can be paired below
if(length(alpha) != max.len) {
alpha <- rep(alpha, max.len)
}
if(length(discount) != max.len) {
discount <- rep(discount, max.len)
}
# rx[g, i] holds the prior probability of g clusters for the i-th
# (alpha, discount) pair; Nsq2 is a 256-bit precision copy of 1:N
rx <- matrix(0, nrow=N, ncol=max.len)
Nseq <- seq_len(N)
Nsq2 <- Rmpfr::mpfr(Nseq, precBits=256)
for(i in seq_len(max.len)) {
alphi <- Rmpfr::mpfr(alpha[i], precBits=256)
disci <- Rmpfr::mpfr(discount[i], precBits=256)
if(disci == 0) {
# Dirichlet process case: weights alpha^g / (alpha)_N combined with the
# (unsigned) Stirling numbers of the first kind from gmp::Stirling1.all
vnk <- exp(Nsq2 * log(alphi) - log(Rmpfr::pochMpfr(alphi, N)))
rx[,i] <- gmp::asNumeric(abs(vnk * Rmpfr::.bigz2mpfr(gmp::Stirling1.all(N))))
} else {
if(disci > 0) {
# Positive discount: log-weights built from cumulative log-products of
# (alpha + j*discount) and the rising factorial (alpha + 1)_(N-1)
vnk <- c(Rmpfr::mpfr(0, precBits=256), cumsum(log(alphi + Nseq[-N] * disci))) -
log(Rmpfr::pochMpfr(alphi + 1, N - 1L)) - Nsq2 * log(disci)
} else {
# Negative discount: alpha = m * |discount| with m <- alpha/|discount|;
# cluster counts beyond min(m, N) get log-weight -Inf (probability zero)
m <- as.integer(alphi/abs(disci))
mn <- min(m, N)
seqN <- seq_len(mn - 1L)
vnk <- c(c(Rmpfr::mpfr(0, precBits=256), cumsum(log(m - seqN)) + seqN * log(abs(disci))) -
log(Rmpfr::pochMpfr(alphi + 1, N - 1L)) - c(seqN, mn) * log(abs(disci)), rep(-Inf, N - mn))
}
# Alternating binomial sums of rising factorials (sumBinomMpfr), one per
# possible cluster count g; combined with vnk and 1/N! on the log scale
lnkd <- lapply(Nseq, function(g) Rmpfr::sumBinomMpfr(g, f=function(k) Rmpfr::pochMpfr(-k * disci, N), n0=1))
rx[,i] <- gmp::asNumeric(exp(vnk - lfactorial(Nsq2)) * abs(Rmpfr::mpfr2array(unlist(lnkd), dim=N)))
# Alternative (kept for reference): vectorised variant via sapplyMpfr
#lnkd <- Rmpfr::sapplyMpfr(Nseq, function(g) Rmpfr::sumBinomMpfr(g, f=function(k) Rmpfr::pochMpfr(-k * disci, N), n0=1))
#rx[,i] <- gmp::asNumeric(exp(vnk - lfactorial(Nsq2)) * abs(lnkd))
}
}
if(isTRUE(show.plot)) {
if(max.len > 1) {
# Several curves: overlay with distinguishable, semi-transparent colours
# (transparency only when the device supports it)
cols <- seq(from=2L, to=max.len + 1L)
grDevices::palette("default")
grDevices::palette(grDevices::adjustcolor(cols, alpha.f=ifelse(grDevices::dev.capabilities()$semiTransparency && max.len > 1, 0.75, 1)))
graphics::matplot(x=seq_len(N), y=rx, type=type, col=cols - 1L, xlab="Clusters", ylim=c(0, max(rx)), ylab="Density", lend=1, xaxt="n",
pch=19, main=paste0("Prior Distribution of G\nN=", N), lwd=seq(3L, 1L, length.out=max.len), lty=seq_len(2L))
} else {
base::plot(x=seq_len(N), y=rx, type=type, xlab="Clusters", ylim=c(0, max(rx)), ylab="Density",
lend=1, pch=19, main=paste0("Prior Distribution of G\nN=", N), lwd=2L, lty=1L, xaxt="n")
}
# Replace a 0 tick with 1: the number of clusters is at least 1
ax <- pretty(seq_len(N))
ax <- replace(ax, ax == 0, 1L)
graphics::axis(1, at=ax, labels=ax)
}
# Return the density values invisibly (dropped to a vector for one curve)
invisible(if(max.len == 1) drop(rx) else rx)
}
#' Plots a matrix of colours
#'
#' Plots a matrix of colours as a heat map type image or as points. Intended for joint use with \code{mat2cols}.
#' @param cmat A matrix of valid colours, with missing values coded as \code{NA} allowed. Vectors should be supplied as matrices with 1 row or column, as appropriate.
#' @param na.col Colour used for missing \code{NA} entries in \code{cmat}.
#' @param ptype Switch controlling output as either a heat map \code{"image"} (the default) or as \code{"points"}.
#' @param border.col Colour of border drawn around the plot.
#' @param dlabels,rlabels,clabels Vector of labels for the diagonals, rows, and columns, respectively.
#' @param pch Point type used when \code{ptype="points"}.
#' @param cex Point cex used when \code{ptype="points"}.
#' @param label.cex Governs the cex parameter used for labels.
#' @param ... Further graphical parameters.
#'
#' @return Either an \code{"image"} or \code{"points"} type plot of the supplied colours.
#' @keywords plotting
#' @export
#'
#' @seealso \code{\link{mat2cols}}, \code{\link[graphics]{image}}, \code{\link{heat_legend}}, \code{\link{is.cols}}
#' @usage
#' plot_cols(cmat,
#' na.col = "#808080FF",
#' ptype = c("image", "points"),
#' border.col = "#808080FF",
#' dlabels = NULL,
#' rlabels = FALSE,
#' clabels = FALSE,
#' pch = 15,
#' cex = 3,
#' label.cex = 0.6,
#' ...)
#' @examples
#' # Generate a colour matrix using mat2cols()
#' mat <- matrix(rnorm(100), nrow=10, ncol=10)
#' mat[2,3] <- NA
#' cols <- heat.colors(12)[12:1]
#' (matcol <- mat2cols(mat, cols=cols))
#'
#' # Use plot_cols() to visualise the colours matrix
#' par(mar=c(5.1, 4.1, 4.1, 3.1))
#' plot_cols(matcol)
#'
#' # Add a legend using heat_legend()
#' heat_legend(mat, cols=cols); box(lwd=2)
#'
#' # Replace colour of exact zero entries:
#' # Often important to call mat2cols() first (to include 0 in the cuts),
#' # then replace relevant entries with NA for plot_cols(), i.e.
#' mat[2,3] <- 0
#' matcol2 <- mat2cols(mat, cols=cols)
#' plot_cols(replace(matcol2, mat == 0, NA), na.col="blue")
#' heat_legend(mat, cols=cols); box(lwd=2)
plot_cols <- function(cmat, na.col = "#808080FF", ptype = c("image", "points"), border.col = "#808080FF",
                      dlabels = NULL, rlabels = FALSE, clabels = FALSE, pch = 15, cex = 3, label.cex = 0.6, ...) {
  # Plot a matrix of colours as a heat-map image or as a grid of points.
  # NA entries in 'cmat' are permitted and rendered using 'na.col'.
  if(!all(is.cols(cmat),
          is.matrix(cmat)))         stop("'cmat' needs to be a valid colour matrix:\ntry supplying a vector as a matrix with 1 row or column, as appropriate", call.=FALSE)
  # FIX: corrected the grammar of the two error messages below
  # ("needs to a" -> "needs to be a")
  if(!all(is.cols(na.col),
          length(na.col) == 1))     stop("'na.col' needs to be a valid single colour", call.=FALSE)
  if(!all(is.cols(border.col),
          length(border.col) == 1)) stop("'border.col' needs to be a valid single colour", call.=FALSE)
  if(!all(is.character(ptype)))     stop("'ptype' must be a character vector of length 1", call.=FALSE)
  ptype <- match.arg(ptype)
  N     <- nrow(cmat)
  P     <- ncol(cmat)
  # Fill missing cells with the NA colour before rendering
  cmat  <- replace(cmat, is.na(cmat), na.col)
  if(ptype == "image") {
    # Map the distinct colours to integer levels for graphics::image(),
    # reversing the column order of z so row 1 of 'cmat' appears at the top
    levels <- sort(unique(as.vector(cmat)))
    z      <- matrix(unclass(factor(cmat, levels = levels, labels = seq_along(levels))), nrow=N, ncol=P)
    info   <- list(x = seq_len(P), y=seq_len(N), z=t(z), col = levels)
    graphics::image(info$x, info$y, info$z[,N:1L, drop=FALSE], col = info$col, axes = FALSE, xlab = "", ylab = "", ...)
  } else {
    # Points rendering: one point per cell, again with row 1 at the top
    base::plot(rep(seq_len(P), rep(N, P)), rep(N:1L, P), col = as.vector(cmat), cex = cex, pch = pch,
               axes = FALSE, xlab = "", ylab = "", xlim = c(0.5, P + 0.5), ylim = c(0.5, N + 0.5), ...)
  }
  # Column labels along the top, row labels down the left-hand side
  graphics::axis(3, at = seq_len(P), tick = FALSE, labels = clabels, las = 2, cex.axis = label.cex)
  graphics::axis(2, at = N:1L, tick = FALSE, labels = rlabels, las = 2, cex.axis = label.cex)
  if(is.vector(dlabels)) {
    # Optional labels written along the main diagonal
    Nd <- length(dlabels)
    graphics::text(seq_len(Nd), Nd:1L, dlabels, cex = label.cex)
  }
  graphics::box(col = border.col)
}
#' Show image of grayscale grid
#'
#' Plots an image of a grayscale grid representation of a digit.
#' @param dat A \code{matrix} or \code{data.frame} with the same number of rows and columns (or a vector which can be coerced to such a format), representing a grayscale map of a single digit.
#' @param col The colour scale to be used. Defaults to \code{grey(seq(1, 0, length = ncol(dat)))}.
#' @param ... Additional arguments to be passed to \code{\link{mat2cols}} and/or \code{\link{plot_cols}} (e.g. \code{na.col}) when \code{dat} is a matrix or \code{\link[graphics]{image}} when \code{dat} is a vector.
#'
#' @return The desired image representation of the digit.
#' @export
#' @seealso \code{\link{USPSdigits}}, \code{\link{show_IMIFA_digit}}, \code{\link{mat2cols}}, \code{\link{plot_cols}}
#' @keywords plotting
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @usage
#' show_digit(dat,
#' col = NULL,
#' ...)
#' @examples
#' data(USPSdigits)
#'
#' # Plot the first digit
#' show_digit(USPSdigits$train[1,-1])
#'
#' # Visualise the overall mean
#' show_digit(colMeans(USPSdigits$train[,-1]))
show_digit <- function(dat, col = NULL, ...) {
  # Display a grayscale grid representation of a single digit.
  # 'dat' may be a square matrix, a data.frame, or a vector of square length.
  # FIX: removed a dead assignment — the original first computed
  # 'x <- if(df <- !is.data.frame(dat)) dat else as.matrix(dat)' and then
  # unconditionally overwrote 'x'; only the side-assignment to 'df' mattered.
  df    <- !is.data.frame(dat)     # TRUE when 'dat' is NOT a data.frame
  # Pixel count used to size the default palette: columns for a matrix,
  # total length for vectors/data.frames
  odims <- ifelse(is.matrix(dat), ncol(dat), length(dat))
  dims  <- sqrt(odims)
  # Matrices pass through as-is; other inputs are reshaped to a square matrix
  # (data.frames filled by row, plain vectors by column)
  x     <- if(is.matrix(dat) && df) dat else matrix(unlist(dat), nrow = dims, ncol = dims, byrow=!is.vector(dat))
  # Default palette: white-to-black ramp. FIX: 'length.out' instead of the
  # partially-matched 'length'; is.null() so an explicit NULL also gets the
  # default rather than failing downstream.
  col   <- if(!is.null(col)) col else grDevices::grey(seq(1L, 0L, length.out = odims))
  if(nrow(x) != ncol(x)) {
    # Fallback reshape when the intermediate result is not square
    x   <- matrix(dat, nrow=dims, ncol=dims, byrow=FALSE)
    if(diff(dim(x)) != 0)          stop("'dat' must be coercible to a square matrix", call. = FALSE)
  }
  if(!all(is.cols(col)))           stop("Invalid 'col'", call. = FALSE)
  if(is.vector(dat)) {
    # Vectors: use image() directly, reversing columns so the digit is upright
    graphics::image(matrix(x, nrow = dims)[,dims:1L], col = col, ...)
  } else {
    plot_cols(mat2cols(x, cols = col, ...), ...)
  }
  graphics::box(lwd = 1)
  invisible()
}
#' Plot the posterior mean image
#'
#' Plots the posterior mean of a given cluster from an \code{"IMIFA"}-related model fit to a digit data set in the form of a square grayscale grid.
#' @param res An object of class \code{"Results_IMIFA"} generated by \code{\link{get_IMIFA_results}}.
#' @param G The index of the cluster for which the posterior mean digit is to be represented.
#' @param what A switch controlling whether the \code{"mean"} or \code{"last"} valid sample is to be plotted.
#' @param dat The full grayscale grid data set (prior to centering and scaling). Necessary when \code{ind} is supplied or if pixels with standard deviation of 0 exist in the data set (which will have been automatically removed by \code{\link{mcmc_IMIFA}}).
#' @param ind The index of columns of \code{dat} which were retained when fitting the \code{"IMIFA"}-related model via \code{\link{mcmc_IMIFA}} (i.e. the complement of the discarded columns). Can be a vector of column indices of \code{dat} or an equivalent vector of logicals. The discarded pixels are replaced by the corresponding column-means among images assigned to the given cluster \code{G}.
#' @param ... Additional arguments to be passed, via \code{\link{show_digit}}, to \code{\link{mat2cols}} and/or \code{\link{plot_cols}}.
#'
#' @return The desired image representation of the posterior mean digit (or the last valid sample) from the desired cluster.
#' @note Note that both centering and scaling of the original data prior to modelling is accounted for in reconstructing the means, but \code{dat}, if necessary, must be the raw data prior to pre-processing.
#' @details This function is a wrapper to \code{\link{show_digit}} which supplies the posterior mean digit of a given cluster from a \code{"IMIFA"} model.
#' @importFrom matrixStats "colMeans2"
#' @export
#' @seealso \code{\link{USPSdigits}}, \code{\link{show_digit}}, \code{\link{get_IMIFA_results}}, \code{\link{mcmc_IMIFA}}, \code{\link{mat2cols}}, \code{\link{plot_cols}}
#' @keywords plotting
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @usage
#' show_IMIFA_digit(res,
#' G = 1,
#' what = c("mean", "last"),
#' dat = NULL,
#' ind = NULL,
#' ...)
#' @examples
#' # Load the USPS data and discard peripheral digits
#' data(USPSdigits)
#' ylab <- USPSdigits$train[,1]
#' train <- USPSdigits$train[,-1]
#' ind <- apply(train, 2, sd) > 0.7
#' dat <- train[,ind]
#'
#' \donttest{# Fit an IMIFA model (warning: quite slow!)
#' # sim <- mcmc_IMIFA(dat, n.iters=1000, prec.mu=1e-03, z.init="kmeans",
#' # centering=FALSE, scaling="none")
#' # res <- get_IMIFA_results(sim, zlabels=ylab)
#'
#' # Examine the posterior mean image of the first two clusters
#' # show_IMIFA_digit(res, dat=train, ind=ind)
#' # show_IMIFA_digit(res, dat=train, ind=ind, G=2)}
show_IMIFA_digit <- function(res, G = 1L, what = c("mean", "last"), dat = NULL, ind = NULL, ...) {
  # S3 generic: dispatches on the class of 'res'.
  # The only method shipped with the package is show_IMIFA_digit.Results_IMIFA,
  # so 'res' is expected to be a "Results_IMIFA" object from get_IMIFA_results().
  UseMethod("show_IMIFA_digit")
}
#' @method show_IMIFA_digit Results_IMIFA
#' @importFrom matrixStats "colMeans2"
#' @export
show_IMIFA_digit.Results_IMIFA <- function(res, G = 1L, what = c("mean", "last"), dat = NULL, ind = NULL, ...) {
  # Visualise the posterior mean (or last valid sample) of cluster G's mean
  # parameter as a grayscale digit image, undoing any centering/scaling that
  # was applied to the data prior to model fitting, and imputing pixels that
  # were discarded (zero-sd columns or user-supplied 'ind') from the raw data.
  #
  # Args:
  #   res:  "Results_IMIFA" object from get_IMIFA_results().
  #   G:    single cluster index in [1, res$GQ.results$G].
  #   what: "mean" for the posterior mean, "last" for the last valid sample.
  #   dat:  raw (pre-processing) data; required when pixels were discarded.
  #   ind:  indices (or logical vector) of columns of 'dat' discarded pre-fit.
  #   ...:  passed through to show_digit().
  if(!inherits(res,
               "Results_IMIFA")) stop("Results object of class 'Results_IMIFA' must be supplied", call. = FALSE)
  # Guard both bounds (and vector/non-numeric input): the original check only
  # caught G > res$GQ.results$G, letting e.g. G=0 fail later with an obscure
  # indexing error inside post.mu[,G].
  if(length(G)    != 1   ||
     !is.numeric(G)      ||
     G != floor(G)       ||
     G   < 1             ||
     G   > res$GQ.results$G) stop("Invalid 'G'", call. = FALSE)
  if(!all(is.character(what))) stop("'what' must be a character vector of length 1", call. = FALSE)
  # sd0 is a logical mask of *discarded* pixels (TRUE = discarded), or NULL when
  # nothing was dropped. missing(ind) (rather than is.null(ind)) is deliberate:
  # an explicitly supplied ind=NULL is treated as "no discarded pixels".
  sd0 <- if(missing(ind)) attr(res, "Sd0.drop") else if(is.logical(ind)) !ind else !(seq_len(attr(res, "Vars")) %in% ind)
  mu <- switch(EXPR=match.arg(what), mean = res$Means$post.mu[,G], last = res$Means$last.mu[,G])
  center <- attr(res, "Center")
  scale <- attr(res, "Scaling") != "none"
  if(!is.null(sd0)) {
    if(missing(dat)) stop("'dat' must be supplied when pixels were discarded &/or 'ind' is supplied", call.=FALSE)
    # Rebuild the full-length pixel vector: modelled pixels get the cluster
    # mean; discarded pixels get the column means of the (scaled) raw data
    # among observations MAP-assigned to cluster G.
    x <- rep(NA_real_, attr(res, "Vars"))
    x[sd0] <- colMeans2(.scale2(data.matrix(dat), center=center, scale=attr(res, "Scale"))[res$Clust$MAP == G,sd0, drop=FALSE])
    x[!sd0] <- mu
  } else x <- mu
  # Undo pre-processing so the image is on the original data scale.
  x <- if(scale) x * attr(res, "G.Scale") else x
  x <- if(center) x + attr(res, "G.Mean") else x
  show_digit(x, ...)
}
#
#' Plotting output and parameters of inferential interest for IMIFA and related models
#'
#' @param x An object of class \code{"Results_IMIFA"} generated by \code{\link{get_IMIFA_results}}.
#' @param plot.meth The type of plot to be produced for the \code{param} of interest, where \code{correlation} refers to ACF/PACF plots, \code{means} refers to posterior means, \code{density}, \code{trace} and \code{parallel.coords} are self-explanatory. \code{"all"} in this case, the default, refers to {\code{"trace"}, \code{"density"}, \code{"means"}, and \code{"correlation"}}. \code{"parallel.coords"} is only available when \code{param} is one of \code{"means"}, \code{"loadings"} or \code{"uniquenesses"} - note that this method applies a small amount of horizontal jitter to avoid overplotting.
#'
#' Special types of plots which don't require a \code{param} are:
#' \describe{
#' \item{\code{"GQ"}}{for plotting the posterior summaries of the numbers of clusters/factors, if available.}
#' \item{\code{"zlabels"}}{for plotting clustering uncertainties - in four different ways (incl. the posterior confusion matrix) - if clustering has taken place, with or without the clustering labels being supplied via the \code{zlabels} argument. If available, the average similarity matrix, reordered according to the MAP labels, is shown as a 5-th plot.}
#' \item{\code{"errors"}}{for conducting posterior predictive checking of the appropriateness of the fitted model by visualising the posterior predictive reconstruction error (PPRE) &/or histograms comparing the data to replicate draws from the posterior distribution &/or error metrics quantifying the difference between the estimated and empirical covariance matrices. The type of plot(s) produced depends on how the \code{error.metrics} argument was supplied to \code{\link{get_IMIFA_results}} and what parameters were stored.}
#' }
#' The argument \code{g} can be used to cycle through the available plots in each case. \code{ind} can also be used to govern which variable is shown for the 2-nd plot.
#' @param param The parameter of interest for any of the following \code{plot.meth} options: \code{all}, \code{trace}, \code{density}, \code{means}, \code{correlation}. The \code{param} must have been stored when \code{\link{mcmc_IMIFA}} was initially ran. Includes \code{pis} for methods where clustering takes place, and allows posterior inference on \code{alpha} (for the \code{"IMFA"}, \code{"IMIFA"}, \code{"OMFA"}, and \code{"OMIFA"} methods) and \code{discount} (for the \code{"IMFA"} and \code{"IMIFA"} methods). Otherwise \code{"means"}, \code{"scores"}, \code{"loadings"}, and \code{"uniquenesses"} can be plotted.
#' @param g Optional argument that allows specification of exactly which cluster the plot of interest is to be produced for. If not supplied, the user will be prompted to cycle through plots for all clusters. Also functions as an index for which plot to return when \code{plot.meth} is \code{GQ}, \code{zlabels}, or \code{errors} in much the same way.
#' @param mat Logical indicating whether a \code{\link[graphics]{matplot}} is produced (defaults to \code{TRUE}). If given as \code{FALSE}, \code{ind} is invoked.
#' @param zlabels The true labels can be supplied if they are known. If this is not supplied, the function uses the labels that were supplied, if any, to \code{\link{get_IMIFA_results}}. Only relevant when \code{plot.meth = "zlabels"}. When explicitly supplied, misclassified observations are highlighted in the first type of uncertainty plot (otherwise observations whose uncertainty exceed the inverse of the number of clusters are highlighted). For the second type of uncertainty plot, when \code{zlabels} are explicitly supplied, the uncertainty of misclassified observations is marked by vertical lines on the profile plot.
#' @param heat.map A logical which controls plotting posterior mean loadings or posterior mean scores as a heatmap, or else as something akin to \code{\link{plot}(..., type="h")}. Only relevant if \code{param = "loadings"} (in which case the default is \code{TRUE}) or \code{param = "scores"} (in which case the default is \code{FALSE}). Heatmaps are produced with the aid of \code{\link{mat2cols}} and \code{\link{plot_cols}}.
#' @param show.last A logical indicator which defaults to \code{FALSE}, but when \code{TRUE} replaces any instance of the posterior mean with the last valid sample. Only relevant when \code{param} is one of \code{"means"} \code{"scores"}, \code{"loadings"}, \code{"uniquenesses"}, or \code{"pis"} and \code{plot.meth} is one of \code{"all"} or \code{"means"}. Also relevant for \code{"means"}, \code{"loadings"} and \code{"uniquenesses"} when \code{plot.meth} is \code{"parallel.coords"}. When \code{TRUE}, this has the effect of forcing \code{intervals} to be \code{FALSE}.
#' @param palette An optional colour palette to be supplied if overwriting the default palette set inside the function by \code{\link[viridisLite]{viridis}} is desired. It makes little sense to a supply a \code{palette} when \code{plot.meth="all"} and \code{param} is one of \code{"scores"} or \code{"loadings"}.
#' @param ind Either a single number indicating which variable to plot when \code{param} is one of \code{means} or \code{uniquenesses} (or \code{plot.meth="errors"}), or which cluster to plot if \code{param} is \code{pis}. If \code{scores} are plotted, a vector of length two giving which observation and factor to plot; if \code{loadings} are plotted, a vector of length two giving which variable and factor to plot. Will be recycled to length 2 if necessary. Also governs which two factors are displayed on posterior mean plots of the \code{"scores"} when \code{heat.map} is \code{FALSE}; otherwise only relevant when \code{mat} is \code{FALSE}.
#' @param fac Optional argument that provides an alternative way to specify \code{ind[2]} when \code{mat} is \code{FALSE} and \code{param} is one of \code{scores} or \code{loadings}.
#' @param by.fac Optionally allows (mat)plotting of scores and loadings by factor - i.e. observation(s) (scores) or variable(s) (loadings) for a given factor, respectively, controlled by \code{ind} or \code{fac}) when set to \code{TRUE}. Otherwise all factor(s) are plotted for a given observation or variable when set to \code{FALSE} (the default), again controlled by \code{ind} or \code{fac}. Only relevant when \code{param} is one of \code{scores} or \code{loadings}.
#' @param type The manner in which the plot is to be drawn, as per the \code{type} argument to \code{\link{plot}}.
#' @param intervals Logical indicating whether credible intervals around the posterior mean(s) are to be plotted when \code{is.element(plot.meth, c("all", "means"))}. Defaults to \code{TRUE}, but can only be \code{TRUE} when \code{show.last} is \code{FALSE}.
#' @param common Logical indicating whether plots with \code{plot.meth="means"} (or the corresponding plots for \code{plot.meth="all"}) when \code{param} is one of \code{"means"}, \code{"scores"}, \code{"loadings"}, or \code{"uniquenesses"} are calibrated to a common scale based on the range of the \code{param} parameters across all clusters (defaults to \code{TRUE}, and only relevant when there are clusters). Otherwise, the only the range corresponding to the image being plotted is used to determine the scale.
#'
#' Note that this affects the \code{"loadings"} and \code{"scores"} plots regardless of the value of \code{heat.map}. An exception is the \code{"scores"} plots when \code{plot.meth="means"} and \code{heat.map} is \code{FALSE}, in which case \code{common} defaults to \code{FALSE}.
#' @param partial Logical indicating whether plots of type \code{"correlation"} use the PACF. The default, \code{FALSE}, ensures the ACF is used. Only relevant when \code{plot.meth = "all"}, otherwise both plots are produced when \code{plot.meth = "correlation"}.
#' @param titles Logical indicating whether default plot titles are to be used (\code{TRUE}), or suppressed (\code{FALSE}).
#' @param transparency A factor in [0, 1] modifying the opacity for overplotted lines. Defaults to 0.75, unless semi-transparency is not supported. Only relevant when \code{palette} is not supplied, otherwise the supplied \code{palette} must already be adjusted for transparency.
#' @param ... Other arguments typically passed to \code{\link{plot}} or the \code{breaks} argument to \code{\link{mat2cols}} and \code{\link{heat_legend}} when heatmaps are plotted.
#'
#' @return The desired plot with appropriate output and summary statistics printed to the console screen.
#' @export
#' @note Supplying the argument \code{zlabels} does \strong{not} have the same effect of reordering the sampled parameters as it does if supplied directly to \code{\link{get_IMIFA_results}}.
#'
#' When \code{mat} is \code{TRUE} and \code{by.fac} is \code{FALSE} (both defaults), the convention for dealing with overplotting for \code{trace} and \code{density} plots when \code{param} is either \code{scores} or \code{loadings} is to plot the last factor first, such that the first factor appears 'on top'.
#' @keywords plotting main
#' @method plot Results_IMIFA
#' @importFrom Rfast "colMedians" "Median"
#' @importFrom matrixStats "rowRanges"
#' @importFrom mclust "classError"
#' @importFrom viridisLite "viridis"
#' @seealso \code{\link{mcmc_IMIFA}}, \code{\link{get_IMIFA_results}}, \code{\link{mat2cols}}, \code{\link{plot_cols}}
#' @references Murphy, K., Viroli, C., and Gormley, I. C. (2020) Infinite mixtures of infinite factor analysers, \emph{Bayesian Analysis}, 15(3): 937-963. <\href{https://projecteuclid.org/euclid.ba/1570586978}{doi:10.1214/19-BA1179}>.
#'
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @usage
#' \method{plot}{Results_IMIFA}(x,
#' plot.meth = c("all", "correlation", "density", "errors", "GQ",
#' "means", "parallel.coords", "trace", "zlabels"),
#' param = c("means", "scores", "loadings", "uniquenesses",
#' "pis", "alpha", "discount"),
#' g = NULL,
#' mat = TRUE,
#' zlabels = NULL,
#' heat.map = TRUE,
#' show.last = FALSE,
#' palette = NULL,
#' ind = NULL,
#' fac = NULL,
#' by.fac = FALSE,
#' type = c("h", "n", "p", "l"),
#' intervals = TRUE,
#' common = TRUE,
#' partial = FALSE,
#' titles = TRUE,
#' transparency = 0.75,
#' ...)
#' @examples
#' \donttest{# See the vignette associated with the package for more graphical examples:
#' # vignette("IMIFA", package = "IMIFA")
#'
#' # data(olive)
#' # simIMIFA <- mcmc_IMIFA(olive, method="IMIFA")
#' # resIMIFA <- get_IMIFA_results(simIMIFA, z.avgsim=TRUE)
#'
#' # Examine the posterior distribution(s) of the number(s) of clusters (G) &/or latent factors (Q)
#' # For the IM(I)FA and OM(I)FA methods, this also plots the trace of the active/non-empty clusters
#' # plot(resIMIFA, plot.meth="GQ")
#' # plot(resIMIFA, plot.meth="GQ", g=2)
#'
#' # Plot clustering uncertainty (and, if available, the similarity matrix)
#' # plot(resIMIFA, plot.meth="zlabels", zlabels=olive$area)
#'
#' # Visualise the posterior predictive reconstruction error
#' # plot(resIMIFA, plot.meth="errors", g=1)
#'
#' # Compare histograms of the data vs. replicate draw from the posterior for the 1st variable
#' # plot(resIMIFA, plot.meth="errors", g=2, ind=1)
#'
#' # Visualise empirical vs. estimated covariance error metrics
#' # plot(resIMIFA, plot.meth="errors", g=3)
#'
#' # Look at the trace, density, posterior mean, and correlation of various parameters of interest
#' # plot(resIMIFA, plot.meth="all", param="means", g=1)
#' # plot(resIMIFA, plot.meth="all", param="means", g=1, ind=2)
#' # plot(resIMIFA, plot.meth="trace", param="scores")
#' # plot(resIMIFA, plot.meth="trace", param="scores", by.fac=TRUE)
#' # plot(resIMIFA, plot.meth="mean", param="loadings", g=1)
#' # plot(resIMIFA, plot.meth="mean", param="loadings", g=1, heat.map=FALSE)
#' # plot(resIMIFA, plot.meth="parallel.coords", param="uniquenesses")
#' # plot(resIMIFA, plot.meth="density", param="pis", intervals=FALSE, partial=TRUE)
#' # plot(resIMIFA, plot.meth="all", param="alpha")
#' # plot(resIMIFA, plot.meth="all", param="discount")}
plot.Results_IMIFA <- function(x, plot.meth = c("all", "correlation", "density", "errors", "GQ", "means", "parallel.coords", "trace", "zlabels"), param = c("means", "scores", "loadings", "uniquenesses", "pis", "alpha", "discount"), g = NULL, mat = TRUE,
zlabels = NULL, heat.map = TRUE, show.last = FALSE, palette = NULL, ind = NULL, fac = NULL, by.fac = FALSE, type = c("h", "n", "p", "l"), intervals = TRUE, common = TRUE, partial = FALSE, titles = TRUE, transparency = 0.75, ...) {
if(missing(x)) stop("'x' must be supplied", call.=FALSE)
if(!inherits(x, "Results_IMIFA")) stop("Results object of class 'Results_IMIFA' must be supplied", call.=FALSE)
GQ.res <- x$GQ.results
G <- GQ.res$G
Gseq <- seq_len(G)
Qs <- unname(GQ.res$Q)
Q.max <- max(Qs)
Qmseq <- seq_len(Q.max)
nLx <- Qs != 0
defpar <- suppressWarnings(graphics::par(no.readonly=TRUE))
defpar$new <- FALSE
suppressWarnings(graphics::par(pty="m"))
mispal <- missing(palette)
oldpal <- grDevices::palette()
if(mispal) palette <- viridis(min(10L, max(G, Q.max, 5L)), option="D")
if(!all(is.cols(cols=palette))) stop("Supplied colour palette contains invalid colours", call.=FALSE)
if(length(palette) < 5) warning("Palette should contain 5 or more colours\n", call.=FALSE)
trx <- grDevices::dev.capabilities()$semiTransparency
xtr <- missing(transparency)
if(length(transparency) != 1 &&
any(!is.numeric(transparency),
(transparency < 0 ||
transparency > 1))) stop("'transparency' must be a single number in [0, 1]", call.=FALSE)
if(transparency != 1 && !trx) {
if(!xtr) message("'transparency' not supported on this device\n")
transparency <- 1
}
tmp.pal <- palette
palette <- if(mispal) grDevices::adjustcolor(palette, alpha.f=transparency) else palette
grDevices::palette(palette)
grey <- ifelse(trx, grDevices::adjustcolor("grey50", alpha.f=transparency), "grey50")
defopt <- options()
options(warn=1)
suppressWarnings(graphics::par(cex.axis=0.8, new=FALSE))
on.exit(suppressWarnings(graphics::par(defpar)))
on.exit(do.call(graphics::clip, as.list(defpar$usr)), add=TRUE)
on.exit(grDevices::palette(oldpal), add=TRUE)
on.exit(suppressWarnings(options(defopt)), add=TRUE)
dots <- list(...)
dots <- dots[unique(names(dots))]
if(brX <- "breaks" %in% names(dots)) {
brXs <- dots[["breaks"]]
}
n.grp <- attr(GQ.res, "Clusters")
n.fac <- attr(GQ.res, "Factors")
G.supp <- attr(GQ.res, "Supplied")["G"]
Q.supp <- attr(GQ.res, "Supplied")["Q"]
method <- attr(x, "Method")
store <- attr(x, "Store")
n.var <- attr(x, "Vars")
var.pal <- max(min(n.var, 1024L), 2L)
n.obs <- attr(x, "Obs")
z.sim <- attr(x, "Z.sim")
plot.mx <- missing(plot.meth)
param.x <- missing(param)
type.x <- missing(type)
if(!all(is.character(plot.meth))) stop("'plot.meth' must be a character vector of length 1", call.=FALSE)
if(!all(is.character(param))) stop("'param' must be a character vector of length 1", call.=FALSE)
if(!all(is.character(type))) stop("'type' must be a character vector of length 1", call.=FALSE)
if(plot.mx) {
if(!param.x) { plot.meth <- "all"
} else stop("'plot.meth' not supplied:\nWhat type of plot would you like to produce?", call.=FALSE)
}
if(is.element(plot.meth,
c("G", "Q",
"QG"))) { plot.meth <- "GQ"
}
plot.meth <- match.arg(plot.meth)
param <- match.arg(param)
type <- match.arg(type)
if(!is.element(plot.meth, c("errors", "GQ", "zlabels")) &&
param.x) stop("'param' not supplied:\nWhat variable would you like to plot?", call.=FALSE)
m.sw <- c(G.sw = FALSE, Z.sw = FALSE, E.sw = FALSE, P.sw = FALSE, C.sw = FALSE, D.sw = FALSE, M.sw = FALSE, T.sw = FALSE)
v.sw <- attr(x, "Switch")
obs.names <- attr(x, "Obsnames")
var.names <- attr(x, "Varnames")
obs.names <- if(is.null(obs.names)) seq_len(n.obs) else obs.names
var.names <- if(is.null(var.names)) seq_len(n.var) else var.names
v.sw <- c(v.sw[-6L], v.sw[6L])
names(v.sw) <- c(as.character(formals()$param)[-1L], "u.sw")
ci.sw <- v.sw
uni.type <- unname(attr(x, "Uni.Meth")['Uni.Type'])
if((grp.ind <- !is.element(method, c("FA", "IFA")) && !(param == "uniquenesses" && is.element(uni.type, c("constrained", "single"))))) {
clust <- x$Clust
grp.size <- clust$post.sizes
labelmiss <- !is.null(attr(clust, "Label.Sup")) && !attr(clust, "Label.Sup")
} else grp.size <- n.obs
grp.ind <- all(G != 1, grp.ind)
if((all.ind <- plot.meth == "all")) {
if(v.sw[param]) {
m.sw[-seq_len(4L)] <- !m.sw[-seq_len(4L)]
graphics::layout(matrix(c(1, 2, 3, 4), nrow=2L, ncol=2L, byrow=TRUE))
graphics::par(cex=0.8, mai=c(0.5, 0.5, 0.5, 0.2), mgp=c(2, 1, 0), oma=c(0, 0.5, 2, 0.5))
}
} else {
graphics::layout(1)
sw.n <- paste0(toupper(substring(plot.meth, 1L, 1L)), ".sw")
m.sw[sw.n] <- TRUE
}
if(param == "uniquenesses") {
mat <- switch(EXPR=uni.type, constrained=, unconstrained=mat, FALSE)
}
mat <- n.var != 1 && mat
z.miss <- missing(zlabels)
if(!z.miss) {
if(all(!is.factor(zlabels), !is.logical(zlabels), !is.numeric(zlabels)) ||
length(zlabels) != n.obs) stop(paste0("'zlabels' must be a factor of length N=", n.obs), call.=FALSE)
}
if(m.sw["P.sw"]) {
if(!is.element(param, c("means",
"loadings", "uniquenesses"))) stop("Can only plot parallel coordinates for means, loadings or uniquenesses", call.=FALSE)
}
if(!grp.ind) {
if(m.sw["Z.sw"]) stop("Can't use 'zlabels' for 'plot.meth' as no clustering has taken place", call.=FALSE)
if(param == "pis") stop("Can't plot mixing proportions as no clustering has taken place", call.=FALSE)
}
if(m.sw["E.sw"]) {
errX <- attr(x, "Errors")
if(is.element(errX,
c("None", "Vars"))) { stop("Can't plot error metrics as they were not calculated within get_IMIFA_results()", call.=FALSE)
} else if(errX == "PPRE") { warning("Can only plot the posterior predictive reconstruction error, and not error metrics between covariance matrices\n", call.=FALSE)
} else if(errX == "Covs") { warning("Can only plot error metrics between covariance matrices, and not the posterior predictive reconstruction error\n", call.=FALSE)
} else if(errX == "Post") warning("Can only plot error metrics between covariance matrices evaluated at the posterior mean, as they were not calculated for every iteration within get_IMIFA_results\n", call.=FALSE)
}
if(all(any(m.sw["M.sw"], m.sw["P.sw"], all.ind),
is.element(param, c("means", "uniquenesses")),
!v.sw[param],
is.element(method, c("FA", "IFA")))) {
if(show.last) { stop(paste0("Can't plot last valid sample, as ", param, switch(EXPR=param, alpha=, discount=" wasn't", " weren't"), " stored"), call.=FALSE)
} else if(param == "means" &&
!v.sw["u.sw"]) { stop("Nothing to plot as means were not updated", call.=FALSE)
} else if(all.ind) { warning(paste0("Can only plot posterior mean, as ", param, switch(EXPR=param, alpha=, discount=" wasn't", " weren't"), " stored\n"), call.=FALSE)
all.ind <- FALSE
m.sw["M.sw"] <- TRUE
}
v.sw[param] <- !v.sw[param]
}
if(all(!v.sw[param], !m.sw["G.sw"],
!m.sw["Z.sw"], !m.sw["E.sw"])) stop(paste0("Nothing to plot: ", param, ifelse(is.element(param, c("alpha", "discount")), ifelse(any(all(param == "alpha", is.element(method, c("FA", "IFA"))),
all(param == "discount", !is.element(method, c("IMFA", "IMIFA")))), paste0(" not used for the ", method, " method"), paste0(" was fixed at ", ifelse(param == "alpha",
attr(x, "Alpha"), attr(x, "Discount")))), " weren't stored"), ifelse(param == "pis" && attr(x, "Equal.Pi"), " as mixing proportions were constrained to be equal across clusters", "")), call.=FALSE)
heat.map <- ifelse(missing(heat.map), param == "loadings", heat.map)
int.miss <- !missing(intervals)
if(any(!is.logical(heat.map),
length(heat.map) != 1)) stop("'heat.map' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(show.last),
length(show.last) != 1)) stop("'show.last' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(intervals),
length(intervals) != 1)) stop("'intervals' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(mat),
length(mat) != 1)) stop("'mat' must be a single logical indicator", call.=FALSE)
common <- !(missing(common) && all(grp.ind, !all.ind, m.sw["M.sw"], param == "scores", heat.map)) && (!grp.ind || common)
if(any(!is.logical(common),
length(common) != 1)) stop("'common' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(partial),
length(partial) != 1)) stop("'partial' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(titles),
length(titles) != 1)) stop("'titles' must be a single logical indicator", call.=FALSE)
if(any(!is.logical(by.fac),
length(by.fac) != 1)) stop("'by.fac' must be a single logical indicator", call.=FALSE)
if(all(show.last, intervals)) {
if(int.miss) message("Forcing 'intervals' to FALSE as 'show.last' is TRUE\n")
intervals <- FALSE
}
post.last <- ifelse(show.last, "Last Valid Sample", "Posterior Mean")
indx <- missing(ind)
facx <- missing(fac)
gx <- missing(g)
if(!indx) {
ind <- as.integer(ind)
xind <- ind
}
if(!facx) {
fac <- as.integer(fac)
fl <- length(fac)
if(fl == 1) fac <- rep(fac, G)
fl <- length(fac)
if(fl != G && is.element(param,
c("loadings", "scores"))) stop(paste0("'fac' must be supplied for each of the ", G, " clusters"), call.=FALSE)
}
g.score <- param == "scores" && all(grp.ind, !all.ind, !common)
if(!gx) g <- as.integer(g)
if(!gx && any(length(g) != 1,
!is.numeric(g))) stop("If 'g' is supplied it must be of length 1", call.=FALSE)
if(any(all(is.element(method, c("IMFA", "OMFA")), m.sw["G.sw"]), m.sw["Z.sw"])) {
if(m.sw["G.sw"]) {
Gs <- if(gx) seq_len(2L) else ifelse(g <= 2, g,
stop("Invalid 'g' value", call.=FALSE))
} else if(m.sw["Z.sw"]) {
Gs <- if(gx) (if(z.sim) seq_len(5L) else seq_len(4L)) else ifelse(g <=
ifelse(z.sim, 5, 4), g, stop(paste0("Invalid 'g' value", ifelse(z.sim, ": similarity matrix not available", "")), call.=FALSE))
}
} else if(all(is.element(method, c("IMIFA", "OMIFA")), m.sw["G.sw"])) {
if(m.sw["G.sw"]) {
Gs <- if(gx) seq_len(3L) else ifelse(g <= 3, g,
stop("Invalid 'g' value", call.=FALSE))
} else if(m.sw["Z.sw"]) {
Gs <- if(gx) (if(z.sim) seq_len(5L) else seq_len(4L)) else ifelse(g <=
ifelse(z.sim, 5, 4), g, stop(paste0("Invalid 'g' value", ifelse(z.sim, ": similarity matrix not available", "")), call.=FALSE))
}
} else if(m.sw["E.sw"]) {
Gs <- if(gx) switch(EXPR=errX, All=seq_len(3L), PPRE=seq_len(2L), 1L) else ifelse(g <= switch(EXPR=errX, All=3L, PPRE=2L, 1L), g,
stop("Invalid 'g' value", call.=FALSE))
} else if(any(all(is.element(param, c("scores", "pis", "alpha", "discount")), any(all.ind, common, param != "scores", !m.sw["M.sw"])), m.sw["G.sw"],
all(m.sw["P.sw"], param != "loadings"), all(param == "uniquenesses", is.element(uni.type, c("constrained", "single"))))) {
Gs <- 1L
} else if(!gx) {
if(!is.element(method, c("FA", "IFA"))) {
if(!is.element(g, Gseq)) stop("This g value was not used during simulation", call.=FALSE)
Gs <- g
} else if(g > 1) { message(paste0("Forced g=1 for the ", method, " method\n"))
Gs <- 1L
}
} else if(!interactive()) { stop("g must be supplied for non-interactive sessions", call.=FALSE)
} else {
Gs <- Gseq
}
if(m.sw["Z.sw"] && !all(Gs == 5)) {
prf <- NULL
uncer <- attr(clust$uncertainty, "Obs")
if(any(!labelmiss, !z.miss)) {
if(all(!labelmiss, z.miss)) {
prf <- clust$perf
} else {
pzs <- factor(clust$MAP, levels=seq_len(G))
tab <- table(pzs, zlabels, dnn=list("Predicted", "Observed"))
prf <- c(.class_agreement(tab), classError(classification=pzs, class=zlabels))
if(nrow(tab) != ncol(tab)) {
prf <- prf[-seq_len(2L)]
names(prf)[4L] <- "error.rate"
} else {
names(prf)[6L] <- "error.rate"
}
if(prf$error.rate == 0) {
prf$misclassified <- NULL
}
prf <- c(list(confusion.matrix = tab), prf, if(!is.null(uncer)) list(uncertain = uncer))
}
prf$confusion.matrix <- if(!is.null(prf$confusion.matrix)) stats::addmargins(prf$confusion.matrix, quiet=TRUE)
prf$error.rate <- if(!is.null(prf$error.rate)) paste0(round(100L * prf$error.rate, 2L), "%")
} else {
prf <- if(!is.null(uncer)) list(uncertain = uncer)
prf <- if(!is.null(prf[[1L]])) prf
}
}
for(g in Gs) {
Q <- Qs[g]
ng <- ifelse(grp.ind, grp.size[g], n.obs)
g.ind <- which(Gs == g)
msgx <- all(interactive(), g != max(Gs))
if(any(all(Qs == 0, param == "scores"),
all(Q == 0, param == "loadings"),
all(ng == 0, param == "scores", m.sw["M.sw"] && !all.ind))) {
warning(paste0("Can't plot ", param, paste0(ifelse(any(all(param == "scores", ng == 0), all(param == "loadings", grp.ind)), paste0(" for cluster ", g), "")), " as they contain no ", ifelse(all(param == "scores", ng == 0), "rows/observations\n", "columns/factors\n")), call.=FALSE)
if(g == max(Gs)) {
break
} else {
if(isTRUE(msgx)) .ent_exit(opts = defopt)
next
}
}
if(any(is.element(param, c("alpha", "discount")),
all(is.element(param, c("means", "uniquenesses")), !indx),
all(param == "loadings", Q == 1), all(param == "scores",
Q.max == 1))) { matx <- FALSE
} else {
matx <- mat
}
if(!matx) {
iter <- switch(EXPR=param, scores=seq_len(attr(x$Score, "Eta.store")), loadings=seq_len(attr(x, "N.Loadstore")[g]), seq_along(store))
}
if(is.element(param, c("scores", "loadings"))) {
if(all((g == min(Gs)), m.sw["M.sw"], isTRUE(heat.map))) {
if(brX) {
hlen <- length(brXs)
if(!mispal && (hlen !=
length(palette))) warning("'breaks' and 'palette' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
} else hlen <- 30L
hcols <- if(mispal) viridis(hlen, option="B") else palette
}
if(indx) ind <- c(1L, 1L)
if(!facx) ind[2L] <- fac[g]
if(all(length(ind) == 1,
mat)) { ind <- rep(ind, 2L)
if(g == 1) xind <- rep(xind, 2L)
}
if(length(ind) != 2) stop(paste0("Length of plotting indices must be 2 for the ", param, " parameter when 'mat' is FALSE"), call.=FALSE)
if(param == "scores") {
if(ind[1L] > n.obs) stop(paste0("First index can't be greater than the number of observations: ", n.obs), call.=FALSE)
if(ind[2L] > Q.max) { warning(paste0("Second index can't be greater than ", Q.max, ", the total number of factors", ifelse(grp.ind, " in the widest loadings matrix\n", "\n")), call.=FALSE)
if(isTRUE(msgx)) .ent_exit(opts = defopt)
next
}
} else {
if(ind[1L] > n.var) stop(paste0("First index can't be greater than the number of variables: ", n.var), call.=FALSE)
if(ind[2L] > Q) { warning(paste0("Second index can't be greater than ", Q, ", the number of factors", if(grp.ind) paste0(" in cluster ", g), ".\nTry specifying a vector of fac values with maximum entries ", paste0(Qs, collapse=", "), "\n"), call.=FALSE)
if(isTRUE(msgx)) .ent_exit(opts = defopt)
next
}
}
} else {
if(any(is.element(param, c("alpha", "discount")),
indx)) ind <- 1L
if(length(ind) > 1) stop("Length of plotting indices can't be greater than 1", call.=FALSE)
if(param == "pis") {
if(ind > G) stop(paste0("Index can't be greater than the number of clusters: ", G), call.=FALSE)
} else {
if(ind > n.var) stop(paste0("Index can't be greater than the number of variables: ", n.var), call.=FALSE)
}
}
if(m.sw["T.sw"]) {
if(param == "means") {
plot.x <- x$Means$mus[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, ylim=if(is.element(method, c("FA", "IFA")) && attr(x, "Center")) c(-1, 1), col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nMeans", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
base::plot(x=iter, y=plot.x[ind,], type="l", ylab="", xlab="Iteration", ylim=if(is.element(method, c("FA", "IFA")) && attr(x, "Center")) c(-1, 1))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nMean - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind], " Variable")))
}
}
if(param == "scores") {
x.plot <- x$Scores$eta
if(by.fac) {
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.obs)), option="D", alpha=transparency))
} else {
plot.x <- if(Q.max > 1) x.plot[ind[1L],rev(Qmseq),] else t(x.plot[ind[1L],rev(Qmseq),])
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q.max)), option="D", alpha=transparency))
}
if(matx) {
scols <- seq_along(grDevices::palette())
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=if(by.fac) scols else rev(scols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", ":\nScores - "), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]])))
}
} else {
base::plot(x=iter, y=x.plot[ind[1L],ind[2L],], type="l", ylab="", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L])))
}
}
if(param == "loadings") {
x.plot <- x$Loadings$lmats[[g]]
if(by.fac) {
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.var)), option="D", alpha=transparency))
} else {
plot.x <- x.plot[ind[1L],rev(seq_len(Q)),]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q)), option="D", alpha=transparency))
}
if(matx) {
lcols <- seq_along(grDevices::palette())
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=if(by.fac) lcols else rev(lcols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable")))
}
} else {
base::plot(x=iter, y=x.plot[ind[1L],ind[2L],], type="l", ylab="", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable, Factor ", ind[2L])))
}
}
if(param == "uniquenesses") {
plot.x <- x$Uniquenesses$psis[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nUniquenesses", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
base::plot(x=iter, y=plot.x[ind,], ylab="", type="l", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n"), ""), paste0(":\nUniquenesses - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), switch(EXPR=uni.type, constrained=, unconstrained=paste0(var.names[ind], " Variable"), ""))))
}
}
if(param == "pis") {
plot.x <- clust$pi.prop
if(matx) {
if(mispal) grDevices::palette(viridis(max(G, 2L), option="D", alpha=transparency))
graphics::matplot(t(plot.x), type="l", ylab="", xlab="Iteration", lty=1, col=seq_along(grDevices::palette()), ylim=c(0, 1))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nMixing Proportions")))))
} else {
base::plot(x=iter, y=plot.x[ind,], ylab="", type="l", xlab="Iteration")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nMixing Proportion - Cluster ", ind)))))
}
}
if(param == "alpha") {
plot.x <- clust$Alpha
base::plot(plot.x$alpha, ylab="", type="l", xlab="Iteration", main="")
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nAlpha")))))
if(all(intervals, ci.sw[param])) {
ci.x <- plot.x$ci.alpha
graphics::abline(h=plot.x$post.alpha, col=2, lty=2)
graphics::abline(h=ci.x[1L], col=grey, lty=2)
graphics::abline(h=ci.x[2L], col=grey, lty=2)
}
}
if(param == "discount") {
plot.x <- clust$Discount
base::plot(as.vector(plot.x$discount), ylab="", type="l", xlab="Iteration", main="", ylim=c(0, 1))
if(titles) graphics::title(main=list(paste0("Trace", ifelse(all.ind, "", paste0(":\nDiscount")))))
if(all(intervals, ci.sw[param])) {
ci.x <- plot.x$ci.disc
graphics::abline(h=plot.x$post.disc, col=2, lty=2)
graphics::abline(h=ci.x[1L], col=grey, lty=2)
graphics::abline(h=ci.x[2L], col=grey, lty=2)
}
}
if(!indx) { ind[1L] <- xind[1L]
if(all(facx, is.element(param, c("scores",
"loadings")))) ind[2L] <- xind[2L]
}
if(all.ind) xxind <- ind
}
if(m.sw["D.sw"]) {
if(param == "means") {
# Density panel for the cluster-g mean parameters.
x.plot <- x$Means$mus[[g]]
if(matx) {
# All variables at once: one density curve per row of the sampled means.
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
# bw="SJ" (Sheather-Jones) can fail on degenerate samples; fall back to the default bandwidth.
plot.x <- tryCatch(apply(x.plot, 1L, stats::density, bw="SJ"), error = function(e) apply(x.plot, 1L, stats::density))
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nMeans", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
# Single variable (row 'ind'): one shaded density.
plot.d <- tryCatch(stats::density(x.plot[ind,], bw="SJ"), error = function(e) stats::density(x.plot[ind,]))
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nMeans - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind], " Variable")))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "scores") {
# Density panel for the latent factor scores (eta).
x.plot <- x$Scores$eta
if(by.fac) {
# Fix a factor (column ind[2]) and show all observations.
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.obs)), option="D", alpha=transparency))
} else {
# Fix an observation and show all factors; t() keeps a matrix when Q == 1.
# NOTE(review): 'ind[1]' here vs 'ind[1L]' elsewhere — same value, stylistic inconsistency only.
plot.x <- if(Q > 1) x.plot[ind[1],rev(Qmseq),] else t(x.plot[ind[1L],rev(Qmseq),])
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q.max)), option="D", alpha=transparency))
}
if(matx) {
scols <- seq_along(grDevices::palette())
# Densities over the full array's rows; SJ bandwidth with default-bandwidth fallback.
plot.x <- tryCatch(apply(x.plot, 1L, stats::density, bw="SJ"), error = function(e) apply(x.plot, 1L, stats::density))
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=if(by.fac) scols else rev(scols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", ":\nScores - "), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]])))
}
} else {
# Single (observation, factor) cell: shaded density.
plot.d <- tryCatch(stats::density(x.plot[ind[1L],ind[2L],], bw="SJ"), error = function(e) stats::density(x.plot[ind[1L],ind[2L],]))
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", ":\nScores - "), "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L])))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "loadings") {
# Density panel for the cluster-g factor loadings matrix.
x.plot <- x$Loadings$lmats[[g]]
if(by.fac) {
# Fix a factor column; curves run over variables.
plot.x <- x.plot[,ind[2L],]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, n.var)), option="D", alpha=transparency))
} else {
# Fix a variable row; curves run over (reversed) factors.
plot.x <- x.plot[ind[1L],rev(seq_len(Q)),]
if(mispal) grDevices::palette(viridis(min(10L, max(2L, Q)), option="D", alpha=transparency))
}
if(matx) {
lcols <- seq_along(grDevices::palette())
# Unlike the scores branch above, this applies over the subset 'plot.x' (not the full array).
plot.x <- tryCatch(apply(plot.x, 1L, stats::density, bw="SJ"), error = function(e) apply(plot.x, 1L, stats::density))
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=if(by.fac) lcols else rev(lcols))
if(by.fac) {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), "Factor ", ind[2L])))
} else {
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable")))
}
} else {
# Single (variable, factor) cell: shaded density.
plot.d <- tryCatch(stats::density(x.plot[ind[1L],ind[2L],], bw="SJ"), error = function(e) stats::density(x.plot[ind[1L],ind[2L],]))
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, ":\n", paste0(":\nLoadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), var.names[ind[1L]], " Variable, Factor ", ind[2L])))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "uniquenesses") {
# Density panel for the cluster-g uniquenesses (psi); these are positive,
# hence the .logdensity helper (presumably a log-scale KDE — defined elsewhere in the file).
x.plot <- x$Uniquenesses$psis[[g]]
if(matx) {
if(mispal) grDevices::palette(viridis(var.pal, option="D", alpha=transparency))
plot.x <- apply(x.plot, 1L, .logdensity, bw="SJ")
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", xlab="", ylab="", lty=1, col=seq_along(grDevices::palette()))
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nUniquenesses", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
} else {
plot.d <- .logdensity(x.plot[ind,], bw="SJ")
# Truncate any KDE mass that leaks below zero before shading.
plot.d$y[plot.d$x < 0] <- 0
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n"), ""), paste0(":\nUniquenesses - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), ""))), switch(EXPR=uni.type, constrained=, unconstrained=paste0(var.names[ind], " Variable"), ""))))
graphics::polygon(plot.d, col=grey, border=NA)
}
}
if(param == "pis") {
# Density panel for the mixing proportions; proportions live on (0,1),
# hence the .logitdensity helper (presumably a logit-scale KDE — defined elsewhere).
x.plot <- t(clust$pi.prop)
if(matx) {
if(mispal) grDevices::palette(viridis(max(G, 2L), option="D", alpha=transparency))
plot.x <- lapply(as.data.frame(x.plot), .logitdensity, bw="SJ")
fitx <- sapply(plot.x, "[[", "x")
fity <- sapply(plot.x, "[[", "y")
graphics::matplot(fitx, fity, type="l", ylab="", lty=1, col=seq_along(grDevices::palette()), xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nMixing Proportions")))))
} else {
# Single cluster 'ind': line plot plus a polygon closed at the left edge / zero.
x.plot <- x.plot[,ind]
fit <- .logitdensity(x.plot, bw="SJ")
fitx <- fit$x
fity <- fit$y
base::plot(fitx, fity, type="l", main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nMixing Proportions - Cluster ", ind)))))
graphics::polygon(c(min(fitx), fitx), c(0, fity), col=grey, border=NA)
}
}
if(param == "alpha") {
# Density panel for the Dirichlet(-process) concentration parameter alpha.
plot.x <- clust$Alpha
# Under a Pitman-Yor prior, alpha is bounded below by -discount; use that as
# the left truncation point for the KDE, otherwise truncate at 0.
tr <- ifelse(attr(x, "Pitman"), - max(if(is.null(attr(x, "Discount"))) clust$Discount$discount else attr(x, "Discount"), 0), 0)
plot.d <- .logdensity(plot.x$alpha, left=tr, bw="SJ")
plot.d$y[plot.d$x < tr] <- 0L
base::plot(plot.d, main="", ylab="", xlab="")
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nAlpha")))))
graphics::polygon(plot.d, col=grey, border=NA)
if(intervals) {
# Mark the posterior mean; clip so the dashed line stops at the curve height.
avg <- plot.x$post.alpha
graphics::clip(avg, avg, 0, plot.d$y[which.min(abs(plot.d$x - avg))])
graphics::abline(v=avg, col=2, lty=2)
}
}
if(param == "discount") {
# Density panel for the Pitman-Yor discount parameter, which has a
# spike-and-slab posterior: a point mass at 0 (weight post.kappa) plus a
# continuous part on (0,1).
plot.x <- clust$Discount
x.plot <- as.vector(plot.x$discount)
# The logit-KDE fails if (nearly) all samples equal 0; handled below.
fit <- try(.logitdensity(x.plot, bw="SJ"), silent = TRUE)
if(!inherits(fit, "try-error")) {
fitx <- fit$x
# Rescale the continuous density by the slab weight (1 - P(discount == 0)).
fity <- fit$y * (1 - plot.x$post.kappa)
base::plot(fitx, fity, type="l", main="", xlab="", ylab="", xlim=c(0, max(fitx)))
usr <- graphics::par("usr")
if(plot.x$post.kappa > 0) {
# Draw the spike at zero as a vertical line clipped to start at y=0.
graphics::clip(usr[1L], usr[2L], 0, usr[4L])
graphics::abline(v=0, col=3, lwd=2)
graphics::clip(usr[1L], usr[2L], usr[3L], usr[4L])
}
if(titles) graphics::title(main=list(paste0("Density", ifelse(all.ind, "", paste0(":\nDiscount")))))
graphics::polygon(c(min(fitx), fitx), c(0, fity), col=grey, border=NA)
if(intervals) {
# Mark the posterior mean discount, clipped to the curve height.
D <- plot.x$post.disc
d2 <- fity[which.min(abs(fitx - D))]
if(is.finite(d2)) {
graphics::clip(D, D, 0, d2)
graphics::abline(v=D, col=2, lty=2)
graphics::clip(usr[1L], usr[2L], usr[3L], usr[4L])
}
}
} else { warning(paste0(ifelse(attr(x, "Kappa0"), "Acceptance", "Mutation"), " rate too low: can't plot density\n"), call.=FALSE)
# Keep multi-panel layouts aligned by advancing to an empty frame.
if(all.ind) graphics::plot.new()
}
}
}
if(m.sw["M.sw"]) {
if(is.element(param, c("scores", "loadings"))) {
# Resolve/validate the (row, column) indices before plotting posterior
# summaries of scores or loadings.
if(indx) {
# No user-supplied index: default to observation/variable 1 and (for
# scores) the first two factors when available.
ind <- switch(EXPR=param, scores=c(1L, min(Q.max, 2L)), c(1L, 1L))
}
if(!facx) {
# No user-supplied factor index: use the cluster-specific default.
ind[2L] <- fac[g]
}
if(param == "scores") {
if(any(ind[1L] > Q.max,
ind[2L] > Q.max)) stop(paste0("Only the first ", Q.max, " columns can be plotted"), call.=FALSE)
} else if(ind[2L] > Q) stop(paste0("Only the first ", Q, " columns can be plotted"), call.=FALSE)
}
if(param == "means") {
# Posterior (or last-sample) summary plot of the cluster means.
x.plot <- if(show.last) x$Means$last.mu else x$Means$post.mu
plot.x <- x.plot[,g]
if(ci.sw[param]) ci.x <- x$Means$ci.mu
if(g == min(Gs) && isTRUE(common)) {
# Common y-axis limits across all clusters, computed once on the first g.
pxx <- range(vapply(x.plot, range, numeric(2L)))
cixx <- if(all(intervals, ci.sw[param])) range(vapply(ci.x, range, numeric(2L)))
} else if(!common) {
# Per-cluster limits.
pxx <- range(plot.x)
cixx <- if(all(intervals, ci.sw[param])) range(ci.x[[g]])
}
if(ci.sw[param]) ci.x <- ci.x[[g]]
# Centered single-cluster models get fixed [-1, 1] limits.
base::plot(plot.x, type=type, ylab="", xlab="Variable", ylim=if(is.element(method, c("FA", "IFA")) && attr(x, "Center")) c(-1, 1) else if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x, li=ci.x[,1L], ui=ci.x[,2L], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nMeans", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
# type="n" replaces points with variable-name labels.
if(type == "n") graphics::text(x=seq_along(plot.x), y=plot.x, var.names, cex=0.5)
}
if(param == "scores") {
# Posterior (or last-sample) scores: either a heatmap of the full score
# matrix or a scatter plot of one/two factors, optionally coloured by
# cluster labels.
# Labels: supplied zlabels take precedence; otherwise fall back to the
# model's MAP (or last-sample) allocations, or a constant for G = 1.
labs <- if(z.miss) { if(!grp.ind) 1L else if(show.last) clust$last.z else clust$MAP } else if(is.factor(zlabels)) as.integer(zlabels) else zlabels
p.eta <- if(show.last) x$Scores$last.eta else x$Scores$post.eta
# First non-empty cluster index drawn — used to run one-time setup below.
eta1st <- if(plot.meth == "all" || !gx) 1L else which.min(grp.size > 0)
if(g.score) {
# Plot only the observations allocated to cluster g.
# NOTE(review): 'tmplab' is only (re)assigned when g.ind == eta1st; later
# iterations appear to rely on it persisting from the first — confirm.
if(g.ind == eta1st) tmplab <- labs
z.ind <- tmplab %in% g
plot.x <- p.eta[z.ind,,drop=FALSE]
ind2 <- ifelse(any(!facx, Q <= 1), ind[2L], if(Q > 1) max(2L, ind[2L]))
if(ci.sw[param]) ci.x <- x$Scores$ci.eta[,z.ind,, drop=FALSE]
labs <- g
n.eta <- grp.size[g]
} else {
# Plot all observations together.
plot.x <- p.eta
ind2 <- ifelse(any(!facx, Q.max <= 1), ind[2L], if(Q.max > 1) max(2L, ind[2L]))
if(ci.sw[param]) ci.x <- x$Scores$ci.eta
n.eta <- n.obs
}
if(isTRUE(heat.map)) {
# --- Heatmap of the scores matrix ---
if(titles && !all.ind) graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
if(g.ind == eta1st) {
# Colour-map the full matrix once; split by MAP cluster if plotting per-cluster.
sxx <- mat2cols(p.eta, cols=hcols, na.col=graphics::par()$bg, ...)
sxx <- if(g.score) lapply(split(sxx, factor(clust$MAP, levels=Gseq)), matrix, ncol=ncol(sxx)) else sxx
pxx <- range(p.eta)
}
pxx <- if(g.score) range(plot.x) else pxx
plot_cols(if(g.score) sxx[[g]] else sxx, ...)
# Dashed line separating the cluster's Q active columns from the Q.max total.
if(!is.element(Q.max, c(1, Q)) && all(plot.meth != "all", !common)) graphics::abline(v=Q + 0.5, lty=2, lwd=2)
if(titles) {
graphics::title(main=list(paste0(post.last, ifelse(!all.ind, " Scores ", " "), "Heatmap", ifelse(all(!all.ind, grp.ind, !common), paste0(" - Cluster ", g), ""))))
if(all.ind || common) {
graphics::axis(1, line=-0.5, tick=FALSE, at=Qmseq, labels=Qmseq)
} else {
# Replace the tick label at Q with a hat(q)_g annotation.
graphics::axis(1, line=-0.5, tick=FALSE, at=Qmseq, labels=replace(Qmseq, Q, NA))
if(Q > 0) {
graphics::axis(1, line=-0, tick=FALSE, at=Q, labels=substitute(paste(hat(q)['g'], " = ", Q), list(Q=Q)), cex.axis=1.5)
} else message("Estimated number of columns in corresponding loadings matrix was zero\n")
}
suppressWarnings(heat_legend(data=pxx, cols=hcols, cex.lab=0.8, ...))
if(Q.max != 1) {
# Dashed column separators, positioned in user coordinates.
absq <- seq(from=graphics::par("usr")[1L], to=graphics::par("usr")[2L], length.out=Q.max + 1L)
graphics::abline(v=absq[-c(1L, length(absq))], lty=2, lwd=1, col=grey)
}
}
graphics::box(lwd=2)
graphics::mtext(ifelse(Q.max > 1, "Factors", "Factor"), side=1, line=2)
} else {
# --- Scatter plot of scores ---
if((mispal && G >= 2) || !z.miss) grDevices::palette(viridis(ifelse(z.miss, max(G, 2L), length(unique(labs))), option="D", alpha=transparency))
col.s <- if(is.factor(labs)) as.integer(levels(labs))[labs] else labs
# Line type makes no sense for a score scatter; coerce to points.
type.s <- ifelse(any(type.x, type == "l"), "p", type)
if(ind2 != 1) {
# Two distinct factors: factor ind[1] vs factor ind2, with optional
# CI bars in both the y (first call) and x (err="x") directions.
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(plot.x[,ind[1L]], plot.x[,ind2], li=ci.x[1L,,ind2], ui=ci.x[2L,,ind2], gap=TRUE, pch=NA, scol=grey, slty=3, xlab=paste0("Factor ", ind[1L]), ylab=paste0("Factor ", ind2)))
suppressWarnings(.plot_CI(plot.x[,ind[1L]], plot.x[,ind2], li=ci.x[1L,,ind[1L]], ui=ci.x[2L,,ind[1L]], add=TRUE, gap=TRUE, pch=NA, scol=grey, slty=3, err="x"))
if(type.s != "n") graphics::points(plot.x[,ind[1L]], plot.x[,ind2], type=type.s, col=col.s, pch=20)
} else {
base::plot(plot.x[,ind[1L]], plot.x[,ind2], type=type.s, col=col.s, pch=20, cex=0.8,
xlab=paste0("Factor ", ind[1L]), ylab=paste0("Factor ", ind2))
}
if(type.s == "n") graphics::text(plot.x[,ind[1L]], plot.x[,ind2], obs.names, col=col.s, cex=0.5)
} else {
# Only one factor available: plot it against observation index.
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(if(g.score) seq_len(grp.size[g]) else seq_len(n.obs), plot.x[,ind[1L]], li=ci.x[1L,,ind[1L]], ui=ci.x[2L,,ind[1L]], gap=TRUE, pch=NA, scol=grey, slty=3, xlab="Observation", ylab=paste0("Factor ", ind[1L])))
graphics::points(plot.x[,ind[1L]], type=type.s, col=col.s, pch=20)
} else {
base::plot(plot.x[,ind[1L]], type=type.s, col=col.s, xlab="Observation", ylab=paste0("Factor ", ind[1L]), pch=20)
}
if(type.s == "n") graphics::text(plot.x[,ind[1L]], col=col.s, cex=0.5)
}
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", ":\nScores"), ifelse(g.score, paste0(" - Cluster ", g), ""))))
}
}
if(param == "loadings") {
# Posterior (or last-sample) loadings: heatmap of the full matrix, or a
# line/point plot of one variable (across factors) or one factor (across
# variables), with optional credible intervals.
plot.x <- if(show.last) x$Loadings$last.load else x$Loadings$post.load
if(ci.sw[param]) ci.x <- x$Loadings$ci.load
if(g == min(Gs[nLx[Gs]]) && isTRUE(common)) {
# Common scale across clusters, computed once on the first cluster that
# actually has a loadings matrix (nLx appears to flag non-empty loadings).
if(isTRUE(heat.map)) {
if(any(Qs == 0)) {
# Some clusters have zero factors: colour-map only the non-NULL matrices.
lxx <- vector("list", G)
lxx[nLx] <- mat2cols(Filter(Negate(is.null), plot.x), cols=hcols, compare=G > 1, na.col=graphics::par()$bg, ...)
} else {
lxx <- mat2cols(if(G > 1) plot.x else plot.x[[g]], cols=hcols, compare=G > 1, na.col=graphics::par()$bg, ...)
}
} else {
cixx <- if(all(intervals, ci.sw[param], !heat.map)) { if(by.fac) range(vapply(Filter(Negate(is.null), ci.x), function(x) range(x[,,ind[2L]]), numeric(2L))) else range(vapply(Filter(Negate(is.null), ci.x), function(x) range(x[,ind[1L],]), numeric(2L))) }
}
pxx <- range(vapply(Filter(Negate(is.null), plot.x), range, na.rm=TRUE, numeric(2L)))
}
if(!nLx[g]) { break
# Nothing to plot for this cluster — 'break' exits the enclosing loop over
# clusters (defined outside this view).
} else if(!common) {
# Per-cluster scale.
if(isTRUE(heat.map)) {
lxx <- mat2cols(plot.x[[g]], cols=hcols, compare=FALSE, na.col=graphics::par()$bg, ...)
} else {
cixx <- if(all(intervals, ci.sw[param], !heat.map)) { if(by.fac) range(ci.x[[g]][,,ind[2L]]) else range(ci.x[[g]][,ind[1L],]) }
}
pxx <- range(plot.x[[g]])
}
if(isTRUE(heat.map)) {
# --- Heatmap of the loadings matrix ---
if(titles && !all.ind) graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
plot_cols(if(G > 1 && isTRUE(common)) lxx[[g]] else lxx, ...)
if(titles) {
graphics::title(main=list(paste0(post.last, ifelse(!all.ind, " Loadings ", " "), "Heatmap", ifelse(all(!all.ind, grp.ind), paste0(" - Cluster ", g), ""))))
graphics::axis(1, line=-0.5, tick=FALSE, at=seq_len(Q), labels=seq_len(Q))
if(n.var < 100) {
# Variable names on the y-axis (truncated to 11 chars), top-to-bottom.
graphics::axis(2, cex.axis=0.5, line=-0.5, tick=FALSE, las=1, at=seq_len(n.var), labels=substring(var.names[n.var:1L], 1L, 11L))
}
suppressWarnings(heat_legend(data=pxx, cols=hcols, cex.lab=0.8, ...))
}
graphics::box(lwd=2)
graphics::mtext(ifelse(Q > 1, "Factors", "Factor"), side=1, line=2, cex=0.8)
if(Q != 1 && titles) {
# Dashed separators between factor columns, in user coordinates.
absq <- seq(from=graphics::par("usr")[1L], to=graphics::par("usr")[2L], length.out=Q + 1)
graphics::abline(v=absq[-c(1L, length(absq))], lty=2, lwd=1, col=grey)
}
} else {
# --- Line/point plot of one slice of the loadings ---
plot.x <- plot.x[[g]]
if(ci.sw[param]) ci.x <- ci.x[[g]]
if(!by.fac) {
# One variable across all factors.
if(ci.sw[param]) ci.x <- as.matrix(ci.x[,ind[1L],])
base::plot(plot.x[ind[1L],], type=type, xaxt="n", xlab="", ylab="Loading", ylim=if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x[ind[1L],], li=ci.x[1L,], ui=ci.x[2L,], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
graphics::axis(1, line=-0.5, tick=FALSE, at=seq_len(Q), labels=seq_len(Q))
graphics::mtext("Factors", side=1, line=2)
if(titles) graphics::title(main=list(paste0(post.last, ":\n", ifelse(!all.ind, paste0("Loadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), "")), ""), var.names[ind[1L]], " Variable")))
if(type == "n") graphics::text(x=plot.x[ind[1L],], paste0("Factor ", seq_len(Q)), cex=0.5)
} else {
# One factor across all variables.
if(ci.sw[param]) ci.x <- as.matrix(ci.x[,,ind[2L]])
base::plot(plot.x[,ind[2L]], type=type, xaxt="n", xlab="", ylab="Loading", ylim=if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x[,ind[2L]], li=ci.x[1L,], ui=ci.x[2L,], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
graphics::axis(1, line=-0.5, tick=FALSE, at=seq_len(n.var), labels=seq_len(n.var))
graphics::mtext("Variable #", side=1, line=2, cex=0.8)
if(titles) graphics::title(main=list(paste0(post.last, ":\n", ifelse(!all.ind, paste0("Loadings - ", ifelse(grp.ind, paste0("Cluster ", g, " - "), "")), ""), "Factor ", ind[2L])))
if(type == "n") graphics::text(x=plot.x, var.names, cex=0.5)
}
}
}
if(param == "uniquenesses") {
# Posterior (or last-sample) summary plot of the uniquenesses, with a
# y-axis anchored at 0 (uniquenesses are non-negative).
x.plot <- if(show.last) x$Uniquenesses$last.psi else x$Uniquenesses$post.psi
plot.x <- x.plot[,g]
if(ci.sw[param]) ci.x <- x$Uniquenesses$ci.psi
if(g == min(Gs) && isTRUE(common)) {
# Common y-limits across clusters, computed once on the first g.
pxx <- c(0, max(vapply(x.plot, max, numeric(1L))))
cixx <- if(all(intervals, ci.sw[param])) c(0, max(vapply(ci.x, max, numeric(1L))))
} else if(!common) {
pxx <- c(0, max(plot.x))
cixx <- if(all(intervals, ci.sw[param])) c(0, max(ci.x[[g]]))
}
if(ci.sw[param]) ci.x <- ci.x[[g]]
base::plot(plot.x, type=type, ylab="", xlab="Variable", ylim=if(all(intervals, ci.sw[param])) cixx else pxx, lend=1)
if(all(intervals, ci.sw[param])) suppressWarnings(.plot_CI(plot.x, li=ci.x[,1L], ui=ci.x[,2L], slty=3, scol=grey, add=TRUE, gap=TRUE, pch=ifelse(type == "n", NA, 20)))
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nUniquenesses", ifelse(grp.ind, paste0(" - Cluster ", g), ""))))))
# type="n" replaces points with variable-name labels.
if(type == "n") graphics::text(seq_along(plot.x), plot.x, var.names, cex=0.5)
}
if(param == "pis") {
# Posterior (or last-sample) mixing proportions as a barplot on [0, 1],
# optionally overlaid with credible-interval whiskers (barplot() returns
# the bar midpoints, which are reused as the x-coordinates for .plot_CI).
plot.x <- if(show.last) clust$last.pi else clust$post.pi
if(ci.sw[param]) ci.x <- clust$ci.pi
if(matx) {
# All clusters at once.
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(graphics::barplot(plot.x, ylab="", xlab="", col=grey, ylim=c(0, 1), cex.names=0.7),
plot.x, li=ci.x[,1L], ui=ci.x[,2L], slty=3, scol="red", add=TRUE, gap=TRUE, pch=20))
} else {
graphics::barplot(plot.x, ylab="", xlab="", ylim=c(0, 1), cex.names=0.7)
}
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nMixing Proportions")))))
} else {
# Single cluster 'ind'.
if(all(intervals, ci.sw[param])) {
suppressWarnings(.plot_CI(graphics::barplot(plot.x[ind], ylab="", xlab="", ylim=c(0, 1), cex.names=0.7),
plot.x[ind], li=ci.x[ind,1L], ui=ci.x[ind,2L], slty=3, scol="red", add=TRUE, gap=TRUE, pch=20))
} else {
graphics::barplot(plot.x[ind], ylab="", xlab="Variable", ylim=c(0, 1), cex.names=0.7)
}
if(titles) graphics::title(main=list(paste0(post.last, ifelse(all.ind, "", paste0(":\nMixing Proportions - Cluster ", ind)))))
}
}
if(is.element(param, c("alpha", "discount"))) {
# Text-only summary panel for alpha/discount: draws an empty plot frame and
# writes posterior mean, variance, credible interval, last sample, and (if
# Metropolis-Hastings updates were used) the acceptance/mutation rate, at
# hand-tuned y-positions.
if(param == "discount" &&
attr(x, "Kappa0")) message(paste0("Spike-and-slab prior not invoked as alpha was fixed <= 0 (alpha=", attr(x, "Alpha"), ")\n"))
# Blank canvas in [0,1] x [0,1] with no axes/annotation.
base::plot(c(0, 1), c(0, 1), ann=FALSE, bty='n', type='n', xaxt='n', yaxt='n')
if(titles) graphics::title(main=list(paste0("Summary Statistics", ifelse(all.ind, "", paste0(":\n", switch(EXPR=param, alpha="Alpha", discount="Discount"))))))
plot.x <- switch(EXPR=param, alpha=clust$Alpha[-1L], discount=clust$Discount[-1L])
x.step <- switch(EXPR=param, alpha=attr(x, "Alph.step"), discount=attr(x, "Disc.step"))
conf <- attr(x, "Conf.Level")
digits <- options()$digits
# MH == TRUE when the parameter was sampled by Metropolis-Hastings
# (inferred here from a non-unit acceptance rate / method).
MH <- switch(EXPR=param, alpha=is.element(method, c("OMFA", "OMIFA")) || plot.x$alpha.rate != 1, discount=plot.x$disc.rate != 1)
a.adj <- rep(0.5, 2)
# Scale text with the figure height (par()$fin[2]).
a.cex <- graphics::par()$fin[2L]/ifelse(MH, 4, 3)
pen <- ifelse(MH, 0, 0.15)
tz <- isTRUE(attr(x, "TuneZeta"))
# y1..y6: vertical layout positions, compressed when extra lines (MH rate,
# tuned zeta) must also fit on the panel.
y1 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.9, 0.85), discount=0.9), 0.925) - pen/3
y2 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.725, 0.675), discount=0.725), 0.825) - pen/3
y3 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.6125, 0.55), discount=0.6125), 0.7625) - pen
y4 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.5375, 0.4875), discount=0.55), 0.7125) - pen * 5/4
y5 <- ifelse(MH, switch(EXPR=param, alpha=ifelse(tz, 0.2, 0.1375), discount=0.2125), 0.2125)
y6 <- y5 + 0.0125
graphics::text(x=0.5, y=y1, cex=a.cex, col="black", adj=a.adj, expression(bold("Posterior Mean:\n")))
graphics::text(x=0.5, y=y1, cex=a.cex, col="black", adj=a.adj, bquote(.(round(switch(EXPR=param, alpha=plot.x$post.alpha, discount=plot.x$post.disc), digits))))
graphics::text(x=0.5, y=y2 - pen, cex=a.cex, col="black", adj=a.adj, expression(bold("\nVariance:\n")))
graphics::text(x=0.5, y=y2 - pen, cex=a.cex, col="black", adj=a.adj, bquote(.(round(switch(EXPR=param, alpha=plot.x$var.alpha, discount=plot.x$var.disc), digits))))
graphics::text(x=0.5, y=y3 - pen, cex=a.cex, col="black", adj=a.adj, bquote(bold(.(100 * conf)) * bold("% Credible Interval:")))
graphics::text(x=0.5, y=y4 - pen, cex=a.cex, col="black", adj=a.adj, bquote(paste("[", .(round(switch(EXPR=param, alpha=plot.x$ci.alpha[1L], discount=plot.x$ci.disc[1L]), digits)), ", ", .(round(switch(EXPR=param, alpha=plot.x$ci.alpha[2L], discount=plot.x$ci.disc[2L]), digits)), "]")))
graphics::text(x=0.5, y=y5, cex=a.cex, col="black", adj=a.adj, expression(bold("Last Valid Sample:\n")))
graphics::text(x=0.5, y=y6, cex=a.cex, col="black", adj=a.adj, bquote(.(round(switch(EXPR=param, alpha=plot.x$last.alpha, discount=plot.x$last.disc), digits))))
if(isTRUE(MH)) {
# Report acceptance (alpha / spike-and-slab) or mutation (discount) rate.
rate <- switch(EXPR=param, alpha="Acceptance Rate:", discount=paste0(ifelse(attr(x, "Kappa0"), "Acceptance", "Mutation"), " Rate:"))
y7 <- switch(EXPR=param, alpha=ifelse(tz, 0.4375, 0.3625), discount=0.4375)
y8 <- switch(EXPR=param, alpha=ifelse(tz, 0.375, 0.3125), discount=0.375)
graphics::text(x=0.5, y=y7, cex=a.cex, col="black", adj=a.adj, substitute(bold(rate)))
graphics::text(x=0.5, y=y8, cex=a.cex, col="black", adj=a.adj, bquote(paste(.(round(100 * switch(EXPR=param, alpha=plot.x$alpha.rate, discount=plot.x$disc.rate), 2L)), "%")))
}
if(param == "discount") {
# Posterior probability of discount == 0 (spike weight).
graphics::text(x=0.5, y=0.1275, cex=a.cex, col="black", adj=a.adj, bquote(bold(hat(kappa)) * bold(" - Posterior Proportion of Zeros:")))
graphics::text(x=0.5, y=0.0575, cex=a.cex, col="black", adj=a.adj, bquote(.(round(plot.x$post.kappa, digits))))
}
if(param == "alpha" && tz) {
# Mean of the tuned MH step-size parameter zeta.
graphics::text(x=0.5, y=0.1275, cex=a.cex, col="black", adj=a.adj, bquote(bold(hat(zeta)) * bold(" - Posterior Mean Zeta:")))
graphics::text(x=0.5, y=0.0575, cex=a.cex, col="black", adj=a.adj, bquote(.(round(plot.x$avg.zeta, digits))))
}
}
# Restore the user-supplied indices (xind) that were overwritten by the
# per-cluster defaults above, so the next iteration starts clean.
if(!indx) { ind[1L] <- xind[1L]
if(all(facx, is.element(param, c("scores",
"loadings")))) ind[2L] <- xind[2L]
}
# xxind was stashed in the trace-plot branch; reinstate it for "all" plots.
if(all.ind) ind <- xxind
}
if(m.sw["G.sw"]) {
# Decide which model-size plots apply for this method / pass number g:
# plotG.ind - posterior of G (infinite/overfitted mixture methods only);
# plotQ.ind - posterior of Q (adaptive-factor methods; on the 2nd pass for IM/OM-IFA);
# plotT.ind - trace of the number of clusters (2nd or 3rd pass depending on method).
plotG.ind <- is.element(method, c("IMIFA", "IMFA", "OMIFA", "OMFA"))
plotQ.ind <- adapt <- any(is.element(method, c("IFA", "MIFA")), all(is.element(method, c("IMIFA", "OMIFA")), g == 2))
plotT.ind <- any(all(g == 2, is.element(method, c("IMFA", "OMFA"))), all(is.element(method, c("IMIFA", "OMIFA")), g == 3))
# Pre-round the model-selection criteria if they were stored.
if(!(critx <- is.null(crit <- GQ.res$Criteria))) {
aicm <- round(crit$AICMs, 2L)
bicm <- round(crit$BICMs, 2L)
dic <- round(crit$DICs, 2L)
if(is.element(method, c("FA", "MFA", "OMFA", "IMFA"))) {
# MCMC-based AIC/BIC only exist for the finite-factor methods.
aic.mcmc <- round(crit$AIC.mcmcs, 2L)
bic.mcmc <- round(crit$BIC.mcmcs, 2L)
}
}
if(all(plotG.ind, g == 1)) {
# Barplot of the posterior distribution of the number of clusters G.
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
plot.G <- GQ.res$G.Counts
G.name <- names(plot.G)
rangeG <- as.numeric(G.name)
# Pad unvisited G values in the observed range with NA so the x-axis is contiguous.
rangeG <- seq(from=min(rangeG), to=max(rangeG), by=1)
missG <- setdiff(rangeG, G.name)
missG <- stats::setNames(rep(NA, length(missG)), as.character(missG))
plot.G <- c(plot.G, missG)
plot.G <- plot.G[order(as.numeric(names(plot.G)))]
# Highlight the modal/chosen G with a different palette colour.
col.G <- c(1L, ceiling(length(palette)/2))[(rangeG == G) + 1L]
G.plot <- graphics::barplot(plot.G, ylab="Frequency", xaxt="n", col=col.G)
if(titles) graphics::title(main=list("Posterior Distribution of G"))
graphics::axis(1, at=G.plot, labels=names(plot.G), tick=FALSE)
graphics::axis(1, at=Median(G.plot), labels="G", tick=FALSE, line=1.5)
}
if(plotQ.ind) {
# Barplot(s) of the posterior distribution of the number of factors Q.
if(method == "IFA") {
# Single-group case: one barplot, mirroring the G barplot above.
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
plot.Q <- GQ.res$Q.Counts
Q.name <- names(plot.Q)
rangeQ <- as.numeric(Q.name)
# Pad unvisited Q values with NA so the x-axis is contiguous.
rangeQ <- seq(from=min(rangeQ), to=max(rangeQ), by=1)
missQ <- setdiff(rangeQ, Q.name)
missQ <- stats::setNames(rep(NA, length(missQ)), as.character(missQ))
plot.Q <- c(plot.Q, missQ)
plot.Q <- plot.Q[order(as.numeric(names(plot.Q)))]
# Highlight the chosen Q.
col.Q <- c(1L, ceiling(length(palette)/2))[(rangeQ == Q) + 1L]
Q.plot <- graphics::barplot(plot.Q, ylab="Frequency", xaxt="n", col=col.Q)
if(titles) graphics::title(main=list("Posterior Distribution of Q"))
graphics::axis(1, at=Q.plot, labels=names(plot.Q), tick=FALSE)
graphics::axis(1, at=Median(Q.plot), labels="Q", tick=FALSE, line=1.5)
} else {
# Multi-group case: grouped barplot of Q_g per cluster, with a legend row.
if(mispal) grDevices::palette(viridis(max(G, 2L), option="D", alpha=transparency))
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
plot.Q <- GQ.res$Q.Counts
plot.Q <- if(inherits(plot.Q, "listof")) plot.Q else list(plot.Q)
Q.name <- lapply(plot.Q, names)
rangeQ <- as.numeric(unique(unlist(Q.name, use.names=FALSE)))
rangeQ <- seq(from=min(rangeQ), to=max(rangeQ), by=1)
# Align every cluster's counts onto the common Q range (NA-padded).
missQ <- lapply(Gseq, function(g) setdiff(rangeQ, as.numeric(Q.name[[g]])))
missQ <- lapply(Gseq, function(g) stats::setNames(rep(NA, length(missQ[[g]])), as.character(missQ[[g]])))
plot.Q <- lapply(Gseq, function(g) c(plot.Q[[g]], missQ[[g]]))
plot.Q <- do.call(rbind, lapply(Gseq, function(g) plot.Q[[g]][order(as.numeric(names(plot.Q[[g]])))]))
if(titles) {
# Reserve a thin bottom strip (1/10) for the legend.
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
Q.plot <- graphics::barplot(plot.Q, beside=TRUE, ylab="Frequency", xaxt="n", col=Gseq, space=c(0, 2))
if(titles) graphics::title(main=list(expression('Posterior Distribution of Q'["g"])))
graphics::axis(1, at=Rfast::colMedians(Q.plot), labels=colnames(plot.Q), tick=FALSE)
graphics::axis(1, at=Median(Q.plot), labels="Q", tick=FALSE, line=1)
if(titles) {
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
# For G > 5, interleave legend entries so they fill a two-row layout column-wise.
tmp <- if(G > 5) unlist(lapply(Gseq, function(g) c(Gseq[g], Gseq[g + ceiling(G/2)])))[Gseq] else Gseq
ltxt <- paste0("Cluster ", tmp)
lcol <- Gseq[tmp]
graphics::legend("center", legend=ltxt, ncol=if(G > 5) ceiling(G/2) else G, bty="n", pch=15, col=lcol, cex=max(0.7, 1 - 0.03 * G))
}
}
# Adaptation is only deemed to have happened if Q actually varied.
adapt <- attr(x, "Adapt") && length(unique(plot.Q[!is.na(plot.Q)])) != 1
}
if(plotT.ind) {
# Trace plot of the number of clusters over MCMC iterations: "active"
# clusters in one colour and "non-empty" clusters in another.
graphics::layout(1)
graphics::par(mar=c(5.1, 4.1, 4.1, 2.1))
col.G <- c(ceiling(length(palette)/2), 1)
x.plot <- GQ.res$Stored.G
# IM* methods store both series; OM* methods get a constant line at range.G.
plot.x <- if(is.element(method, c("IMFA", "IMIFA"))) t(x.plot) else cbind(as.vector(x.plot), rep(attr(x, "range.G"), ncol(x.plot)))
graphics::matplot(plot.x, type="l", col=palette[col.G], ylab="G", xlab="Iteration", main="", lty=if(is.element(method, c("IMFA", "IMIFA"))) 1 else seq_len(2L), ylim=c(0, max(plot.x)), las=1, yaxt="n")
# Integer-friendly y-axis: drop 0, always include 1.
g.axis <- pretty(c(0L, max(plot.x)))
g.axis <- unique(c(1L, g.axis[g.axis != 0]))
graphics::axis(2, at=g.axis, labels=g.axis, las=1)
if(titles) {
# Multi-colour title built from phantom() overlays so each word gets its
# series' colour.
graphics::title(main=list("Trace: \n\n"))
graphics::title(expression("Active" * phantom(" and Non-empty Clusters")), col.main = palette[1L])
graphics::title(expression(phantom("Active ") * "and" * phantom(" Non-empty Clusters")), col.main="black")
graphics::title(expression(phantom("Active and ") * "Non-empty" * phantom(" Clusters")), col.main = palette[col.G[1L]])
graphics::title(expression(phantom("Active and Non-empty ") * "Clusters"), col.main="black")
if(length(unique(plot.x[,1L])) > 1) {
# Overlay the modal G and its credible interval as horizontal dashed lines.
G.ci <- GQ.res$G.CI
graphics::lines(x=c(0, nrow(plot.x)), y=rep(G, 2), col=length(palette), lty=2, lwd=1)
if(G.ci[1L] != G) graphics::lines(x=c(0, nrow(plot.x)), y=rep(G.ci[1L], 2), lty=2, lwd=0.5, col=grey)
if(G.ci[2L] != G) graphics::lines(x=c(0, nrow(plot.x)), y=rep(G.ci[2L], 2), lty=2, lwd=0.5, col=grey)
}
}
}
# Nothing plottable for this method/pass combination: say so.
if(!any(plotQ.ind,
plotG.ind, plotT.ind)) message(paste0("Nothing to plot", switch(EXPR=method, FA=paste0(": Q = ", Q, "\n"), "\n")))
# Select which GQ.res components to print via the first letter of their
# names (G = G-related, Q = Q-related, P = ..., C = criteria, S = stored —
# exact meanings defined where GQ.res is built, outside this view).
gq.nam <- toupper(substring(names(GQ.res), 1L, 1L))
if(is.element(method, c("IMIFA", "OMIFA"))) {
# Staged printing across passes g = 1, 2, 3.
if(g == 1) {
print(GQ.res[gq.nam == "G"])
} else if(g == 2) {
if(adapt) {
print(if(attr(x, "C.Shrink")) GQ.res[gq.nam == "Q" | gq.nam == "P"] else GQ.res[gq.nam == "Q"])
#print(if(attr(x, "C.Shrink") || attr(x, "G.shrink")) GQ.res[gq.nam == "Q" | gq.nam == "P"] else GQ.res[gq.nam == "Q"])
} else print(GQ.res[gq.nam == "P"])
} else if(g == 3 && !critx) {
print(GQ.res[gq.nam == "C"])
}
} else if(is.element(method, c("OMFA", "IMFA"))) {
if(g == 1) {
print(GQ.res[gq.nam == "G"])
} else if(!critx) {
print(GQ.res[gq.nam != "G" & gq.nam != "S"])
}
} else if(!critx) {
switch(EXPR=method, MFA= {
print(GQ.res[gq.nam != "S"])
}, MIFA={
if(adapt) {
print(GQ.res[gq.nam != "S"])
} else {
# Without adaptation the Q counts are uninformative; omit them.
print(GQ.res[gq.nam != "Q" & gq.nam != "S"])
}
}, IFA= {
if(adapt) {
print(GQ.res[gq.nam != "S"][-1L])
} else {
print(GQ.res[gq.nam == "C"])
}
})
}
if(all(g == max(Gs), !critx && any(dim(bicm) > 1))) {
G.ind <- if(any(G.supp, !is.element(method, c("MFA", "MIFA")))) 1L else n.grp == G
Q.ind <- if(any(Q.supp, !is.element(method, c("FA", "MFA")))) 1L else n.fac == Q
if(!is.element(method, c("IFA", "MIFA"))) {
cat(paste0("AIC.mcmc = ", aic.mcmc[G.ind,Q.ind], "\n"))
cat(paste0("BIC.mcmc = ", bic.mcmc[G.ind,Q.ind], "\n"))
}
cat(paste0("AICM = ", aicm[G.ind,Q.ind], "\n"))
cat(paste0("BICM = ", bicm[G.ind,Q.ind], "\n"))
cat(paste0("DIC = ", dic[G.ind,Q.ind], "\n\n"))
}
if(!isTRUE(attr(x, "Nowarn.G"))) { cat("\n"); message(attr(x, "Nowarn.G"))
}
if(!isTRUE(attr(x, "Nowarn.Q"))) {
if(isTRUE(attr(x, "Nowarn.G"))) { cat("\n")}
if(!is.element(method,
c("OMFA", "IMFA")) || plotT.ind) message(attr(x, "Nowarn.Q"))
}
if(plotQ.ind) {
if(!adapt) message("No adaptation took place\n")
forceQg <- attr(x, "ForceQg")
if(attr(GQ.res, "Q.big")) warning(paste0("Q had to be prevented from exceeding its initial value", ifelse(forceQg, " (or exceeding the number of observations in one or more clusters)", ""), ".\nConsider re-running the model with a higher value for 'range.Q'", ifelse(forceQg, " or setting 'forceQg' to FALSE\n", "\n")), call.=FALSE)
}
}
if(m.sw["Z.sw"]) {
if(type == "l") stop("'type' cannot be 'l' for clustering uncertainty plots", call.=FALSE)
plot.x <- as.vector(clust$uncertainty)
if(g == 1 || g == 2) {
graphics::layout(1)
oneG <- 1/G
minG <- 1 - oneG
yax <- unique(c(0, pretty(c(0, minG))))
YAX <- which.min(abs(yax - minG))
yax[YAX] <- minG
yax <- abs(yax[yax < 1])
mind <- !is.null(prf) && !z.miss
}
if(g == 1) {
if(mispal) grDevices::palette(replace(viridis(8L, option="D", alpha=transparency), 2L, "red"))
col.x <- if(mind) replace(rep(5L, n.obs), prf$misclassified, 2L) else c(5L, 2L)[(plot.x >= oneG) + 1L]
if(type != "n") col.x[plot.x == 0] <- NA
graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
base::plot(plot.x, type=type, ylim=range(yax), col=col.x, yaxt="n", main="Clustering Uncertainty", ylab="Uncertainty", xlab="Observation", pch=ifelse(type == "n", NA, 16), lend=1)
graphics::lines(x=c(0, n.obs), y=c(oneG, oneG), lty=2, col=1)
graphics::axis(2, at=yax, labels=replace(yax, YAX, expression(1 - frac(1, hat(G)))), las=2, cex.axis=0.9, xpd=TRUE)
graphics::axis(2, at=oneG, labels=expression(frac(1, hat(G))), las=2, xpd=TRUE, side=4, xpd=TRUE)
if(type == "n") {
znam <- obs.names
znam[plot.x == 0] <- ""
graphics::text(x=seq_along(plot.x), y=plot.x, znam, col=col.x, cex=0.5)
}
} else if(g == 2) {
graphics::par(mar=c(5.1, 4.1, 4.1, 3.1))
x.ord <- order(plot.x)
x.plot <- plot.x[x.ord]
if(mind) mcO <- which(x.ord %in% prf$misclassified)
base::plot(x.plot, type="n", ylim=c(-max(x.plot)/32, max(yax)), main="Clustering Uncertainty Profile", ylab="Uncertainty", xaxt="n", yaxt="n", xlab="Observations in order of increasing uncertainty")
graphics::lines(x=c(0, n.obs), y=c(0, 0), lty=3, col=grey)
graphics::lines(x.plot)
graphics::points(x.plot, pch=15, cex=if(mind) replace(rep(0.5, n.obs), mcO, 0.75) else 0.5, col=if(mind) replace(rep(1, n.obs), mcO, 3) else 1)
graphics::lines(x=c(0, n.obs), y=c(oneG, oneG), lty=2, col=3)
graphics::axis(2, at=yax, labels=replace(yax, YAX, expression(1 - frac(1, hat(G)))), las=2, cex.axis=0.9, xpd=TRUE)
graphics::axis(2, at=oneG, labels=expression(frac(1, hat(G))), las=2, xpd=TRUE, side=4, xpd=TRUE)
if(mind) {
Nseq <- seq_len(n.obs)
for(i in prf$misclassified) {
x <- Nseq[x.ord == i]
graphics::lines(c(x, x), c(-max(plot.x)/32, plot.x[i]), lty=1, col=3, lend=1)
}
}
} else if(g == 3) {
if(titles) {
graphics::layout(rbind(1, 2), heights=c(1, 6))
graphics::par(mar=c(0, 4.1, 0.5, 2.1))
graphics::plot.new()
graphics::legend("center", legend=bquote({NA >= 1/hat(G)} == 1/.(G)), title="", pch=15, col=3, bty="n", y.intersp=graphics::par()$fin[2L] * 7/5)
graphics::legend("center", legend=c(" "," "), title=expression(bold("Clustering Uncertainty")), bty='n', y.intersp=graphics::par()$fin[2L] * 2/5, cex=graphics::par()$cex.main)
graphics::par(mar=c(5.1, 4.1, 0.5, 2.1))
}
x.plot <- graphics::hist(plot.x, plot=FALSE)
breaks <- if(sum(plot.x != 0)) x.plot$breaks else seq(from=0, to=max(plot.x, 1/G), by=1/G)
cols <- 2L + (breaks >= 1/G)
cols[cols == 2] <- grey
base::plot(x.plot, main="", xlab="Uncertainties", xlim=c(0, 1 - 1/G), col=cols, xaxt="n", ylim=c(0, max(x.plot$counts)), yaxt="n")
graphics::axis(1, at=c(breaks[round(breaks, 1) < min(0.8, 1 - 1/G)], 1 - 1/G), labels=(c(round(breaks[round(breaks, 1) < min(0.8, 1 - 1/G)], 3), expression(1 - frac(1, hat(G))))), las=2, pos=0, cex.axis=0.8)
graphics::axis(2, at=if(sum(plot.x) == 0) c(graphics::axTicks(2), max(x.plot$counts)) else graphics::axTicks(2), las=1, cex.axis=0.8)
} else if(g == 4) {
graphics::par(defpar)
if(titles) graphics::par(mar=c(4.1, 4.1, 4.1, 4.1))
plot.x <- clust$PCM
i.check <- any(!mispal, (!gx && !all(Gs == 4)))
if(brX) {
ilen <- length(brXs)
if(i.check &&
(length(palette) !=
length(brXs))) warning("'breaks' and 'palette' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
} else ilen <- 18L
i.cols <- if(i.check) palette else grDevices::heat.colors(ilen, rev=TRUE)
PCM <- mat2cols(plot.x, cols=i.cols, na.col=graphics::par()$bg, ...)
plot_cols(replace(PCM, plot.x == 0, NA), na.col=graphics::par()$bg, ...)
if(titles) {
graphics::title(main="Posterior Confusion Matrix")
graphics::mtext(side=1, at=Gseq, Gseq, line=1)
graphics::mtext(side=2, at=Gseq, rev(Gseq), line=1, las=1)
graphics::mtext(side=1, "Cluster", line=2)
graphics::mtext(side=2, "Allocation", line=2)
suppressWarnings(heat_legend(plot.x, cols=i.cols, cex.lab=0.8, ...))
}
graphics::box(lwd=2)
}
if(all(g == 5, z.sim)) {
plot.x <- as.matrix(clust$Z.avgsim$z.sim)
perm <- order(clust$MAP)
plot.x <- if((p.ind <- !identical(perm, clust$MAP))) plot.x[perm,perm] else plot.x
plot.x <- t(plot.x[,seq(from=ncol(plot.x), to=1L, by=-1L)])
graphics::par(defpar)
if(titles) graphics::par(mar=c(4.1, 4.1, 4.1, 4.1))
z.check <- any(!mispal, (!gx && !all(Gs == 5)))
if(brX) {
zlen <- length(brXs)
if(z.check &&
(length(palette) !=
length(brXs))) warning("'breaks' and 'palette' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
} else zlen <- 12L
z.col <- if(any(!mispal, (!gx && !all(Gs == 5)))) palette else grDevices::heat.colors(zlen, rev=TRUE)
col.mat <- mat2cols(plot.x, cols=z.col, na.col=graphics::par()$bg, ...)
col.mat[plot.x == 0] <- NA
plot_cols(col.mat, na.col=graphics::par()$bg, ...)
if(titles) {
graphics::title(main=list("Average Similarity Matrix"))
graphics::axis(1, at=n.obs/2, labels=paste0("Observation 1:N", if(p.ind) " (Reordered)"), tick=FALSE)
graphics::axis(2, at=n.obs/2, labels=paste0("Observation 1:N", if(p.ind) " (Reordered)"), tick=FALSE)
suppressWarnings(heat_legend(data=plot.x, cols = z.col, cex.lab=0.8, ...))
}
graphics::box(lwd=2)
if(p.ind) message("Rows and columns of similarity matrix reordered to correspond to MAP clustering\n")
}
if(g == min(Gs)) {
if(all(labelmiss,
z.miss)) {
cat("clustering table :")
print(table(clust$MAP), row.names=FALSE)
cat("\n")
}
if(g <= 3 &&
!is.null(prf)) {
class(prf) <- "listof"
print(prf)
}
}
}
if(m.sw["P.sw"]) {
plot.x <- switch(EXPR=param,
means= if(show.last) x$Means$last.mu else x$Means$post.mu,
uniquenesses= if(show.last) x$Uniquenesses$last.psi else x$Uniquenesses$post.psi,
loadings = if(show.last) x$Loadings$last.load[[g]] else x$Loadings$post.load[[g]])
plot.x <- switch(EXPR=param, loadings=plot.x[,rev(seq_len(Q)), drop=FALSE], plot.x)
x.plot <- rowRanges(plot.x, na.rm=TRUE)
plot.x <- if(param == "uniquenesses" && is.element(uni.type, c("isotropic", "single"))) plot.x else apply(plot.x, 2L, function(x) (x - min(x, na.rm=TRUE))/(max(x, na.rm=TRUE) - min(x, na.rm=TRUE)))
varnam <- paste0(toupper(substr(param, 1L, 1L)), substr(param, 2L, nchar(param)))
if(any(grp.ind, param == "loadings")) {
if(mispal) grDevices::palette(viridis(max(switch(EXPR=param, loadings=Q, G), 2L), option="D", alpha=transparency))
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
jitcol <- switch(EXPR=param, loadings=Q, G)
jit.x <- G == 1 || (param == "uniquenesses" && uni.type == "constrained")
type.u <- ifelse(type.x, switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, constrained=, unconstrained="p", single=, isotropic="l"), "p"), type)
if(!is.element(type.u,
c("l", "p"))) stop("Invalid 'type' for parallel coordinates plot", call.=FALSE)
graphics::matplot(seq_len(n.var) + if(!jit.x) switch(EXPR=type.u, p=matrix(stats::rnorm(jitcol * n.var, 0, min(0, max(1e-02, 1/n.var^2))), nrow=n.var, ncol=jitcol), 0) else 0,
plot.x, type=type.u, pch=15, col=switch(EXPR=param, loadings=rev(seq_len(Q)), seq_len(G)), xlab=switch(EXPR=uni.type, constrained=, unconstrained="Variable", ""),
lty=1, ylab=paste0(switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, constrained=, unconstrained="Standardised ", ""), "Standardised "), varnam),
xaxt="n", bty="n", main=paste0("Parallel Coordinates - ", post.last, ": ", varnam, ifelse(all(grp.ind, param == "loadings"), paste0("\nCluster ", g), "")))
graphics::axis(1, at=seq_len(n.var), labels=if(titles && n.var < 100) rownames(plot.x) else character(n.var), cex.axis=0.5, tick=FALSE, line=-0.5)
for(i in seq_len(n.var)) {
graphics::lines(c(i, i), c(0, 1), col=grey)
if(titles && n.var < 100) {
graphics::text(c(i, i), c(switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, single=, isotropic=graphics::par("usr")[3L], 0), 0),
switch(EXPR=param, uniquenesses=switch(EXPR=uni.type, single=, isotropic=graphics::par("usr")[4L], 1), 1)),
labels=format(x.plot[i,], digits=3), xpd=NA, offset=0.3, pos=c(1, 3), cex=0.5)
}
}
if(any(grp.ind, param == "loadings")) {
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
Xp <- switch(EXPR=param, loadings=Q, G)
Xseq <- seq_len(Xp)
tmp <- if(Xp > 5) unlist(lapply(Xseq, function(x) c(Xseq[x], Xseq[x + ceiling(Xp/2)])))[Xseq] else Xseq
ltxt <- paste0(switch(EXPR=param, loadings="Factor", "Cluster"), tmp)
lcol <- Xseq[tmp]
graphics::legend("center", pch=15, col=lcol, legend=ltxt, ncol=if(Xp > 5) ceiling(Xp/2) else Xp, bty="n", cex=max(0.7, 1 - 0.03 * Xp))
}
}
if(m.sw["E.sw"]) {
Pind <- is.element(errX, c("All", "PPRE")) && g == 1
Hind <- is.element(errX, c("All", "PPRE")) && g == 2
Cind <- (errX == "All" && g == 3) || !any(Pind, Hind)
error <- x$Error
if(Pind) {
graphics::boxplot(error$PPRE, col=palette[length(palette)])
if(titles) {
graphics::title(main=list(paste0("Posterior Predictive Reconstruction Error\n(using the ", switch(EXPR=toupper(attr(error, "Norm")), O=, "1"="One", I="Infinity", "F"="Frobenius", M="Maximum", "2"="Spectral"), " norm)")))
graphics::mtext("PPRE", side=2, line=2)
graphics::mtext(method, side=1, line=1)
}
indp <- switch(EXPR=errX, All=7L, PPRE=1L)
print(c(error$CIs[indp,], Mean=unname(error$Avg[indp]), Median=unname(error$Median[indp]), "Last Valid Sample"=unname(error$Final[indp]))[c(1L, 3L:5L, 2L)])
}
if(Hind) {
if(titles) {
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
ci.x <- error$RepCounts[[ind]]
dat.x <- error$DatCounts[[ind]]
dat.x[dat.x == 0] <- NA
suppressWarnings(.plot_CI(PPRE <- graphics::barplot(dat.x, ylim=c(0L, max(ci.x[3L,], dat.x, na.rm=TRUE)), col=grey),
ci.x[2L,], li=ci.x[1L,], ui=ci.x[3L,], add=TRUE, gap=TRUE, slty=2, scol="red", pch=15))
if(titles) {
graphics::axis(1, at=c(PPRE[1L] - 0.5, PPRE[-1L] - 0.6, PPRE[length(PPRE)] + 0.5), round(error$Breaks[[ind]], 2))
graphics::title(main=list(paste0(var.names[ind], " Variable")))
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
ltxt <- c("Data Bin Counts", "Median Replicate Bin Counts")
temp <- graphics::legend("center", legend=character(2L), text.width=max(graphics::strwidth(ltxt)), ncol=1, bty="n", cex=0.75, pt.cex=1.25, fill=c(grey, "black"), xjust=0.5)
graphics::text(temp$rect$left + temp$rect$w * 0.55, temp$text$y, ltxt)
}
}
if(Cind && is.element(errX, c("All", "Covs"))) {
post.x <- error$Post
plot.x <- switch(EXPR=errX, All=error$Median[-7L], error$Median)
last.x <- switch(EXPR=errX, All=error$Final[-7L], error$Final)
if(titles) {
graphics::layout(rbind(1, 2), heights=c(9, 1))
graphics::par(mar=c(3.1, 4.1, 4.1, 2.1))
}
col.e <- seq_along(plot.x)
if(mispal) grDevices::palette(viridis(length(col.e) + 2L, option="D", alpha=transparency)[-seq_len(2L)])
ci.x <- switch(EXPR=errX, All=error$CIs[-7L,], error$CIs)
erange <- pretty(c(min(c(ci.x[,1L], plot.x, post.x)), max(c(ci.x[,2L], plot.x, post.x))))
x.plot <- graphics::barplot(plot.x, col=col.e, ylim=erange[c(1L, length(erange))], main="", yaxt="n", ylab="Error")
graphics::axis(2, at=erange, labels=erange)
e.col <- grDevices::adjustcolor(c("red", "darkorchid"), alpha.f=transparency)
graphics::points(x=x.plot, post.x, pch=15, col=e.col[1L], cex=2, xpd=TRUE)
graphics::points(x=x.plot, last.x, pch=18, col=e.col[2L], cex=2, xpd=TRUE)
suppressWarnings(.plot_CI(x.plot, plot.x, li=ci.x[,1L], ui=ci.x[,2L], add=TRUE, gap=TRUE, slty=3, lwd=3, scol=grey, pch=19))
if(titles) {
graphics::title(main=list("Covariance Error Metrics"))
graphics::par(mar=c(0, 0, 0, 0))
graphics::plot.new()
ltxt <- c("Median Error Metrics", "Error Metrics Evaluated at Posterior Mean", "Error Metrics Evaluated at Last Valid Sample")
temp <- graphics::legend("center", legend=character(3L), text.width=max(graphics::strwidth(ltxt)), ncol=1, bty="n", cex=0.75, pt.cex=1.25, pch=c(19, 15, 18), col=c("black", e.col), xjust=0.5)
graphics::text(temp$rect$left + temp$rect$w * 0.55, temp$text$y, ltxt)
}
metric <- rbind(plot.x, post.x, last.x)
rownames(metric) <- c("Medians", "Evaluated at Posterior Mean", "Evaluated at Last Valid Sample")
print(metric)
} else if(Cind) {
graphics::par(defpar)
plot.x <- error$Post
col.e <- seq_along(plot.x)
if(mispal) grDevices::palette(viridis(length(col.e) + 2L, option="D", alpha=transparency)[-seq_len(2L)])
graphics::barplot(plot.x, col=col.e, main="", ylab="Error")
print(provideDimnames(matrix(plot.x, nrow=1), base=list("Evaluated at Posterior Mean", names(plot.x))))
}
}
if(m.sw["C.sw"]) {
grDevices::palette(tmp.pal)
if(!all.ind) {
partial <- FALSE
graphics::par(mai=c(1.25, 1, 0.75, 0.5), mfrow=c(1, 2), oma=c(0, 0, 2, 0))
}
if(param == "means") {
plot.x <- x$Means$mus[[g]]
if(!partial) {
stats::acf(plot.x[ind,], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, paste0(":\n", var.names[ind], " Variable"), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind,], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, paste0(":\n", var.names[ind], " Variable"), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Means - ", ifelse(grp.ind, paste0("Cluster ", g, ":\n"), ""), var.names[ind], " Variable")), outer=TRUE)
}
}
if(param == "scores") {
plot.x <- x$Scores$eta
if(!partial) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, paste0(":\n", "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L]), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, paste0(":\n", "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L]), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Scores - ", "Observation ", obs.names[ind[1L]], ", Factor ", ind[2L])), outer=TRUE)
}
}
if(param == "loadings") {
plot.x <- x$Loadings$lmats[[g]]
if(!partial) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, paste0(":\n", var.names[ind[1L]], " Variable, Factor ", ind[2L]), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind[1L],ind[2L],], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, paste0(":\n", var.names[ind[1L]], " Variable, Factor ", ind[2L]), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Loadings - ", ifelse(grp.ind, paste0("Cluster ", g, ":\n"), ""), var.names[ind[1L]], " Variable, Factor ", ind[2L])), outer=TRUE)
}
}
if(param == "uniquenesses") {
plot.x <- x$Uniquenesses$psis[[g]]
if(!partial) {
stats::acf(plot.x[ind,], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all.ind, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n", var.names[ind], " Variable"), ""), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind,], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(partial, switch(EXPR=uni.type, constrained=, unconstrained=paste0(":\n", var.names[ind], " Variable"), ""), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Uniquenesses - ", ifelse(grp.ind, paste0("Cluster ", g, ":\n"), ""), switch(EXPR=uni.type, constrained=, unconstrained=paste0(var.names[ind], " Variable"), ""))), outer=TRUE)
}
}
if(param == "pis") {
plot.x <- clust$pi.prop
if(!partial) {
stats::acf(plot.x[ind,], main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF", ifelse(all(all.ind, matx), paste0(" - Cluster ", ind), ""))))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x[ind,], main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF", ifelse(all(all.ind, matx), paste0(" - Cluster ", ind), ""))))
if(all(!all.ind, titles)) graphics::title(main=list(paste0("Mixing Proportions - Cluster ", ind)), outer=TRUE)
}
}
if(is.element(param, c("alpha", "discount"))) {
plot.x <- switch(EXPR=param, alpha=clust$Alpha$alpha, discount=as.vector(clust$Discount$discount))
if(switch(EXPR=param, alpha=clust$Alpha$alpha.rate, discount=clust$Discount$disc.rate) == 0 ||
((is.null(attr(x, "Discount")) || attr(x, "Discount") >= 0) && length(unique(round(plot.x, min(.ndeci(plot.x))))) == 1)) {
warning(paste0(switch(EXPR=param, alpha="Acceptance", discount=ifelse(attr(x, "Kappa0"), "Acceptance", "Mutation")), " rate too low: can't plot ", ifelse(all.ind, ifelse(partial, "partial-", "auto-"), ""), "correlation function", ifelse(all.ind, "\n", "s\n")), call.=FALSE)
next
}
if(!partial) {
stats::acf(plot.x, main="", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("ACF")))
}
if(any(!all.ind, partial)) {
stats::acf(plot.x, main="", type="partial", ci.col=4, ylab="")
if(titles) graphics::title(main=list(paste0("PACF")))
if(all(!all.ind, titles)) graphics::title(main=list(paste0(switch(EXPR=param, alpha="Alpha", discount="Discount"))), outer=TRUE)
}
}
}
if(all(all.ind, titles)) graphics::title(ifelse(param != "pis", paste0(toupper(substr(param, 1L, 1L)), substr(param, 2L, nchar(param)),
ifelse(all(grp.ind, !is.element(param, c("scores", "pis", "alpha", "discount"))), paste0(" - Cluster ", g), "")),
paste0("Mixing Proportions", ifelse(matx, "", paste0(" - Cluster ", ind)))), outer=TRUE)
if(isTRUE(msgx)) .ent_exit(opts = defopt)
}
}
# Loadings Heatmaps
#' Convert a numeric matrix to colours
#'
#' Converts a matrix to a hex colour code representation for plotting using \code{\link{plot_cols}}. Used internally by \code{\link{plot.Results_IMIFA}} for plotting posterior mean loadings heatmaps.
#' @param mat Either a matrix or, when \code{compare} is \code{TRUE}, a list of matrices.
#' @param cols The colour palette to be used. The default palette uses \code{\link[viridisLite]{viridis}}. Will be checked for validity by \code{\link{is.cols}}.
#' @param compare Logical switch used when desiring comparable colour representations (usually for comparable heat maps) across multiple matrices. Ensures plots will be calibrated to a common colour scale so that, for instance, the colour on the heat map of an entry valued at 0.7 in Matrix A corresponds exactly to the colour of a similar value in Matrix B. When \code{TRUE}, \code{mat} must be supplied as a list of matrices, which must have either the same number of rows, or the same number of columns.
#' @param byrank Logical indicating whether to convert the matrix itself or the sample ranks of the values therein. Defaults to \code{FALSE}.
#' @param breaks Number of gradations in colour to use. Defaults to \code{length(cols)}. Alternatively, a vector of breakpoints for use with \code{\link[base]{cut}}.
#' @param na.col Colour to be used to represent missing data. Will be checked for validity by \code{\link{is.cols}}.
#' @param transparency A factor in [0, 1] modifying the opacity for overplotted lines. Defaults to 1 (i.e. no transparency). Only relevant when \code{cols} is not supplied, otherwise the supplied \code{cols} must already be adjusted for transparency.
#' @param ... Catches unused arguments.
#'
#' @return A matrix of hex colour code representations, or a list of such matrices when \code{compare} is \code{TRUE}.
#' @export
#' @keywords plotting
#' @importFrom viridisLite "viridis"
#'
#' @seealso \code{\link{plot_cols}}, \code{\link{heat_legend}}, \code{\link{is.cols}}, \code{\link[base]{cut}}
#'
#' @usage
#' mat2cols(mat,
#' cols = NULL,
#' compare = FALSE,
#' byrank = FALSE,
#' breaks = NULL,
#' na.col = "#808080FF",
#' transparency = 1,
#' ...)
#' @examples
#' # Generate a colour matrix using mat2cols()
#' mat <- matrix(rnorm(100), nrow=10, ncol=10)
#' mat[2,3] <- NA
#' cols <- heat.colors(12)[12:1]
#' (matcol <- mat2cols(mat, cols=cols))
#'
#' # Use plot_cols() to visualise the colours matrix
#' par(mar=c(5.1, 4.1, 4.1, 3.1))
#' plot_cols(matcol)
#'
#' # Add a legend using heat_legend()
#' heat_legend(mat, cols=cols); box(lwd=2)
#'
#' # Try comparing heat maps of multiple matrices
#' mat1 <- cbind(matrix(rnorm(100, sd=c(4,2)), nr=50, nc=2, byrow=TRUE), 0.1)
#' mat2 <- cbind(matrix(rnorm(150, sd=c(7,5,3)), nr=50, nc=3, byrow=TRUE), 0.1)
#' mat3 <- cbind(matrix(rnorm(50, sd=1), nr=50, nc=1, byrow=TRUE), 0.1)
#' mats <- list(mat1, mat2, mat3)
#' colmats <- mat2cols(mats, cols=cols, compare=TRUE)
#' par(mfrow=c(2, 3), mar=c(1, 2, 1, 2))
#'
#' # Use common palettes (top row)
#' plot_cols(colmats[[1]]); heat_legend(range(mats), cols=cols); box(lwd=2)
#' plot_cols(colmats[[2]]); heat_legend(range(mats), cols=cols); box(lwd=2)
#' plot_cols(colmats[[3]]); heat_legend(range(mats), cols=cols); box(lwd=2)
#'
#' # Use uncommon palettes (bottom row)
#' plot_cols(mat2cols(mat1, cols=cols)); heat_legend(range(mat1), cols=cols); box(lwd=2)
#' plot_cols(mat2cols(mat2, cols=cols)); heat_legend(range(mat2), cols=cols); box(lwd=2)
#' plot_cols(mat2cols(mat3, cols=cols)); heat_legend(range(mat3), cols=cols); box(lwd=2)
mat2cols <- function(mat, cols = NULL, compare = FALSE, byrank = FALSE, breaks = NULL, na.col = "#808080FF", transparency = 1, ...) {
  # Map the entries of a numeric/logical matrix (or, when 'compare' is TRUE, a
  # list of such matrices sharing one dimension) to hex colour codes by binning
  # the values (or their sample ranks when 'byrank' is TRUE) into 'breaks'
  # intervals via cut(). Returns a character matrix (or list thereof).
  if(isTRUE(compare)) {
    # BUG FIX: was '&&', so a genuine list with invalid elements was never
    # caught, and a non-list 'mat' had its elements vapply'd instead of
    # erroring outright
    if(!inherits(mat, "list")  ||
       !all(vapply(mat, function(x)
         (is.matrix(x)      ||
          is.data.frame(x)) &&
         (is.numeric(x)     ||
          is.logical(x)),
          logical(1L))))          stop("'mat' must be a list of numeric/logical matrices or data.frames when 'compare' is TRUE", call.=FALSE)
    nc   <- vapply(mat, ncol, numeric(1L))
    nr   <- vapply(mat, nrow, numeric(1L))
    uc   <- unique(nc)
    ur   <- unique(nr)
    if(length(ur)        == 1) {
      # common number of rows: bind the matrices side by side and record which
      # columns belong to which input matrix (used to split the result later)
      mat  <- do.call(cbind, mat)
      spl  <- matrix(rep(seq_along(nc), nc), nrow=ur, ncol=ncol(mat), byrow=TRUE)
    } else if(length(uc) == 1) {
      # common number of columns: stack the matrices on top of one another
      mat  <- do.call(rbind, mat)
      spl  <- matrix(rep(seq_along(nr), nr), nrow=nrow(mat), ncol=uc, byrow=FALSE)
    } else                        stop("Matrices must have either the same number of rows or the same number of columns", call.=FALSE)
  } else if(!is.matrix(mat)  ||
            (!is.numeric(mat)  &&
             !is.logical(mat)))   stop("'mat' must be a numeric/logical matrix when 'compare' is FALSE", call.=FALSE)
  if(missing(cols)) {
    # default palette: viridis, with optional transparency where supported
    trx  <- grDevices::dev.capabilities()$semiTransparency
    xtr  <- missing(transparency)
    # BUG FIX: was 'length(transparency) != 1 && any(...)', which (a) allowed
    # invalid scalar values (e.g. transparency=2) straight through and (b)
    # could error on non-numeric input inside any(); short-circuiting '||'
    # chain validates safely
    if(length(transparency) != 1 ||
       !is.numeric(transparency) ||
       transparency < 0          ||
       transparency > 1)          stop("'transparency' must be a single number in [0, 1]", call.=FALSE)
    if(transparency != 1 && !trx) {
      # only message when the user explicitly requested transparency
      if(!xtr)                    message("'transparency' not supported on this device\n")
      transparency   <- 1
    }
    cols <- viridis(30L, option="B", alpha=transparency)
  }
  if(!all(is.cols(cols)))         stop("Invalid 'cols' colour palette supplied", call.=FALSE)
  if(any(!is.logical(byrank),
         length(byrank) != 1))    stop("'byrank' must be a single logical indicator", call.=FALSE)
  breaks <- if(missing(breaks)) length(cols) else breaks
  m1     <- if(isTRUE(byrank)) rank(mat) else mat
  # bin values into factor levels and index the palette by level
  facs   <- cut(as.numeric(m1), breaks, include.lowest=TRUE)
  answer <- matrix(cols[as.numeric(facs)], nrow=nrow(mat), ncol=ncol(mat))
  if(any((NM  <- is.na(mat)))) {
    # BUG FIX: was 'length(na.col != 1)' (the length of a comparison, always
    # 1) combined with '&&': invalid 'na.col' inputs previously slipped through
    if(length(na.col) != 1 ||
       !all(is.cols(na.col)))     stop("'na.col' must be a valid colour in the presence of missing data", call.=FALSE)
    answer    <- replace(answer, NM, na.col)
  }
  rownames(answer) <- rownames(mat)
  colnames(answer) <- colnames(mat)
  if(isTRUE(compare)) {
    # split the combined colour matrix back into one matrix per input;
    # BUG FIX: matrix() was previously given the full vectors nr/nc as its
    # nrow/ncol rather than the (unique) scalar shared dimensions ur/uc
    splans <- split(answer, spl)
    answer <- if(length(ur) == 1) lapply(splans, matrix, nrow=ur) else lapply(splans, matrix, ncol=uc)
  }
  answer
}
# Colour Checker
#' Check for Valid Colours
#'
#' Checks if the supplied vector contains valid colours.
#' @param cols A vector of colours, usually as a character string.
#'
#' @return A logical vector of length \code{length(cols)} which is \code{TRUE} for entries which are valid colours and \code{FALSE} otherwise.
#' @keywords utility
#' @export
#'
#' @examples
#' all(is.cols(1:5))
#'
#' all(is.cols(heat.colors(30)))
#'
#' any(!is.cols(c("red", "green", "aquamarine")))
is.cols <- function(cols) {
  # Return a logical vector, one entry per element of 'cols', which is TRUE
  # where the element can be interpreted as a colour by grDevices::col2rgb()
  # (i.e. a colour name, hex string, or palette index) and FALSE otherwise.
  check_one <- function(col) {
    tryCatch({
      grDevices::col2rgb(col)
      TRUE
    }, error = function(e) FALSE)
  }
  vapply(cols, check_one, logical(1L))
}
# Heatmap Legends
#' Add a colour key legend to heatmap plots
#'
#' Using only base graphics, this function appends a colour key legend for heatmaps produced by, for instance, \code{\link{plot_cols}} or \code{\link[graphics]{image}}.
#' @param data Either the data with which the heatmap was created or a vector containing its minimum and maximum values. Missing values are ignored.
#' @param cols The colour palette used when the heatmap was created. By default, the same \code{\link[viridisLite]{viridis}} default as in \code{\link{mat2cols}} is used. Will be checked for validity by \code{\link{is.cols}}.
#' @param breaks Optional argument giving the break-points for the axis labels.
#' @param cex.lab Magnification of axis annotation, indicating the amount by which plotting text and symbols should be scaled relative to the default of 1.
#' @param ... Catches unused arguments.
#'
#' @return Modifies an existing plot by adding a colour key legend.
#' @export
#' @keywords plotting
#'
#' @seealso \code{\link[graphics]{image}}, \code{\link{plot_cols}}, \code{\link{mat2cols}}, \code{\link{is.cols}}
#' @usage
#' heat_legend(data,
#' cols = NULL,
#' breaks = NULL,
#' cex.lab = 1,
#' ...)
#' @examples
#' # Generate a matrix and plot it with a legend
#' data <- matrix(rnorm(50), nrow=10, ncol=5)
#' cols <- heat.colors(12)[12:1]
#' par(mar=c(5.1, 4.1, 4.1, 3.1))
#'
#' plot_cols(mat2cols(data, cols=cols))
#' heat_legend(data, cols); box(lwd=2)
heat_legend <- function(data, cols = NULL, breaks = NULL, cex.lab = 1, ...) {
  # Appends a vertical colour-key legend just outside the right-hand edge of an
  # existing heatmap-style plot (e.g. one drawn by plot_cols() or image()),
  # using only base graphics. Modifies the current device as a side effect.
  #
  # data    - the plotted data, or simply its min & max; NAs are ignored
  # cols    - the palette used for the heatmap (defaults to viridis)
  # breaks  - optional break-points for the legend's axis labels
  # cex.lab - magnification of the axis annotation (single positive number)
  # ...     - catches unused arguments

  # 'cex.lab' must be a single strictly-positive number
  if(length(cex.lab) > 1 || (!is.numeric(cex.lab) ||
     cex.lab <= 0))       stop("Invalid 'cex.lab' supplied", call.=FALSE)
  if(!is.numeric(data))   stop("'data' must be numeric", call.=FALSE)
  if(missing(cols)) {
    # same default palette as mat2cols() so legends match default heatmaps
    cols <- viridis(30L, option="B", alpha=1L)
  } else if(!all(is.cols(cols))) stop("Invalid 'cols' colour palette supplied", call.=FALSE)
  # current plot region in user coordinates: c(x1, x2, y1, y2)
  bx  <- graphics::par("usr")
  # remember the clipping setting so it can be restored on the way out
  xpd <- graphics::par()$xpd
  # x-extent of the key: a thin strip just beyond the right plot edge
  box.cx <- c(bx[2L] + (bx[2L] - bx[1L])/1000, bx[2L] + (bx[2L] - bx[1L])/1000 + (bx[2L] - bx[1L])/50)
  box.cy <- c(bx[3L], bx[3L])
  # vertical height of each colour cell: plot height split across the palette
  box.sy <- (bx[4L] - bx[3L]) / length(cols)
  xx  <- rep(box.cx, each = 2L)
  # allow drawing outside the plot region while painting the key
  graphics::par(xpd = TRUE)
  # draw one filled rectangle (as a polygon) per palette colour, bottom-up
  for(i in seq_along(cols)) {
    yy <- c(box.cy[1L] + (box.sy * (i - 1L)),
            box.cy[1L] + (box.sy * (i)),
            box.cy[1L] + (box.sy * (i)),
            box.cy[1L] + (box.sy * (i - 1L)))
    graphics::polygon(xx, yy, col = cols[i], border = cols[i])
  }
  # overlay an invisible plot spanning the data range so axis() labels the key
  graphics::par(new = TRUE)
  yrange <- range(data, na.rm = TRUE)
  base::plot(0, 0, type = "n", ylim = yrange, yaxt = "n", ylab = "", xaxt = "n", xlab = "", frame.plot = FALSE)
  if(is.null(breaks)) {
    # default tick labels on the right-hand side
    graphics::axis(side = 4, las = 2, tick = FALSE, line = 0.1, cex.axis = cex.lab)
  } else {
    if(length(breaks) !=
       length(cols))    warning("'breaks' and 'cols' should be the same length if 'breaks' is supplied\n", call.=FALSE, immediate.=TRUE)
    # user-supplied break labels, spread evenly over the data range
    graphics::axis(side = 4, las = 2, tick = FALSE, line = 0.1, cex.axis = cex.lab,
                   at=seq(min(yrange), max(yrange), length.out = length(breaks)), labels=round(breaks, 2))
  }
  # restore the caller's clipping setting
  suppressWarnings(graphics::par(xpd = xpd))
}
# Prior No. Clusters (DP & PY)
#' Plot Pitman-Yor / Dirichlet Process Priors
#'
#' Plots the prior distribution of the number of clusters under a Pitman-Yor / Dirichlet process prior, for a sample of size \code{N} at given values of the concentration parameter \code{alpha} and optionally also the \code{discount} parameter. Useful for soliciting sensible priors (or fixed values) for \code{alpha} or \code{discount} under the \code{"IMFA"} and \code{"IMIFA"} methods for \code{\link{mcmc_IMIFA}}.
#' @param N The sample size.
#' @param alpha The concentration parameter. Must be specified and must be strictly greater than \code{-discount}. The case \code{alpha=0} is accommodated. When \code{discount} is negative \code{alpha} must be a positive integer multiple of \code{abs(discount)}.
#' @param discount The discount parameter for the Pitman-Yor process. Must be less than 1, but typically lies in the interval [0, 1). Defaults to 0 (i.e. the Dirichlet process). When \code{discount} is negative \code{alpha} must be a positive integer multiple of \code{abs(discount)}.
#' @param show.plot Logical indicating whether the plot should be displayed (default = \code{TRUE}).
#' @param type The type of plot to be drawn, as per \code{\link{plot}}. Defaults to \code{"h"}: histogram-like vertical lines.
#'
#' @details All arguments are vectorised. Users can also consult \code{\link{G_expected}}, \code{\link{G_variance}}, and \code{\link{G_calibrate}} in order to solicit sensible priors.
#' @note The actual density values are returned invisibly. Therefore, they can be visualised as desired by the user even if \code{show.plot} is \code{FALSE}.
#'
#' @return A plot of the prior distribution if \code{show.plot} is \code{TRUE}. Density values are returned invisibly. Note that the density values may not strictly sum to one in certain cases, as values small enough to be represented as zero may well be returned.
#' @export
#' @keywords plotting
#' @seealso \code{\link{G_moments}}, \code{\link[Rmpfr]{Rmpfr}}
#'
#' @note Requires use of the \code{\link[Rmpfr]{Rmpfr}} and \code{gmp} libraries; may encounter difficulty and slowness for large \code{N}, especially with non-zero \code{discount} values. Despite the high precision arithmetic used, the functions can be unstable for small values of \code{discount}.
#'
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @references De Blasi, P., Favaro, S., Lijoi, A., Mena, R. H., Prunster, I., and Ruggiero, M. (2015) Are Gibbs-type priors the most natural generalization of the Dirichlet process?, \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence}, 37(2): 212-229.
#' @usage
#' G_priorDensity(N,
#' alpha,
#' discount = 0,
#' show.plot = TRUE,
#' type = "h")
#' @examples
#' # Plot Dirichlet process priors for different values of alpha
#' (DP <- G_priorDensity(N=50, alpha=c(3, 10, 25)))
#'
#' # Non-zero discount requires loading the "Rmpfr" library
#' # require("Rmpfr")
#'
#' # Verify that these alpha/discount values produce Pitman-Yor process priors with the same mean
#' # G_expected(N=50, alpha=c(19.23356, 6.47006, 1), discount=c(0, 0.47002, 0.7300045))
#'
#' # Now plot them to examine tail behaviour as discount increases
#' # alpha <- c(19.23356, 6.47006, 1)
#' # discount <- c(0, 0.47002, 0.7300045)
#' # (PY <- G_priorDensity(N=50, alpha=alpha, discount=discount, type="l"))
#'
#' # Other special cases of the PYP are also facilitated
#' # G_priorDensity(N=50, alpha=c(alpha, 27.1401, 0),
#' # discount=c(discount, -27.1401/100, 0.8054448), type="b")
G_priorDensity <- function(N, alpha, discount = 0, show.plot = TRUE, type = "h") {
  # Record whether Rmpfr was already loaded: if we load it ourselves below,
  # it (and gmp) will be detached again on exit.
  igmp <- isNamespaceLoaded("Rmpfr")
  # High-precision arithmetic is required throughout; gmp >= 0.5-4 is needed
  # for gmp::Stirling1.all() in the Dirichlet-process branch.
  mpfrind <- suppressMessages(requireNamespace("Rmpfr", quietly=TRUE)) && .version_above("gmp", "0.5-4")
  if(isFALSE(mpfrind)) { stop("'Rmpfr' package not installed", call.=FALSE)
  } else if(isFALSE(igmp)) {
    on.exit(.detach_pkg("Rmpfr"))
    on.exit(.detach_pkg("gmp"), add=TRUE)
  }
  # Save and restore the palette, par() settings, and options() so the user's
  # session state is left untouched.
  oldpal <- grDevices::palette()
  on.exit(grDevices::palette(oldpal), add=isFALSE(mpfrind))
  defpar <- suppressWarnings(graphics::par(no.readonly=TRUE))
  defpar$new <- FALSE
  suppressWarnings(graphics::par(pty="m"))
  on.exit(suppressWarnings(graphics::par(defpar)), add=TRUE)
  defopt <- options()
  # Raise the expression-nesting limit: Rmpfr recursion can be deep for large N.
  options(expressions = 500000)
  on.exit(suppressWarnings(options(defopt)), add=TRUE)
  # --- Argument checks ('alpha' and 'discount' are vectorised; 'N' is not) ---
  if(any(c(length(N),
           length(show.plot)) > 1)) stop("Arguments 'N' and 'show.plot' must be strictly of length 1", call.=FALSE)
  if(!is.logical(show.plot)) stop("'show.plot' must be a single logical indicator", call.=FALSE)
  if(isTRUE(show.plot)) {
    if(length(type) > 1 ||
       !is.character(type) ||
       nchar(type) > 1) stop("'type' must be a single character", call.=FALSE)
    if(!is.element(type,
                   c("p", "l", "b", "c", "o",
                     "h", "s", "S", "n"))) stop("Invalid 'type'", call.=FALSE)
  }
  max.len <- max(length(alpha), length(discount))
  if(max.len > 10) stop("Can't plot more than ten distributions simultaneously", call.=FALSE)
  if(!is.element(length(alpha),
                 c(1, max.len))) stop("'alpha' must be of length 1 or length(discount)", call.=FALSE)
  if(!is.element(length(discount),
                 c(1, max.len))) stop("'discount' must be of length 1 or length(alpha)", call.=FALSE)
  if(!all(is.numeric(discount), is.numeric(alpha),
          is.numeric(N))) stop("'N', 'alpha', and 'discount' inputs must be numeric", call.=FALSE)
  # Pitman-Yor parameter constraints (see the roxygen header for details)
  if(any(discount >= 1)) stop("'discount' must be less than 1", call.=FALSE)
  if(any(discount > 0 &
         alpha <= - discount)) stop("'alpha' must be strictly greater than -discount", call.=FALSE)
  if(any(discount < 0 &
         (alpha <= 0 |
          !.IntMult(alpha, discount)))) stop("'alpha' must be a positive integer multiple of 'abs(discount)' when 'discount' is negative", call.=FALSE)
  if(any(alpha == 0 &
         discount <= 0)) stop("'discount' must be strictly positive when 'alpha'=0", call.=FALSE)
  # Recycle 'alpha'/'discount' to a common length
  if(length(alpha) != max.len) {
    alpha <- rep(alpha, max.len)
  }
  if(length(discount) != max.len) {
    discount <- rep(discount, max.len)
  }
  # rx[n, i]: prior probability of n clusters under the i-th parameter pair
  rx <- matrix(0, nrow=N, ncol=max.len)
  Nseq <- seq_len(N)
  Nsq2 <- Rmpfr::mpfr(Nseq, precBits=256)
  for(i in seq_len(max.len)) {
    alphi <- Rmpfr::mpfr(alpha[i], precBits=256)
    disci <- Rmpfr::mpfr(discount[i], precBits=256)
    if(disci == 0) {
      # Dirichlet process case: density via the rising factorial (Pochhammer
      # symbol) and Stirling numbers of the first kind.
      vnk <- exp(Nsq2 * log(alphi) - log(Rmpfr::pochMpfr(alphi, N)))
      rx[,i] <- gmp::asNumeric(abs(vnk * Rmpfr::.bigz2mpfr(gmp::Stirling1.all(N))))
    } else {
      if(disci > 0) {
        # Pitman-Yor process with positive discount
        vnk <- c(Rmpfr::mpfr(0, precBits=256), cumsum(log(alphi + Nseq[-N] * disci))) -
          log(Rmpfr::pochMpfr(alphi + 1, N - 1L)) - Nsq2 * log(disci)
      } else {
        # Negative discount: alpha = m * abs(discount) for a positive integer m
        # (validated above), and the density is zero beyond mn clusters.
        m <- as.integer(alphi/abs(disci))
        mn <- min(m, N)
        seqN <- seq_len(mn - 1L)
        vnk <- c(c(Rmpfr::mpfr(0, precBits=256), cumsum(log(m - seqN)) + seqN * log(abs(disci))) -
                 log(Rmpfr::pochMpfr(alphi + 1, N - 1L)) - c(seqN, mn) * log(abs(disci)), rep(-Inf, N - mn))
      }
      # Alternating binomial sums of Pochhammer terms (high precision), one per
      # possible cluster count; only their absolute value is used below.
      lnkd <- lapply(Nseq, function(g) Rmpfr::sumBinomMpfr(g, f=function(k) Rmpfr::pochMpfr(-k * disci, N), n0=1))
      rx[,i] <- gmp::asNumeric(exp(vnk - lfactorial(Nsq2)) * abs(Rmpfr::mpfr2array(unlist(lnkd), dim=N)))
      #lnkd <- Rmpfr::sapplyMpfr(Nseq, function(g) Rmpfr::sumBinomMpfr(g, f=function(k) Rmpfr::pochMpfr(-k * disci, N), n0=1))
      #rx[,i] <- gmp::asNumeric(exp(vnk - lfactorial(Nsq2)) * abs(lnkd))
    }
  }
  if(isTRUE(show.plot)) {
    if(max.len > 1) {
      # Use semi-transparent colours (when the device supports transparency)
      # so overlapping distributions remain visible.
      cols <- seq(from=2L, to=max.len + 1L)
      grDevices::palette("default")
      grDevices::palette(grDevices::adjustcolor(cols, alpha.f=ifelse(grDevices::dev.capabilities()$semiTransparency && max.len > 1, 0.75, 1)))
      graphics::matplot(x=seq_len(N), y=rx, type=type, col=cols - 1L, xlab="Clusters", ylim=c(0, max(rx)), ylab="Density", lend=1, xaxt="n",
                        pch=19, main=paste0("Prior Distribution of G\nN=", N), lwd=seq(3L, 1L, length.out=max.len), lty=seq_len(2L))
    } else {
      base::plot(x=seq_len(N), y=rx, type=type, xlab="Clusters", ylim=c(0, max(rx)), ylab="Density",
                 lend=1, pch=19, main=paste0("Prior Distribution of G\nN=", N), lwd=2L, lty=1L, xaxt="n")
    }
    # Force the x-axis to start at 1 cluster rather than 0
    ax <- pretty(seq_len(N))
    ax <- replace(ax, ax == 0, 1L)
    graphics::axis(1, at=ax, labels=ax)
  }
  # Density values are always returned invisibly (dropped to a vector when
  # only one prior was requested), even when show.plot=FALSE.
  invisible(if(max.len == 1) drop(rx) else rx)
}
#' Plots a matrix of colours
#'
#' Plots a matrix of colours as a heat map type image or as points. Intended for joint use with \code{mat2cols}.
#' @param cmat A matrix of valid colours, with missing values coded as \code{NA} allowed. Vectors should be supplied as matrices with 1 row or column, as appropriate.
#' @param na.col Colour used for missing \code{NA} entries in \code{cmat}.
#' @param ptype Switch controlling output as either a heat map \code{"image"} (the default) or as \code{"points"}.
#' @param border.col Colour of border drawn around the plot.
#' @param dlabels,rlabels,clabels Vector of labels for the diagonals, rows, and columns, respectively.
#' @param pch Point type used when \code{ptype="points"}.
#' @param cex Point cex used when \code{ptype="points"}.
#' @param label.cex Governs the cex parameter used for labels.
#' @param ... Further graphical parameters.
#'
#' @return Either an \code{"image"} or \code{"points"} type plot of the supplied colours.
#' @keywords plotting
#' @export
#'
#' @seealso \code{\link{mat2cols}}, \code{\link[graphics]{image}}, \code{\link{heat_legend}}, \code{\link{is.cols}}
#' @usage
#' plot_cols(cmat,
#' na.col = "#808080FF",
#' ptype = c("image", "points"),
#' border.col = "#808080FF",
#' dlabels = NULL,
#' rlabels = FALSE,
#' clabels = FALSE,
#' pch = 15,
#' cex = 3,
#' label.cex = 0.6,
#' ...)
#' @examples
#' # Generate a colour matrix using mat2cols()
#' mat <- matrix(rnorm(100), nrow=10, ncol=10)
#' mat[2,3] <- NA
#' cols <- heat.colors(12)[12:1]
#' (matcol <- mat2cols(mat, cols=cols))
#'
#' # Use plot_cols() to visualise the colours matrix
#' par(mar=c(5.1, 4.1, 4.1, 3.1))
#' plot_cols(matcol)
#'
#' # Add a legend using heat_legend()
#' heat_legend(mat, cols=cols); box(lwd=2)
#'
#' # Replace colour of exact zero entries:
#' # Often important to call mat2cols() first (to include 0 in the cuts),
#' # then replace relevant entries with NA for plot_cols(), i.e.
#' mat[2,3] <- 0
#' matcol2 <- mat2cols(mat, cols=cols)
#' plot_cols(replace(matcol2, mat == 0, NA), na.col="blue")
#' heat_legend(mat, cols=cols); box(lwd=2)
# Plot a matrix of colours, either as a heat-map style "image" or as a grid of
# "points". Intended for joint use with mat2cols(); NA entries are painted in
# na.col. In both modes, row 1 of 'cmat' is drawn at the TOP of the plot.
plot_cols <- function(cmat, na.col = "#808080FF", ptype = c("image", "points"), border.col = "#808080FF",
                      dlabels = NULL, rlabels = FALSE, clabels = FALSE, pch = 15, cex = 3, label.cex = 0.6, ...) {
  # --- Validation (is.cols() checks that entries are interpretable colours) ---
  if(!all(is.cols(cmat),
          is.matrix(cmat))) stop("'cmat' needs to be a valid colour matrix:\ntry supplying a vector as a matrix with 1 row or column, as appropriate", call.=FALSE)
  if(!all(is.cols(na.col),
          length(na.col) == 1)) stop("'na.col' needs to be a valid single colour", call.=FALSE)
  if(!all(is.cols(border.col),
          length(border.col) == 1)) stop("'border.col' needs to be a valid single colour", call.=FALSE)
  if(!all(is.character(ptype))) stop("'ptype' must be a character vector of length 1", call.=FALSE)
  ptype <- match.arg(ptype)
  N <- nrow(cmat)
  P <- ncol(cmat)
  # Missing entries are displayed using na.col
  cmat <- replace(cmat, is.na(cmat), na.col)
  if(ptype == "image") {
    # Encode each distinct colour as an integer so image() can use the colours
    # themselves as its palette; columns of z are reversed (N:1) so that the
    # first row of cmat appears at the top.
    levels <- sort(unique(as.vector(cmat)))
    z <- matrix(unclass(factor(cmat, levels = levels, labels = seq_along(levels))), nrow=N, ncol=P)
    info <- list(x = seq_len(P), y=seq_len(N), z=t(z), col = levels)
    graphics::image(info$x, info$y, info$z[,N:1L, drop=FALSE], col = info$col, axes = FALSE, xlab = "", ylab = "", ...)
  } else {
    # One point per cell, coloured by the corresponding entry of cmat
    base::plot(rep(seq_len(P), rep(N, P)), rep(N:1L, P), col = as.vector(cmat), cex = cex, pch = pch,
               axes = FALSE, xlab = "", ylab = "", xlim = c(0.5, P + 0.5), ylim = c(0.5, N + 0.5), ...)
  }
  # Optional column labels (top axis), row labels (left axis), and labels
  # placed along the main diagonal
  graphics::axis(3, at = seq_len(P), tick = FALSE, labels = clabels, las = 2, cex.axis = label.cex)
  graphics::axis(2, at = N:1L, tick = FALSE, labels = rlabels, las = 2, cex.axis = label.cex)
  if(is.vector(dlabels)) {
    Nd <- length(dlabels)
    graphics::text(seq_len(Nd), Nd:1L, dlabels, cex = label.cex)
  }
  graphics::box(col = border.col)
}
#' Show image of grayscale grid
#'
#' Plots an image of a grayscale grid representation of a digit.
#' @param dat A \code{matrix} or \code{data.frame} with the same number of rows and columns (or a vector which can be coerced to such a format), representing a grayscale map of a single digit.
#' @param col The colour scale to be used. Defaults to \code{grey(seq(1, 0, length = ncol(dat)))}.
#' @param ... Additional arguments to be passed to \code{\link{mat2cols}} and/or \code{\link{plot_cols}} (e.g. \code{na.col}) when \code{dat} is a matrix or \code{\link[graphics]{image}} when \code{dat} is a vector.
#'
#' @return The desired image representation of the digit.
#' @export
#' @seealso \code{\link{USPSdigits}}, \code{\link{show_IMIFA_digit}}, \code{\link{mat2cols}}, \code{\link{plot_cols}}
#' @keywords plotting
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @usage
#' show_digit(dat,
#' col = NULL,
#' ...)
#' @examples
#' data(USPSdigits)
#'
#' # Plot the first digit
#' show_digit(USPSdigits$train[1,-1])
#'
#' # Visualise the overall mean
#' show_digit(colMeans(USPSdigits$train[,-1]))
# Plot a single digit as a grayscale grid. 'dat' may be a square matrix, a
# data.frame coercible to one, or a vector of length d^2; 'col' defaults to a
# grey ramp with one level per original column/pixel.
show_digit <- function(dat, col = NULL, ...) {
  # df is TRUE when 'dat' is NOT a data.frame (i.e. no coercion was required)
  x <- if(df <- !is.data.frame(dat)) dat else as.matrix(dat)
  # Number of columns/pixels in the original input; for data.frames,
  # length() equals the number of columns. Scalar condition, so a plain
  # if/else is used rather than ifelse().
  odims <- if(is.matrix(dat)) ncol(dat) else length(dat)
  dims <- sqrt(odims)
  x <- if(is.matrix(dat) && df) dat else matrix(unlist(dat), nrow = dims, ncol = dims, byrow=!is.vector(dat))
  # is.null() rather than missing() so that an explicit col=NULL also falls
  # back to the documented default greyscale; length.out is spelled out to
  # avoid partial argument matching.
  col <- if(!is.null(col)) col else grDevices::grey(seq(1L, 0L, length.out = odims))
  if(nrow(x) != ncol(x)) {
    # Last-resort attempt to reshape before giving up
    x <- matrix(dat, nrow=dims, ncol=dims, byrow=FALSE)
    if(diff(dim(x)) != 0) stop("'dat' must be coercible to a square matrix", call. = FALSE)
  }
  if(!all(is.cols(col))) stop("Invalid 'col'", call. = FALSE)
  if(is.vector(dat)) {
    # Column reversal (dims:1) orients the digit the right way up
    graphics::image(matrix(x, nrow = dims)[,dims:1L], col = col, ...)
  } else {
    plot_cols(mat2cols(x, cols = col, ...), ...)
  }
  graphics::box(lwd = 1)
  invisible()
}
#' Plot the posterior mean image
#'
#' Plots the posterior mean of a given cluster from an \code{"IMIFA"}-related model fit to a digit data set in the form of a square grayscale grid.
#' @param res An object of class \code{"Results_IMIFA"} generated by \code{\link{get_IMIFA_results}}.
#' @param G The index of the cluster for which the posterior mean digit is to be represented.
#' @param what A switch controlling whether the \code{"mean"} or \code{"last"} valid sample is to be plotted.
#' @param dat The full grayscale grid data set (prior to centering and scaling). Necessary when \code{ind} is supplied or if pixels with standard deviation of 0 exist in the data set (which will have been automatically removed by \code{\link{mcmc_IMIFA}}).
#' @param ind The index of columns of \code{dat} which were discarded prior to fitting the \code{"IMIFA"}-related model via \code{\link{mcmc_IMIFA}}. Can be a vector of column indices of \code{dat} or an equivalent vector of logicals. The discarded pixels are replaced by the column-means corresponding to \code{ind} among images assigned to the given cluster \code{G}.
#' @param ... Additional arguments to be passed, via \code{\link{show_digit}}, to \code{\link{mat2cols}} and/or \code{\link{plot_cols}}.
#'
#' @return The desired image representation of the posterior mean digit (or the last valid sample) from the desired cluster.
#' @note Note that both centering and scaling of the original data prior to modelling is accounted for in reconstructing the means, but \code{dat}, if necessary, must be the raw data prior to pre-processing.
#' @details This function is a wrapper to \code{\link{show_digit}} which supplies the posterior mean digit of a given cluster from a \code{"IMIFA"} model.
#' @importFrom matrixStats "colMeans2"
#' @export
#' @seealso \code{\link{USPSdigits}}, \code{\link{show_digit}}, \code{\link{get_IMIFA_results}}, \code{\link{mcmc_IMIFA}}, \code{\link{mat2cols}}, \code{\link{plot_cols}}
#' @keywords plotting
#' @author Keefe Murphy - <\email{keefe.murphy@@mu.ie}>
#' @usage
#' show_IMIFA_digit(res,
#' G = 1,
#' what = c("mean", "last"),
#' dat = NULL,
#' ind = NULL,
#' ...)
#' @examples
#' # Load the USPS data and discard peripheral digits
#' data(USPSdigits)
#' ylab <- USPSdigits$train[,1]
#' train <- USPSdigits$train[,-1]
#' ind <- apply(train, 2, sd) > 0.7
#' dat <- train[,ind]
#'
#' \donttest{# Fit an IMIFA model (warning: quite slow!)
#' # sim <- mcmc_IMIFA(dat, n.iters=1000, prec.mu=1e-03, z.init="kmeans",
#' # centering=FALSE, scaling="none")
#' # res <- get_IMIFA_results(sim, zlabels=ylab)
#'
#' # Examine the posterior mean image of the first two clusters
#' # show_IMIFA_digit(res, dat=train, ind=ind)
#' # show_IMIFA_digit(res, dat=train, ind=ind, G=2)}
show_IMIFA_digit <- function(res, G = 1L, what = c("mean", "last"), dat = NULL, ind = NULL, ...) {
  # S3 generic: dispatches on class(res); see show_IMIFA_digit.Results_IMIFA
  UseMethod("show_IMIFA_digit")
}
#' @method show_IMIFA_digit Results_IMIFA
#' @importFrom matrixStats "colMeans2"
#' @export
show_IMIFA_digit.Results_IMIFA <- function(res, G = 1L, what = c("mean", "last"), dat = NULL, ind = NULL, ...) {
  # Reconstruct and plot the posterior mean (or last sampled) digit of cluster
  # G, undoing any centering/scaling applied before modelling and filling in
  # any pixels that were discarded prior to the fit.
  if(!inherits(res,
               "Results_IMIFA")) stop("Results object of class 'Results_IMIFA' must be supplied", call. = FALSE)
  if(G > res$GQ.results$G) stop("Invalid 'G'", call. = FALSE)
  if(!all(is.character(what))) stop("'what' must be a character vector of length 1", call. = FALSE)
  # sd0 indexes the DISCARDED pixels: taken from the fitted object when 'ind'
  # is missing, otherwise derived by negating 'ind' (which, per the roxygen
  # header, marks the RETAINED columns of 'dat').
  sd0 <- if(missing(ind)) attr(res, "Sd0.drop") else if(is.logical(ind)) !ind else !(seq_len(attr(res, "Vars")) %in% ind)
  mu <- switch(EXPR=match.arg(what), mean = res$Means$post.mu[,G], last = res$Means$last.mu[,G])
  center <- attr(res, "Center")
  scale <- attr(res, "Scaling") != "none"
  if(!is.null(sd0)) {
    # Discarded pixels are replaced by the cluster-specific column means of
    # the re-scaled raw data; retained pixels take the posterior mean.
    # NOTE(review): assumes res$Clust$MAP holds one cluster label per row of
    # 'dat' -- confirm against get_IMIFA_results().
    if(missing(dat)) stop("'dat' must be supplied when pixels were discarded &/or 'ind' is supplied", call.=FALSE)
    x <- rep(NA, attr(res, "Vars"))
    x[sd0] <- colMeans2(.scale2(data.matrix(dat), center=center, scale=attr(res, "Scale"))[res$Clust$MAP == G,sd0, drop=FALSE])
    x[!sd0] <- mu
  } else x <- mu
  # Undo the pre-processing: scaling first, then centering
  x <- if(scale) x * attr(res, "G.Scale") else x
  x <- if(center) x + attr(res, "G.Mean") else x
  show_digit(x, ...)
}
#
|
## R code extracted (purled) from the ERSA package vignette; each "## ----"
## line marks an original knitr chunk header, and chunks with eval=F are
## kept as comments exactly as knitr emitted them.
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## -----------------------------------------------------------------------------
library(ERSA)
## ----eval=F-------------------------------------------------------------------
# f <- lm(Fertility ~ . , data = swiss)
# exploreReg(f,swiss)
## ----echo=FALSE, out.width='100%'---------------------------------------------
knitr::include_graphics('swiss1.png')
## -----------------------------------------------------------------------------
f <- lm(Fertility ~ . , data = swiss)
summary(f)
## -----------------------------------------------------------------------------
drop1(f, test="F")
## -----------------------------------------------------------------------------
anova(f)
## ----fig.width=4, fig.height=4, fig.align='center'----------------------------
plottStats(f)
cols <- termColours(f)
plottStats(f, cols)
## ----eval=F-------------------------------------------------------------------
# plotCIStats(f,cols)
# plotCIStats(f, cols,stdunits=TRUE)
# plotAnovaStats(f, cols,type="F")
# plotAnovaStats(f, cols,type="SS")
## ----fig.width=6, fig.height=4------------------------------------------------
fr <- revPredOrder(f, swiss)
plotSeqSS(list(f,fr), cols,legend=TRUE)
## ----eval=F-------------------------------------------------------------------
# fselOrder(f)
# bselOrder(f)
# randomPredOrder(f)
# regsubsetsOrder(f)
## ----fig.width=7,fig.height=4, fig.align='center'-----------------------------
pcpPlot(swiss, f)
## ----fig.width=7,fig.height=4,fig.align='center'------------------------------
pcpPlot(swiss, f, type="Residuals")
## ----fig.width=7, fig.height=4,fig.align='center'-----------------------------
# TRUE is written out in full: T is an ordinary variable and can be reassigned
pcpPlot(swiss, f, type="Hatvalues", sequential=TRUE)
| /inst/doc/ERSA.R | no_license | cran/ERSA | R | false | false | 1,816 | r | ## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## -----------------------------------------------------------------------------
library(ERSA)
## ----eval=F-------------------------------------------------------------------
# f <- lm(Fertility ~ . , data = swiss)
# exploreReg(f,swiss)
## ----echo=FALSE, out.width='100%'---------------------------------------------
knitr::include_graphics('swiss1.png')
## -----------------------------------------------------------------------------
f <- lm(Fertility ~ . , data = swiss)
summary(f)
## -----------------------------------------------------------------------------
drop1(f, test="F")
## -----------------------------------------------------------------------------
anova(f)
## ----fig.width=4, fig.height=4, fig.align='center'----------------------------
plottStats(f)
cols <- termColours(f)
plottStats(f, cols)
## ----eval=F-------------------------------------------------------------------
# plotCIStats(f,cols)
# plotCIStats(f, cols,stdunits=TRUE)
# plotAnovaStats(f, cols,type="F")
# plotAnovaStats(f, cols,type="SS")
## ----fig.width=6, fig.height=4------------------------------------------------
fr <- revPredOrder(f, swiss)
plotSeqSS(list(f,fr), cols,legend=TRUE)
## ----eval=F-------------------------------------------------------------------
# fselOrder(f)
# bselOrder(f)
# randomPredOrder(f)
# regsubsetsOrder(f)
## ----fig.width=7,fig.height=4, fig.align='center'-----------------------------
pcpPlot(swiss, f)
## ----fig.width=7,fig.height=4,fig.align='center'------------------------------
pcpPlot(swiss, f, type="Residuals")
## ----fig.width=7, fig.height=4,fig.align='center'-----------------------------
pcpPlot(swiss, f, type="Hatvalues", sequential=T)
|
# One-off script: read every .xlsx workbook in a directory, tag each row with
# its source file name, and stack the results into one data frame.
# Install Package
# NOTE(review): installing on every run is wasteful; consider commenting these
# out after the first run (or guarding with requireNamespace()).
install.packages('tidyverse')
install.packages('readxl')
# Run Library
library(tidyverse)
library(readxl)
# Read File Excel
# NOTE(review): rm(list = ls()) wipes the whole workspace -- acceptable in an
# interactive one-off script, but avoid in code sourced from elsewhere.
rm(list = ls())
# Dir file excel
# NOTE(review): hard-coded absolute path; setwd() makes the script machine-specific.
setwd('C:/Users/MuhammadGusKhamim/Documents/Folder Data CSV/vais/')
# read file: list all .xlsx workbooks in the working directory
my_files <- list.files(pattern = "*.xlsx")
my_files
# Read sheet 1 of each workbook and record the source file in a 'file' column
nba = lapply(my_files, function(i){
  x = read_excel(i,sheet=1)
  x$file = i
  x
})
nba[1]
# Stack all per-file data frames into a single data frame
nba = do.call('rbind.data.frame', nba)
| /Merge Excel.R | no_license | amimhayden22/Merge-Multiple-Excel-Files-with-R-Studio | R | false | false | 453 | r | # Install Package
install.packages('tidyverse')
install.packages('readxl')
# Run Library
library(tidyverse)
library(readxl)
# Read File Excel
rm(list = ls())
# Dir file excel
setwd('C:/Users/MuhammadGusKhamim/Documents/Folder Data CSV/vais/')
# read file
my_files <- list.files(pattern = "*.xlsx")
my_files
#
nba = lapply(my_files, function(i){
x = read_excel(i,sheet=1)
x$file = i
x
})
nba[1]
nba = do.call('rbind.data.frame', nba)
|
# Flag non-mobile user agents: returns a numeric vector with 1 for each
# user-agent string containing no known mobile marker and 0 otherwise.
# Errors if any element of the input is missing.
get_nonmobiles <- function(ua_strings) {
  if (anyNA(ua_strings)) {
    stop("Missing values in input vector cannot be parsed.")
  }
  mobile_pattern <-
    "up.browser|up.link|mmp|smartphone|midp|wap|phone|iemobile|mobile|oneplus"
  is_mobile <- grepl(mobile_pattern, ua_strings, ignore.case = TRUE)
  as.numeric(!is_mobile)
}
| /R/get_nonmobiles.R | no_license | kylepeyton/attentive | R | false | false | 313 | r | get_nonmobiles <- function(ua_strings) {
if (any(is.na(ua_strings))) {
stop("Missing values in input vector cannot be parsed.")
}
flag <-
grepl(
"up.browser|up.link|mmp|smartphone|midp|wap|phone|iemobile|mobile|oneplus",
ua_strings,
ignore.case = TRUE
)
return(1 - flag)
}
|
\name{skewness}
\alias{skewness}
\alias{skewness.default}
\alias{skewness.data.frame}
\alias{skew}
\title{Coefficient of Skewness}
\description{
Generate the coefficient of skewness based on a sample.
}
\usage{
skewness(x, na.rm = TRUE)
skew(x, na.rm = TRUE)
}
\arguments{
\item{x}{An \R object}
\item{na.rm}{a logical value indicating whether \code{NA} should
be stripped before the computation proceeds.}
}
\author{Alaine A. Gulles}
\keyword{univar}
| /R Package Creation/STAR/man/skewness.Rd | no_license | djnpisano/RScriptLibrary | R | false | false | 477 | rd | \name{skewness}
\alias{skewness}
\alias{skewness.default}
\alias{skewness.data.frame}
\alias{skew}
\title{Coefficient of Skewness}
\description{
Generate the coefficient of skewness based on a sample.
}
\usage{
skewness(x, na.rm = TRUE)
skew(x, na.rm = TRUE)
}
\arguments{
\item{x}{An \R object}
\item{na.rm}{a logical value indicating whether \code{NA} should
be stripped before the computation proceeds.}
}
\author{Alaine A. Gulles}
\keyword{univar}
|
#' @title Efficiencies
#'
#' @description Extract the scores (optimal objective values) of the evaluated DMUs
#' from a fuzzy DEA solution. Note that these scores may not always be interpreted
#' as efficiencies.
#'
#' @param x Object of class \code{dea_fuzzy} obtained with some of the fuzzy DEA
#' \code{modelfuzzy_*} functions.
#' @param ... Other options (for compatibility).
#'
#' @author
#' \strong{Vicente Coll-Serrano} (\email{vicente.coll@@uv.es}).
#' \emph{Quantitative Methods for Measuring Culture (MC2). Applied Economics.}
#'
#' \strong{Vicente Bolós} (\email{vicente.bolos@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' \strong{Rafael Benítez} (\email{rafael.suarez@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' University of Valencia (Spain)
#'
#' @references
#' Boscá, J.E.; Liern, V.; Sala, R.; Martínez, A. (2011). "Ranking Decision Making
#' Units by Means of Soft Computing DEA Models". International Journal of Uncertainty,
#' Fuzziness and Knowledge-Based Systems, 19(1), p.115-134.
#'
#' @examples
#' # Replication of results in Boscá, Liern, Sala and Martínez (2011, p.125)
#' data("Leon2003")
#' data_example <- make_deadata_fuzzy(datadea = Leon2003,
#' inputs.mL = 2,
#' inputs.dL = 3,
#' outputs.mL = 4,
#' outputs.dL = 5)
#' result <- modelfuzzy_kaoliu(data_example,
#' kaoliu_modelname = "basic",
#' alpha = seq(0, 1, by = 0.1),
#' orientation = "io",
#' rts = "vrs")
#' efficiencies(result)
#'
#' @method efficiencies dea_fuzzy
#' @export
efficiencies.dea_fuzzy <-
  function(x, ...) {
    # Extract the scores from a fuzzy DEA solution. The returned shape depends
    # on the model family encoded in x$modelname:
    #   "kaoliu":        list(Worst = ..., Best = ...) over the alpha-cuts,
    #                    each a (DMU x alpha) matrix, or a (DMU x score x alpha)
    #                    array when the submodel yields several scores per DMU
    #   "possibilistic": (DMU x h-level) matrix or (DMU x score x h) array
    #   "guotanaka":     (DMU x 3 x h) array of triangular fuzzy efficiencies
    # All values are rounded to 5 decimal places.
    deasol <- x
    dmunames_eval <- names(deasol$dmu_eval)
    nde <- length(deasol$dmu_eval)
    if (grepl("kaoliu", deasol$modelname)) {
      nalpha <- length(deasol$alpha)
      # The score field varies by submodel: efficiency, beta, delta, or objval.
      if ("efficiency" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
        neff <- length(deasol$alphacut[[1]]$DMU$Worst[[1]]$efficiency)
        if (neff == 1) {
          # One score per DMU: build (DMU x alpha) matrices for Worst and Best
          eff.W <- matrix(0, nrow = nde, ncol = nalpha)
          rownames(eff.W) <- dmunames_eval
          colnames(eff.W) <- names(deasol$alphacut)
          eff.B <- eff.W
          for (j in 1:nalpha) {
            eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
              x$efficiency))
            eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
              x$efficiency))
          }
        } else {
          # Several scores per DMU: (DMU x [scores, mean_efficiency] x alpha)
          eff.W <- array(0,
                         dim = c(nde, neff + 1, nalpha),
                         dimnames = list(dmunames_eval,
                                         c(names(deasol$alphacut[[1]]$DMU$Worst[[1]]$efficiency), "mean_efficiency"),
                                         names(deasol$alphacut)))
          eff.B <- eff.W
          for (k in 1:nalpha) {
            eff.W[, , k] <- cbind(
              do.call(rbind, lapply(deasol$alphacut[[k]]$DMU$Worst, function(x)
                x$efficiency)),
              unlist(lapply(deasol$alphacut[[k]]$DMU$Worst, function(x)
                x$mean_efficiency))
            )
            eff.B[, , k] <- cbind(
              do.call(rbind, lapply(deasol$alphacut[[k]]$DMU$Best, function(x)
                x$efficiency)),
              unlist(lapply(deasol$alphacut[[k]]$DMU$Best, function(x)
                x$mean_efficiency))
            )
          }
        }
      } else if ("beta" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
        # Single 'beta' score per DMU
        eff.W <- matrix(0, nrow = nde, ncol = nalpha)
        rownames(eff.W) <- dmunames_eval
        colnames(eff.W) <- names(deasol$alphacut)
        eff.B <- eff.W
        for (j in 1:nalpha) {
          eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
            x$beta))
          eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
            x$beta))
        }
      } else if ("delta" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
        # Single 'delta' score per DMU
        eff.W <- matrix(0, nrow = nde, ncol = nalpha)
        rownames(eff.W) <- dmunames_eval
        colnames(eff.W) <- names(deasol$alphacut)
        eff.B <- eff.W
        for (j in 1:nalpha) {
          eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
            x$delta))
          eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
            x$delta))
        }
      } else if ("objval" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
        # Fall back to the raw optimal objective value
        eff.W <- matrix(0, nrow = nde, ncol = nalpha)
        rownames(eff.W) <- dmunames_eval
        colnames(eff.W) <- names(deasol$alphacut)
        eff.B <- eff.W
        for (j in 1:nalpha) {
          eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
            x$objval))
          eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
            x$objval))
        }
      } else {
        stop("No efficiency/beta/delta/objval parameters in this solution!")
      }
      return(list(Worst = round(eff.W, 5), Best = round(eff.B, 5)))
    } else if (grepl("possibilistic", deasol$modelname)) {
      # Same extraction pattern as above but indexed by h-level, with a single
      # set of scores per DMU (no Worst/Best split).
      nh <- length(deasol$h)
      if ("efficiency" %in% names(deasol$hlevel[[1]]$DMU[[1]])) {
        neff <- length(deasol$hlevel[[1]]$DMU[[1]]$efficiency)
        if (neff == 1) {
          eff <- matrix(0, nrow = nde, ncol = nh)
          rownames(eff) <- dmunames_eval
          colnames(eff) <- names(deasol$hlevel)
          for (j in 1:nh) {
            eff[, j] <- unlist(lapply(deasol$hlevel[[j]]$DMU, function(x)
              x$efficiency))
          }
        } else {
          eff <- array(0,
                       dim = c(nde, neff + 1, nh),
                       dimnames = list(dmunames_eval,
                                       c(names(deasol$hlevel[[1]]$DMU[[1]]$efficiency), "mean_efficiency"),
                                       names(deasol$hlevel)))
          for (k in 1:nh) {
            eff[, , k] <- cbind(
              do.call(rbind, lapply(deasol$hlevel[[k]]$DMU, function(x)
                x$efficiency)),
              unlist(lapply(deasol$hlevel[[k]]$DMU, function(x)
                x$mean_efficiency))
            )
          }
        }
      } else if ("beta" %in% names(deasol$hlevel[[1]]$DMU[[1]])) {
        eff <- matrix(0, nrow = nde, ncol = nh)
        rownames(eff) <- dmunames_eval
        colnames(eff) <- names(deasol$hlevel)
        for (j in 1:nh) {
          eff[, j] <- unlist(lapply(deasol$hlevel[[j]]$DMU, function(x)
            x$beta))
        }
      } else {
        stop("No efficiency/beta parameters in this solution!")
      }
      return(round(eff, 5))
    } else if (grepl("guotanaka", deasol$modelname)) {
      # Triangular fuzzy efficiencies: (DMU x 3 x h-level) array
      nh <- length(deasol$h)
      eff <- array(0,
                   dim = c(nde, 3, nh),
                   dimnames = list(dmunames_eval,
                                   names(deasol$hlevel[[1]]$DMU[[1]]$efficiency),
                                   names(deasol$hlevel)))
      for (k in 1:nh) {
        eff[, , k] <- do.call(rbind, lapply(deasol$hlevel[[k]]$DMU, function(x)
          x$efficiency))
      }
      return(round(eff, 5))
    }
} | /R/efficiencies.dea_fuzzy.R | no_license | cran/deaR | R | false | false | 7,668 | r | #' @title Efficiencies
#'
#' @description Extract the scores (optimal objective values) of the evaluated DMUs
#' from a fuzzy DEA solution. Note that these scores may not always be interpreted
#' as efficiencies.
#'
#' @param x Object of class \code{dea_fuzzy} obtained with some of the fuzzy DEA
#' \code{modelfuzzy_*} functions.
#' @param ... Other options (for compatibility).
#'
#' @author
#' \strong{Vicente Coll-Serrano} (\email{vicente.coll@@uv.es}).
#' \emph{Quantitative Methods for Measuring Culture (MC2). Applied Economics.}
#'
#' \strong{Vicente Bolós} (\email{vicente.bolos@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' \strong{Rafael Benítez} (\email{rafael.suarez@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' University of Valencia (Spain)
#'
#' @references
#' Boscá, J.E.; Liern, V.; Sala, R.; Martínez, A. (2011). "Ranking Decision Making
#' Units by Means of Soft Computing DEA Models". International Journal of Uncertainty,
#' Fuzziness and Knowledge-Based Systems, 19(1), p.115-134.
#'
#' @examples
#' # Replication of results in Boscá, Liern, Sala and Martínez (2011, p.125)
#' data("Leon2003")
#' data_example <- make_deadata_fuzzy(datadea = Leon2003,
#' inputs.mL = 2,
#' inputs.dL = 3,
#' outputs.mL = 4,
#' outputs.dL = 5)
#' result <- modelfuzzy_kaoliu(data_example,
#' kaoliu_modelname = "basic",
#' alpha = seq(0, 1, by = 0.1),
#' orientation = "io",
#' rts = "vrs")
#' efficiencies(result)
#'
#' @method efficiencies dea_fuzzy
#' @export
efficiencies.dea_fuzzy <-
function(x, ...) {
deasol <- x
dmunames_eval <- names(deasol$dmu_eval)
nde <- length(deasol$dmu_eval)
if (grepl("kaoliu", deasol$modelname)) {
nalpha <- length(deasol$alpha)
if ("efficiency" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
neff <- length(deasol$alphacut[[1]]$DMU$Worst[[1]]$efficiency)
if (neff == 1) {
eff.W <- matrix(0, nrow = nde, ncol = nalpha)
rownames(eff.W) <- dmunames_eval
colnames(eff.W) <- names(deasol$alphacut)
eff.B <- eff.W
for (j in 1:nalpha) {
eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
x$efficiency))
eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
x$efficiency))
}
} else {
eff.W <- array(0,
dim = c(nde, neff + 1, nalpha),
dimnames = list(dmunames_eval,
c(names(deasol$alphacut[[1]]$DMU$Worst[[1]]$efficiency), "mean_efficiency"),
names(deasol$alphacut)))
eff.B <- eff.W
for (k in 1:nalpha) {
eff.W[, , k] <- cbind(
do.call(rbind, lapply(deasol$alphacut[[k]]$DMU$Worst, function(x)
x$efficiency)),
unlist(lapply(deasol$alphacut[[k]]$DMU$Worst, function(x)
x$mean_efficiency))
)
eff.B[, , k] <- cbind(
do.call(rbind, lapply(deasol$alphacut[[k]]$DMU$Best, function(x)
x$efficiency)),
unlist(lapply(deasol$alphacut[[k]]$DMU$Best, function(x)
x$mean_efficiency))
)
}
}
} else if ("beta" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
eff.W <- matrix(0, nrow = nde, ncol = nalpha)
rownames(eff.W) <- dmunames_eval
colnames(eff.W) <- names(deasol$alphacut)
eff.B <- eff.W
for (j in 1:nalpha) {
eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
x$beta))
eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
x$beta))
}
} else if ("delta" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
eff.W <- matrix(0, nrow = nde, ncol = nalpha)
rownames(eff.W) <- dmunames_eval
colnames(eff.W) <- names(deasol$alphacut)
eff.B <- eff.W
for (j in 1:nalpha) {
eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
x$delta))
eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
x$delta))
}
} else if ("objval" %in% names(deasol$alphacut[[1]]$DMU$Worst[[1]])) {
eff.W <- matrix(0, nrow = nde, ncol = nalpha)
rownames(eff.W) <- dmunames_eval
colnames(eff.W) <- names(deasol$alphacut)
eff.B <- eff.W
for (j in 1:nalpha) {
eff.W[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Worst, function(x)
x$objval))
eff.B[, j] <- unlist(lapply(deasol$alphacut[[j]]$DMU$Best, function(x)
x$objval))
}
} else {
stop("No efficiency/beta/delta/objval parameters in this solution!")
}
return(list(Worst = round(eff.W, 5), Best = round(eff.B, 5)))
} else if (grepl("possibilistic", deasol$modelname)) {
nh <- length(deasol$h)
if ("efficiency" %in% names(deasol$hlevel[[1]]$DMU[[1]])) {
neff <- length(deasol$hlevel[[1]]$DMU[[1]]$efficiency)
if (neff == 1) {
eff <- matrix(0, nrow = nde, ncol = nh)
rownames(eff) <- dmunames_eval
colnames(eff) <- names(deasol$hlevel)
for (j in 1:nh) {
eff[, j] <- unlist(lapply(deasol$hlevel[[j]]$DMU, function(x)
x$efficiency))
}
} else {
eff <- array(0,
dim = c(nde, neff + 1, nh),
dimnames = list(dmunames_eval,
c(names(deasol$hlevel[[1]]$DMU[[1]]$efficiency), "mean_efficiency"),
names(deasol$hlevel)))
for (k in 1:nh) {
eff[, , k] <- cbind(
do.call(rbind, lapply(deasol$hlevel[[k]]$DMU, function(x)
x$efficiency)),
unlist(lapply(deasol$hlevel[[k]]$DMU, function(x)
x$mean_efficiency))
)
}
}
} else if ("beta" %in% names(deasol$hlevel[[1]]$DMU[[1]])) {
eff <- matrix(0, nrow = nde, ncol = nh)
rownames(eff) <- dmunames_eval
colnames(eff) <- names(deasol$hlevel)
for (j in 1:nh) {
eff[, j] <- unlist(lapply(deasol$hlevel[[j]]$DMU, function(x)
x$beta))
}
} else {
stop("No efficiency/beta parameters in this solution!")
}
return(round(eff, 5))
} else if (grepl("guotanaka", deasol$modelname)) {
nh <- length(deasol$h)
eff <- array(0,
dim = c(nde, 3, nh),
dimnames = list(dmunames_eval,
names(deasol$hlevel[[1]]$DMU[[1]]$efficiency),
names(deasol$hlevel)))
for (k in 1:nh) {
eff[, , k] <- do.call(rbind, lapply(deasol$hlevel[[k]]$DMU, function(x)
x$efficiency))
}
return(round(eff, 5))
}
} |
# UK soil texture plot
soil.texture.uk <- function (soiltexture = NULL, main = "",
    at = seq(0.1, 0.9, by = 0.1),
    axis.labels = c("percent sand", "percent silt", "percent clay"),
    tick.labels = list(l = seq(10, 90, by = 10), r = seq(10, 90, by = 10),
    b = seq(10, 90, by = 10)), show.names = TRUE,
    show.lines = TRUE, col.names = "gray", bg.names = par("bg"),
    show.grid = FALSE, col.axis = "black", col.lines = "gray",
    col.grid = "gray", lty.grid = 3, show.legend = FALSE, label.points = FALSE,
    point.labels = NULL, col.symbols = "black", pch = par("pch"),
    h1 = NA, h3 = NA, t1 = NA, t3 = NA, lwduk = 2, xpos = NA, ypos = NA,
    snames = NA, cexuk = 1.1, ...) {
  # Draw a UK-style soil texture ternary diagram (triax.plot from plotrix),
  # overlay the UK texture-class boundary lines and class names, and
  # optionally plot the rows of `soiltexture` as points via triax.points().
  # Returns (invisibly) whatever triax.points() returns, or NULL when no
  # soil texture data was supplied.

  # Vertical scaling of the ternary diagram: one unit of the third
  # component spans sin(pi/3) in y.
  yfac <- sin(pi / 3)

  # Default boundary segments of the UK texture classes, given as head
  # (h1, h3) and tail (t1, t3) coordinates in axis fractions, plus default
  # label positions (x from the left vertex) and class names.  Each is only
  # filled in when the caller left the argument as NA.
  if (is.na(h1[1])) h1 <- c(82, 85, 70, 50, 45, 20) / 100
  if (is.na(h3[1])) h3 <- c(18, 15, 30, 30, 35, 0) / 100
  if (is.na(t1[1])) t1 <- c(0, 70, 50, 45, 0, 20) / 100
  if (is.na(t3[1])) t3 <- c(18, 0, 30, 35, 35, 35) / 100
  if (is.na(xpos[1])) xpos <- c(0.5, 0.77, 0.45, 0.1, 0.45, 0.85)
  if (is.na(ypos[1])) ypos <- c(0.65, 0.265, 0.265, 0.07, 0.1, 0.1)
  if (is.na(snames[1])) {
    snames <- c("Clays", "Medium silts", "Medium loams",
                "Sands", "Light loams", "Light silts")
  }

  # Permit drawing outside the plot region (axis arrows, labels).
  par(xpd = TRUE)
  plot(0.5, type = "n", axes = FALSE, xlim = c(0, 1), ylim = c(0, 1),
       main = NA, xlab = NA, ylab = NA)
  triax.plot(x = NULL, main = main, at = at, axis.labels = axis.labels,
             tick.labels = tick.labels, col.axis = col.axis,
             show.grid = show.grid, col.grid = col.grid, lty.grid = lty.grid)

  # Direction arrows along the three axes of the triangle.
  arrows(0.12, 0.41, 0.22, 0.57, length = 0.15)
  arrows(0.78, 0.57, 0.88, 0.41, length = 0.15)
  arrows(0.6, -0.1, 0.38, -0.1, length = 0.15)

  if (show.lines) {
    # Class-boundary lines, mapped from ternary to cartesian coordinates.
    segments(1 - h1 - h3 / 2, h3 * yfac,
             1 - t1 - t3 / 2, t3 * yfac,
             col = col.lines, lwd = lwduk)
  }
  if (show.names) {
    boxed.labels(xpos, ypos * yfac, snames, border = FALSE,
                 xpad = 0.5, cex = cexuk)
  }
  par(xpd = FALSE)

  if (is.null(soiltexture)) {
    return(NULL)
  }
  soilpoints <- triax.points(soiltexture, show.legend = show.legend,
                             label.points = label.points,
                             point.labels = point.labels,
                             col.symbols = col.symbols, pch = pch, ...)
  invisible(soilpoints)
}
| /plotrix/R/soil.texture.uk.R | no_license | edenduthie/palsR | R | false | false | 2,256 | r | # UK soil texture plot
# Draw a UK-style soil texture ternary diagram (via plotrix's triax.plot),
# overlay the UK texture-class boundary lines and class names, and
# optionally plot the rows of `soiltexture` as points with triax.points().
# h1/h3 and t1/t3 are the head/tail coordinates (axis fractions) of the
# class-boundary segments; xpos/ypos/snames position and label the classes.
# Returns (invisibly) the triax.points() result, or NULL when soiltexture
# is NULL.
soil.texture.uk <- function (soiltexture = NULL, main = "",
at = seq(0.1, 0.9, by = 0.1),
axis.labels = c("percent sand", "percent silt", "percent clay"),
tick.labels = list(l = seq(10, 90, by = 10), r = seq(10, 90, by = 10),
b = seq(10, 90, by = 10)), show.names = TRUE,
show.lines = TRUE, col.names = "gray", bg.names = par("bg"),
show.grid = FALSE, col.axis = "black", col.lines = "gray",
col.grid = "gray", lty.grid = 3, show.legend = FALSE, label.points = FALSE,
point.labels = NULL, col.symbols = "black", pch = par("pch"),
h1 = NA, h3 = NA, t1 = NA, t3 = NA, lwduk = 2, xpos = NA, ypos = NA,
snames = NA, cexuk = 1.1, ...) {
# Default UK class-boundary segment coordinates, used only when the caller
# left the corresponding argument as NA.
if(is.na(h1[1])) h1<-c(82, 85, 70, 50, 45, 20) / 100
if(is.na(h3[1])) h3<-c(18, 15, 30, 30, 35, 0) / 100
if(is.na(t1[1])) t1<-c(0, 70, 50, 45, 0, 20) / 100
if(is.na(t3[1])) t3<-c(18, 0, 30, 35, 35, 35) / 100
# Name positions (x and y, x starting from the left point)
if(is.na(xpos[1])) xpos<-c(0.5,0.77,0.45,0.1,0.45,0.85)
if(is.na(ypos[1])) ypos<-c(0.65,0.265,0.265,0.07,0.1,0.1)
if(is.na(snames[1])) snames <- c("Clays","Medium silts","Medium loams",
"Sands","Light loams","Light silts")
# Allow drawing outside the plot region (axis arrows, labels).
par(xpd = TRUE)
plot(0.5, type = "n", axes = FALSE, xlim = c(0,1),ylim = c(0,1),
main = NA, xlab = NA, ylab = NA)
triax.plot(x=NULL,main = main, at = at, axis.labels = axis.labels,
tick.labels = tick.labels, col.axis = col.axis, show.grid = show.grid,
col.grid = col.grid, lty.grid = lty.grid)
# Direction arrows along the three axes of the triangle.
arrows(0.12, 0.41, 0.22, 0.57, length = 0.15)
arrows(0.78, 0.57, 0.88, 0.41, length = 0.15)
arrows(0.6, -0.1, 0.38, -0.1, length = 0.15)
if(show.lines) {
# Class-boundary lines; sin(pi/3) maps ternary heights to cartesian y.
triax.segments <- function(h1, h3, t1, t3, col, lwd) {
segments(1 - h1 - h3/2, h3 * sin(pi/3), 1 - t1 -
t3/2, t3 * sin(pi/3), col = col, lwd = lwd)
}
triax.segments(h1 , h3, t1, t3, col.lines, lwduk)
}
if (show.names) {
boxed.labels(xpos, ypos* sin(pi/3), snames, border = FALSE,
xpad = 0.5, cex = cexuk)
}
par(xpd = FALSE)
# Nothing to plot: just the empty diagram was requested.
if (is.null(soiltexture)) return(NULL)
soilpoints <- triax.points(soiltexture, show.legend = show.legend,
label.points = label.points, point.labels = point.labels,
col.symbols = col.symbols, pch = pch, ...)
invisible(soilpoints)
}
|
\name{cps4c_small}
\alias{cps4c_small}
\docType{data}
\title{
cps4c_small Data
}
\description{
Obs: 1000 observations (subset of cps4.dat)
}
\usage{data("cps4c_small")}
\format{
A data frame with 1000 observations on the following 12 variables.
\describe{
\item{\code{wage}}{earnings per hour}
\item{\code{educ}}{years of education}
\item{\code{exper}}{post education years experience}
\item{\code{hrswk}}{usual hours worked per week}
\item{\code{married}}{= 1 if married}
\item{\code{female}}{= 1 if female}
\item{\code{metro}}{= 1 if lives in metropolitan area}
\item{\code{midwest}}{= 1 if lives in midwest}
\item{\code{south}}{= 1 if lives in south}
\item{\code{west}}{= 1 if lives in west}
\item{\code{black}}{= 1 if black}
\item{\code{asian}}{= 1 if asian}
}
}
\details{
A subset of 1000 observations taken from the full cps4 data set (cps4.dat).
}
\source{
http://principlesofeconometrics.com/poe4/poe4.htm
}
\references{
Dr. Kang Sun Lee, Louisiana Department of Health and Human Services
}
\examples{
data(cps4c_small)
## maybe str(cps4c_small) ; plot(cps4c_small) ...
}
\keyword{datasets}
| /man/cps4c_small.Rd | no_license | Worathan/PoEdata | R | false | false | 1,149 | rd | \name{cps4c_small}
\alias{cps4c_small}
\docType{data}
\title{
cps4c_small Data
}
\description{
Obs: 1000 observations (subset of cps4.dat)
}
\usage{data("cps4c_small")}
\format{
A data frame with 1000 observations on the following 12 variables.
\describe{
\item{\code{wage}}{earnings per hour}
\item{\code{educ}}{years of education}
\item{\code{exper}}{post education years experience}
\item{\code{hrswk}}{usual hours worked per week}
\item{\code{married}}{= 1 if married}
\item{\code{female}}{= 1 if female}
\item{\code{metro}}{= 1 if lives in metropolitan area}
\item{\code{midwest}}{= 1 if lives in midwest}
\item{\code{south}}{= 1 if lives in south}
\item{\code{west}}{= 1 if lives in west}
\item{\code{black}}{= 1 if black}
\item{\code{asian}}{= 1 if asian}
}
}
\details{
A subset of 1000 observations taken from the full cps4 data set (cps4.dat).
}
\source{
http://principlesofeconometrics.com/poe4/poe4.htm
}
\references{
Dr. Kang Sun Lee, Louisiana Department of Health and Human Services
}
\examples{
data(cps4c_small)
## maybe str(cps4c_small) ; plot(cps4c_small) ...
}
\keyword{datasets}
|
hotcode <- function(train, test){
  # Feature engineering for the Black Friday purchase data: one-hot encodes
  # the categorical columns, builds frequency-count and mean-purchase
  # features on `train`, merges them onto `test` (with product-category
  # fallbacks for products unseen in training), and returns train + test
  # row-bound into a single numeric-featured data.frame.
  #
  # Requires: dummies (dummy.data.frame), data.table, dplyr.
  # Assumes columns 1:11 of both inputs are the id/categorical columns
  # (User_ID, Product_ID, Gender, Age, ..., Product_Category_1/2/3) --
  # TODO confirm against the caller's column order.
  #
  # Args:
  #   train: training data.frame, including a numeric Purchase column.
  #   test:  test data.frame with the same first 11 columns, no Purchase.
  # Returns:
  #   data.frame with train rows first, then test rows (Purchase filled
  #   with NA by rbindlist(fill = TRUE)); columns 3+ coerced to numeric.

  # Work on character columns so the recodes below behave predictably.
  train[,1:11] <- lapply(train[,1:11], as.character)
  test[,1:11] <- lapply( test[,1:11], as.character)
  # Binary-encode Gender and collapse the "4+" stay bucket to "4".
  train$Gender <- ifelse(train$Gender=='F', 0, 1)
  test$Gender <- ifelse(test$Gender=='F', 0, 1)
  train$Stay_In_Current_City_Years <- ifelse(train$Stay_In_Current_City_Years=='4+', '4', train$Stay_In_Current_City_Years)
  test$Stay_In_Current_City_Years <- ifelse(test$Stay_In_Current_City_Years=='4+', '4', test$Stay_In_Current_City_Years)
  # One-hot encode city category and years-in-city.
  train <- dummy.data.frame(train, names = c("City_Category", "Stay_In_Current_City_Years"), sep = "_")
  test <- dummy.data.frame(test, names = c("City_Category", "Stay_In_Current_City_Years"), sep = "_")
  train <- data.table(train)
  test <- data.table(test)
  # Frequency-count features computed on train and left-joined onto test
  # (unseen levels get NA; the product-level ones are backfilled below).
  train[, Age_Count := .N, by = Age]
  train_test_age <- unique(select(train, Age, Age_Count))
  test <- merge(test, train_test_age, by = 'Age', all.x = TRUE)
  train[, Occupation_Count := .N, by = Occupation]
  train_test_Occupation <- unique(select(train, Occupation, Occupation_Count))
  test <- merge(test, train_test_Occupation, by = 'Occupation',all.x = TRUE)
  train[, Product_Category_1_Count := .N, by = Product_Category_1]
  train_test_Product_1 <- unique(select(train, Product_Category_1, Product_Category_1_Count))
  test <- merge(test, train_test_Product_1, by = 'Product_Category_1',all.x = TRUE)
  train[, Product_Category_2_Count := .N, by = Product_Category_2]
  train_test_Product_2 <- unique(select(train, Product_Category_2, Product_Category_2_Count))
  test <- merge(test, train_test_Product_2, by = 'Product_Category_2',all.x = TRUE)
  train[, Product_Category_3_Count := .N, by = Product_Category_3]
  train_test_Product_3 <- unique(select(train, Product_Category_3, Product_Category_3_Count))
  test <- merge(test, train_test_Product_3, by = 'Product_Category_3',all.x = TRUE)
  train[, User_Count := .N, by = User_ID]
  train_test_User <- unique(select(train, User_ID, User_Count))
  test <- merge(test, train_test_User, by = 'User_ID',all.x = TRUE)
  train[, Product_Count := .N, by = Product_ID]
  train_test_Product <- unique(select(train, Product_ID, Product_Count))
  test <- merge(test, train_test_Product, by = 'Product_ID',all.x = TRUE)
  # Mean-purchase features (per product and per user) from train.
  train[, Mean_Purchase_Product := mean(Purchase), by = Product_ID]
  train_product_purchase <- unique(select(train, Product_ID, Mean_Purchase_Product))
  test <- merge(test, train_product_purchase, by = 'Product_ID', all.x = TRUE)
  train[, Mean_Purchase_User := mean(Purchase), by = User_ID]
  train_user_purchase <- unique(select(train, User_ID, Mean_Purchase_User))
  test <- merge(test, train_user_purchase, by = 'User_ID', all.x = TRUE)
  # Fallback features keyed on the category triple "pro" (cat1_cat2_cat3),
  # plus test's own per-product counts, for products absent from train.
  test[, pro_self_count := .N, by = Product_ID]
  train$pro <- paste(train$Product_Category_1, train$Product_Category_2, train$Product_Category_3, sep='_')
  test$pro <- paste(test$Product_Category_1, test$Product_Category_2, test$Product_Category_3, sep='_')
  train[, pro_mean_purchase := mean(Purchase), by = pro]
  train_pro_purchase <- unique(select(train, pro, pro_mean_purchase))
  test <- merge(test, train_pro_purchase, by = 'pro', all.x = TRUE)
  test <- data.frame(test)
  # Backfill NAs from the fallbacks.  (Vectorized replacement for the
  # original row-by-row for loop, which was very slow on a data.frame.)
  na_mean <- is.na(test$Mean_Purchase_Product)
  test$Mean_Purchase_Product[na_mean] <- test$pro_mean_purchase[na_mean]
  na_count <- is.na(test$Product_Count)
  test$Product_Count[na_count] <- test$pro_self_count[na_count]
  # Drop raw/helper columns and put both sets in the same column order
  # (test simply lacks Purchase).
  train <- subset(train, select = -c(Age, pro, pro_mean_purchase, Occupation, Product_Category_1,Product_Category_2, Product_Category_3))
  test <- subset(test, select = -c(pro_self_count, Age, Occupation, pro, pro_mean_purchase, Product_Category_1,Product_Category_2, Product_Category_3))
  train <- select(train, User_ID, Product_ID, Gender, City_Category_A, City_Category_B, City_Category_C, Stay_In_Current_City_Years_0, Stay_In_Current_City_Years_1,
                  Stay_In_Current_City_Years_2, Stay_In_Current_City_Years_3, Stay_In_Current_City_Years_4, Marital_Status, Age_Count, Occupation_Count, Product_Category_1_Count,
                  Product_Category_2_Count, Product_Category_3_Count, User_Count, Product_Count, Mean_Purchase_Product, Mean_Purchase_User, Purchase)
  test <- select(test, User_ID, Product_ID, Gender, City_Category_A, City_Category_B, City_Category_C, Stay_In_Current_City_Years_0, Stay_In_Current_City_Years_1,
                 Stay_In_Current_City_Years_2, Stay_In_Current_City_Years_3, Stay_In_Current_City_Years_4, Marital_Status, Age_Count, Occupation_Count, Product_Category_1_Count,
                 Product_Category_2_Count, Product_Category_3_Count, User_Count, Product_Count, Mean_Purchase_Product, Mean_Purchase_User)
  # Stack the two sets; `parts` avoids shadowing base::c as the original did.
  parts <- list(train, test)
  train_test <- rbindlist(parts, fill = TRUE)
  train_test <- data.frame(train_test)
  train_test[,3:dim(train_test)[2]] <- lapply(train_test[,3:dim(train_test)[2]], as.numeric)
  return(train_test)
}
} | /hotcode.R | no_license | jingliang92/analyticsvidhya_practice | R | false | false | 5,020 | r | hotcode <- function(train, test){
# Body of hotcode(train, test): one-hot encoding plus count/mean-purchase
# feature construction on `train`, merged onto `test`, returning both sets
# row-bound.  Requires dummies (dummy.data.frame), data.table and dplyr.
# Assumes columns 1:11 are the id/categorical columns -- TODO confirm.
train[,1:11] <- lapply(train[,1:11], as.character)
test[,1:11] <- lapply( test[,1:11], as.character)
# Binary-encode Gender; collapse the "4+" stay bucket to "4".
train$Gender <- ifelse(train$Gender=='F', 0, 1)
test$Gender <- ifelse(test$Gender=='F', 0, 1)
train$Stay_In_Current_City_Years <- ifelse(train$Stay_In_Current_City_Years=='4+', '4', train$Stay_In_Current_City_Years)
test$Stay_In_Current_City_Years <- ifelse(test$Stay_In_Current_City_Years=='4+', '4', test$Stay_In_Current_City_Years)
# One-hot encode city category and years-in-city.
train <- dummy.data.frame(train, names = c("City_Category", "Stay_In_Current_City_Years"), sep = "_")
test <- dummy.data.frame(test, names = c("City_Category", "Stay_In_Current_City_Years"), sep = "_")
train <- data.table(train)
test <- data.table(test)
# Frequency-count features computed on train, left-joined onto test
# (levels unseen in train yield NA; product-level NAs are backfilled below).
train[, Age_Count := .N, by = Age]
train_test_age <- unique(select(train, Age, Age_Count))
test <- merge(test, train_test_age, by = 'Age', all.x = TRUE)
train[, Occupation_Count := .N, by = Occupation]
train_test_Occupation <- unique(select(train, Occupation, Occupation_Count))
test <- merge(test, train_test_Occupation, by = 'Occupation',all.x = TRUE)
train[, Product_Category_1_Count := .N, by = Product_Category_1]
train_test_Product_1 <- unique(select(train, Product_Category_1, Product_Category_1_Count))
test <- merge(test, train_test_Product_1, by = 'Product_Category_1',all.x = TRUE)
train[, Product_Category_2_Count := .N, by = Product_Category_2]
train_test_Product_2 <- unique(select(train, Product_Category_2, Product_Category_2_Count))
test <- merge(test, train_test_Product_2, by = 'Product_Category_2',all.x = TRUE)
train[, Product_Category_3_Count := .N, by = Product_Category_3]
train_test_Product_3 <- unique(select(train, Product_Category_3, Product_Category_3_Count))
test <- merge(test, train_test_Product_3, by = 'Product_Category_3',all.x = TRUE)
train[, User_Count := .N, by = User_ID]
train_test_User <- unique(select(train, User_ID, User_Count))
test <- merge(test, train_test_User, by = 'User_ID',all.x = TRUE)
train[, Product_Count := .N, by = Product_ID]
train_test_Product <- unique(select(train, Product_ID, Product_Count))
test <- merge(test, train_test_Product, by = 'Product_ID',all.x = TRUE)
# Mean-purchase features (per product and per user) from train.
train[, Mean_Purchase_Product := mean(Purchase), by = Product_ID]
train_product_purchase <- unique(select(train, Product_ID, Mean_Purchase_Product))
test <- merge(test, train_product_purchase, by = 'Product_ID', all.x = TRUE)
train[, Mean_Purchase_User := mean(Purchase), by = User_ID]
train_user_purchase <- unique(select(train, User_ID, Mean_Purchase_User))
test <- merge(test, train_user_purchase, by = 'User_ID', all.x = TRUE)
# Fallbacks keyed on the category triple "pro" (cat1_cat2_cat3) and on
# test's own per-product counts, for products absent from train.
test[, pro_self_count := .N, by = Product_ID]
train$pro <- paste(train$Product_Category_1, train$Product_Category_2, train$Product_Category_3, sep='_')
test$pro <- paste(test$Product_Category_1, test$Product_Category_2, test$Product_Category_3, sep='_')
train[, pro_mean_purchase := mean(Purchase), by = pro]
train_pro_purchase <- unique(select(train, pro, pro_mean_purchase))
test <- merge(test, train_pro_purchase, by = 'pro', all.x = TRUE)
test <- data.frame(test)
# NOTE(review): this row-wise loop over the whole test set is very slow on
# a data.frame; a vectorized is.na()-indexed replacement would be
# equivalent and much faster.
for (i in 1:dim(test)[1]) {
if(is.na(test[i,'Mean_Purchase_Product'])){
test[i,'Mean_Purchase_Product'] = test[i,'pro_mean_purchase']
}
if(is.na(test[i,'Product_Count'])){
test[i,'Product_Count'] = test[i, 'pro_self_count']
}
}
# Drop raw/helper columns and order both sets identically (test lacks
# Purchase, which rbindlist(fill = TRUE) fills with NA).
train <- subset(train, select = -c(Age, pro, pro_mean_purchase, Occupation, Product_Category_1,Product_Category_2, Product_Category_3))
test <- subset(test, select = -c(pro_self_count, Age, Occupation, pro, pro_mean_purchase, Product_Category_1,Product_Category_2, Product_Category_3))
train <- select(train, User_ID, Product_ID, Gender, City_Category_A, City_Category_B, City_Category_C, Stay_In_Current_City_Years_0, Stay_In_Current_City_Years_1,
Stay_In_Current_City_Years_2, Stay_In_Current_City_Years_3, Stay_In_Current_City_Years_4, Marital_Status, Age_Count, Occupation_Count, Product_Category_1_Count,
Product_Category_2_Count, Product_Category_3_Count, User_Count, Product_Count, Mean_Purchase_Product, Mean_Purchase_User, Purchase)
test <- select(test, User_ID, Product_ID, Gender, City_Category_A, City_Category_B, City_Category_C, Stay_In_Current_City_Years_0, Stay_In_Current_City_Years_1,
Stay_In_Current_City_Years_2, Stay_In_Current_City_Years_3, Stay_In_Current_City_Years_4, Marital_Status, Age_Count, Occupation_Count, Product_Category_1_Count,
Product_Category_2_Count, Product_Category_3_Count, User_Count, Product_Count, Mean_Purchase_Product, Mean_Purchase_User)
# NOTE(review): `c` here shadows base::c within the function scope.
c <- list(train, test)
train_test <- rbindlist(c, fill = TRUE)
train_test <- data.frame(train_test)
train_test[,3:dim(train_test)[2]] <- lapply(train_test[,3:dim(train_test)[2]], as.numeric)
return(train_test)
} |
##1/13/16
##Update Beta, n, seed, and file name
## Simulation study: generates K randomized trials with Weibull failure
## times grouped into follow-up-visit intervals, with interval censoring,
## then applies the estimation methods defined in allCode.R to each trial.
## NOTE(review): this script is restyle-sensitive -- reordering any of the
## random draws below would change the RNG stream and hence the results.
## NOTE(review): rm(list = ls()) wipes the calling workspace and setwd()
## uses a hard-coded absolute path; both are fragile outside the original
## author's environment.
rm(list=ls())
seed <- 754470050
###################################################################################
##LIBRARIES##
###################################################################################
require(survival)
require(MASS)
require(gaussquad)
require(numDeriv)
currWorkDir <- getwd()
setwd("/users/emhuang/Ravi/paper/genericCode")
source("allCode.R")
setwd(currWorkDir)
############################################################################################################################################
##FUNCTIONS##
############################################################################################################################################
## Weibull hazard and cumulative hazard with shape a, scale b, and
## multiplicative covariate effect exb = exp(trueBeta * Z).
hazard <- function(t, a, b, exb) exb * a * (t/b)^(a-1)
cumhaz <- function(t, a, b, exb) exb * b * (t/b)^a
## Root equation for inverse-transform sampling of the failure time:
## solves Lambda(t) = u (written as sqrt(Lambda(t)) = sqrt(u), presumably
## for better uniroot behavior near 0 -- TODO confirm).
froot <- function(x, u, ...) sqrt(cumhaz(x, ...)) - sqrt(u)
############################################################################################################################################
##INPUTS##
############################################################################################################################################
K <- 10000 ##number of randomized trials to be simulated
m <- 3 ##number of follow-up visits
pTreatment <- 0.5 ##probability of being assigned to treatment group
trueBeta <- 0.6 ##treatment effect
n <- 200 ##sample size of each simulated randomized trial
## One RNG seed per simulated trial, so individual trials are reproducible.
set.seed(seed)
simSeeds <- sample(10^9,K)
visit1Time <- 2
visit2Time <- 4
visit3Time <- 6
###################################################################################
##SIMULATIONS##
###################################################################################
nMethods <- 5
## result: two columns per method (filled by applyMethods from allCode.R --
## presumably estimate and SE; verify there).  nties: number of observed
## failures at each of the 3 visit times, per trial.
nMethods <- 5 -> .unused_dup; rm(.unused_dup) # (no-op guard removed below)
result <- matrix(NA, nrow = K, ncol = nMethods * 2)
nties <- matrix(NA, nrow = K, ncol = 3)
for(k in 1:K){
set.seed(simSeeds[k])
###################################################################################
##Generation of Z (random treatment assignment: 1 if control, 0 if treatment)##
###################################################################################
data <- data.frame(Z=rbinom(n,size=1,prob=pTreatment))
###################################################################################
##Generation of C (censoring time, the last visit the subject attends)##
###################################################################################
data$C <- sample(x=0:m,size=n,replace = TRUE, prob = c(.08,.1,.1,.72))
###################################################################################
##Generation of T (frailty time)##
###################################################################################
##Failure time generated from the Weibull hazard
a <- 2
b <- 100
u <- -log(runif(n))
exb <- exp(trueBeta*data$Z)
data$Tcont <- NA
## Invert the cumulative hazard numerically for each subject.
for(i in 1:n){
data$Tcont[i] <- uniroot(froot, interval=c(1.e-14, 1e04), u = u[i], exb = exb[i], a=a, b=b)$root
}
rm(a,b,u,exb)
#hist(data$Tcont)
#summary(data$Tcont)
###################################################################################
##Generation of T' (grouped frailty time)##
###################################################################################
## Map Tcont into visit intervals: (0,2]->1, (2,4]->2, (4,6]->3; times past
## the last visit keep the sentinel 10000 (always beyond any censoring C).
data$Tgrouped <- 10000
for(i in 1:n){
if(data$Tcont[i]==0){
data$Tgrouped[i] <- 0
}else if(0<data$Tcont[i]&data$Tcont[i]<=visit1Time){
data$Tgrouped[i] <- 1
}else if(visit1Time<data$Tcont[i] & data$Tcont[i]<=visit2Time){
data$Tgrouped[i] <- 2
}else if(visit2Time<data$Tcont[i] & data$Tcont[i]<=visit3Time){
data$Tgrouped[i] <- 3
}
}
###################################################################################
##Calculate delta (censoring indicator) and V (visit time depending on delta)##
###################################################################################
data$delta <- 1
data$delta[data$Tgrouped>data$C] <- 0
data$V <- data$delta*data$Tgrouped + (1-data$delta)*data$C
#table(data$C)/n
#table(data$Tgrouped)/n
#table(data$delta,data$V)/n
#temp <- table(data$delta,data$V)/n
#sum(temp[1,1:3]) #proportion of n subjects who dropout early
#temp[1,4] #proportion of n subjects who are adminstratively censored
#sum(temp[2,]) #proportion who are observed to be frail
data <- data.frame(delta = data$delta, V = data$V, Z = data$Z)
## Drop subjects with observed time 0 (never seen after baseline).
data <- subset(data, V!=0)
## Row 2 of the table is delta == 1: observed failures at each visit time.
nties[k,] <- table(data$delta, data$V)[2,]
temp <- table(data$delta,data$V)/n
## Skip trials with no censoring or no failures (table has only one row).
if (nrow(temp)!=2) {
result[k,] <- rep(NA,times=nMethods * 2)
warning(paste(k, ":Either no censoring or no failure."))
} else {
result[k,] <- applyMethods(data)
}
}
save(nties, result, file="Beta06_n200.Rdata") | /groupedData/Simulations/SingleCovariate/varyBeta/code/Beta06_n200.R | no_license | emhuang1/groupedData | R | false | false | 4,768 | r | ##1/13/16
##Update Beta, n, seed, and file name
## Simulation study: generates K randomized trials with Weibull failure
## times grouped into follow-up-visit intervals, with interval censoring,
## then applies the estimation methods defined in allCode.R to each trial.
## NOTE(review): restyle-sensitive -- reordering any random draws below
## would change the RNG stream and hence the results.
## NOTE(review): rm(list = ls()) wipes the calling workspace and setwd()
## uses a hard-coded absolute path; both are fragile.
rm(list=ls())
seed <- 754470050
###################################################################################
##LIBRARIES##
###################################################################################
require(survival)
require(MASS)
require(gaussquad)
require(numDeriv)
currWorkDir <- getwd()
setwd("/users/emhuang/Ravi/paper/genericCode")
source("allCode.R")
setwd(currWorkDir)
############################################################################################################################################
##FUNCTIONS##
############################################################################################################################################
## Weibull hazard and cumulative hazard with shape a, scale b, and
## multiplicative covariate effect exb = exp(trueBeta * Z).
hazard <- function(t, a, b, exb) exb * a * (t/b)^(a-1)
cumhaz <- function(t, a, b, exb) exb * b * (t/b)^a
## Root equation for inverse-transform sampling of the failure time:
## solves Lambda(t) = u (written with sqrt on both sides, presumably for
## better uniroot behavior near 0 -- TODO confirm).
froot <- function(x, u, ...) sqrt(cumhaz(x, ...)) - sqrt(u)
############################################################################################################################################
##INPUTS##
############################################################################################################################################
K <- 10000 ##number of randomized trials to be simulated
m <- 3 ##number of follow-up visits
pTreatment <- 0.5 ##probability of being assigned to treatment group
trueBeta <- 0.6 ##treatment effect
n <- 200 ##sample size of each simulated randomized trial
## One RNG seed per simulated trial, so individual trials are reproducible.
set.seed(seed)
simSeeds <- sample(10^9,K)
visit1Time <- 2
visit2Time <- 4
visit3Time <- 6
###################################################################################
##SIMULATIONS##
###################################################################################
nMethods <- 5
## result: two columns per method (filled by applyMethods from allCode.R --
## presumably estimate and SE; verify there).  nties: number of observed
## failures at each of the 3 visit times, per trial.
result <- matrix(NA, nrow = K, ncol = nMethods * 2)
nties <- matrix(NA, nrow = K, ncol = 3)
for(k in 1:K){
set.seed(simSeeds[k])
###################################################################################
##Generation of Z (random treatment assignment: 1 if control, 0 if treatment)##
###################################################################################
data <- data.frame(Z=rbinom(n,size=1,prob=pTreatment))
###################################################################################
##Generation of C (censoring time, the last visit the subject attends)##
###################################################################################
data$C <- sample(x=0:m,size=n,replace = TRUE, prob = c(.08,.1,.1,.72))
###################################################################################
##Generation of T (frailty time)##
###################################################################################
##Failure time generated from the Weibull hazard
a <- 2
b <- 100
u <- -log(runif(n))
exb <- exp(trueBeta*data$Z)
data$Tcont <- NA
## Invert the cumulative hazard numerically for each subject.
for(i in 1:n){
data$Tcont[i] <- uniroot(froot, interval=c(1.e-14, 1e04), u = u[i], exb = exb[i], a=a, b=b)$root
}
rm(a,b,u,exb)
#hist(data$Tcont)
#summary(data$Tcont)
###################################################################################
##Generation of T' (grouped frailty time)##
###################################################################################
## Map Tcont into visit intervals: (0,2]->1, (2,4]->2, (4,6]->3; times past
## the last visit keep the sentinel 10000 (always beyond any censoring C).
data$Tgrouped <- 10000
for(i in 1:n){
if(data$Tcont[i]==0){
data$Tgrouped[i] <- 0
}else if(0<data$Tcont[i]&data$Tcont[i]<=visit1Time){
data$Tgrouped[i] <- 1
}else if(visit1Time<data$Tcont[i] & data$Tcont[i]<=visit2Time){
data$Tgrouped[i] <- 2
}else if(visit2Time<data$Tcont[i] & data$Tcont[i]<=visit3Time){
data$Tgrouped[i] <- 3
}
}
###################################################################################
##Calculate delta (censoring indicator) and V (visit time depending on delta)##
###################################################################################
data$delta <- 1
data$delta[data$Tgrouped>data$C] <- 0
data$V <- data$delta*data$Tgrouped + (1-data$delta)*data$C
#table(data$C)/n
#table(data$Tgrouped)/n
#table(data$delta,data$V)/n
#temp <- table(data$delta,data$V)/n
#sum(temp[1,1:3]) #proportion of n subjects who dropout early
#temp[1,4] #proportion of n subjects who are adminstratively censored
#sum(temp[2,]) #proportion who are observed to be frail
data <- data.frame(delta = data$delta, V = data$V, Z = data$Z)
## Drop subjects with observed time 0 (never seen after baseline).
data <- subset(data, V!=0)
## Row 2 of the table is delta == 1: observed failures at each visit time.
nties[k,] <- table(data$delta, data$V)[2,]
temp <- table(data$delta,data$V)/n
## Skip trials with no censoring or no failures (table has only one row).
if (nrow(temp)!=2) {
result[k,] <- rep(NA,times=nMethods * 2)
warning(paste(k, ":Either no censoring or no failure."))
} else {
result[k,] <- applyMethods(data)
}
}
save(nties, result, file="Beta06_n200.Rdata") |
## Gradient (score vector) with respect to the longitudinal fixed effects
## `betas`, for the Weibull ("WB") survival submodel of a joint model.
## NOTE(review): this function closes over many free objects from its
## defining environment (X, Xs, Zsb, alpha, Dalpha, parameterization,
## indFixed, sigma, sigma.t, log.st, eta.tw, P, wk, id.GK, p.byt, wGH, d,
## Xtime, ncx, y, Zb, ...) -- it is not callable standalone and is
## presumably constructed inside the model-fitting routine.
gr.longWB <-
function (betas) {
## Longitudinal linear predictor at the observed measurement times.
eta.yx <- as.vector(X %*% betas)
if (parameterization %in% c("value", "both")) {
## Fitted longitudinal value at the quadrature points (fixed + random
## parts) and its alpha-weighted contribution to the survival predictor.
Ys <- as.vector(Xs %*% betas) + Zsb
WintF.vl.alph <- c(WintF.vl %*% alpha)
Ws.intF.vl.alph <- c(Ws.intF.vl %*% alpha)
eta.s <- Ws.intF.vl.alph * Ys
}
if (parameterization %in% c("slope", "both")) {
## Slope (time-derivative) of the longitudinal trajectory; only the
## fixed effects indexed by indFixed enter the derivative design matrix.
Ys.deriv <- as.vector(Xs.deriv %*% betas[indFixed]) + Zsb.deriv
WintF.sl.alph <- c(WintF.sl %*% Dalpha)
Ws.intF.sl.alph <- c(Ws.intF.sl %*% Dalpha)
eta.s <- if (parameterization == "both")
eta.s + Ws.intF.sl.alph * Ys.deriv
else
Ws.intF.sl.alph * Ys.deriv
}
exp.eta.tw.P <- exp(eta.tw) * P
## Score of the Gaussian longitudinal part (sign convention matches the
## optimizer's objective -- presumably the negative log-likelihood).
sc1 <- - crossprod(X, y - eta.yx - Zb) / sigma^2
## Weibull hazard kernel at the quadrature points, times weights wk.
Int <- wk * exp(log(sigma.t) + (sigma.t - 1) * log.st + eta.s)
sc2 <- numeric(ncx)
## Survival-part score, one beta coordinate at a time; rowsum() collapses
## the quadrature points back to subjects via id.GK.
for (i in 1:ncx) {
ki <- exp.eta.tw.P * switch(parameterization,
"value" = rowsum(Int * Ws.intF.vl.alph * Xs[, i], id.GK, reorder = FALSE),
"slope" = {ii <- match(i, indFixed);
if (is.na(ii)) 0 else rowsum(Int * Ws.intF.sl.alph * Xs.deriv[, ii], id.GK, reorder = FALSE)},
"both" = {ii <- match(i, indFixed);
rowsum(Int * (Ws.intF.vl.alph * Xs[, i] +
Ws.intF.sl.alph * if (is.na(ii)) 0 else Xs.deriv[, ii]), id.GK, reorder = FALSE)}
)
## Integrate over the random effects with weights wGH (p.byt supplied by
## the caller -- see the fitting routine for its exact meaning).
kii <- c((p.byt * ki) %*% wGH)
sc2[i] <- switch(parameterization,
"value" = - sum(d * WintF.vl.alph * Xtime[, i] - kii, na.rm = TRUE),
"slope" = {ii <- match(i, indFixed);
if (is.na(ii)) 0 else - sum(d * WintF.sl.alph * Xtime.deriv[, ii] - kii, na.rm = TRUE)},
"both" = {ii <- match(i, indFixed);
- sum(d * (WintF.vl.alph * Xtime[, i] +
WintF.sl.alph * if (is.na(ii)) 0 else Xtime.deriv[, ii]) - kii, na.rm = TRUE)}
)
}
## Total score: longitudinal plus survival contributions.
c(sc1 + sc2)
}
| /R/gr.longWB.R | no_license | cran/JM | R | false | false | 2,016 | r | gr.longWB <-
## Gradient (score vector) w.r.t. the longitudinal fixed effects `betas`
## for the Weibull ("WB") survival submodel of a joint model.
## NOTE(review): relies on many free objects from the defining environment
## (X, Xs, Zsb, alpha, Dalpha, parameterization, indFixed, sigma, sigma.t,
## log.st, eta.tw, P, wk, id.GK, p.byt, wGH, d, Xtime, ncx, y, Zb, ...);
## not callable standalone.
function (betas) {
## Longitudinal linear predictor at the observed measurement times.
eta.yx <- as.vector(X %*% betas)
if (parameterization %in% c("value", "both")) {
## Current-value association: fitted value at the quadrature points.
Ys <- as.vector(Xs %*% betas) + Zsb
WintF.vl.alph <- c(WintF.vl %*% alpha)
Ws.intF.vl.alph <- c(Ws.intF.vl %*% alpha)
eta.s <- Ws.intF.vl.alph * Ys
}
if (parameterization %in% c("slope", "both")) {
## Slope association: only betas[indFixed] enter the derivative design.
Ys.deriv <- as.vector(Xs.deriv %*% betas[indFixed]) + Zsb.deriv
WintF.sl.alph <- c(WintF.sl %*% Dalpha)
Ws.intF.sl.alph <- c(Ws.intF.sl %*% Dalpha)
eta.s <- if (parameterization == "both")
eta.s + Ws.intF.sl.alph * Ys.deriv
else
Ws.intF.sl.alph * Ys.deriv
}
exp.eta.tw.P <- exp(eta.tw) * P
## Score of the Gaussian longitudinal part.
sc1 <- - crossprod(X, y - eta.yx - Zb) / sigma^2
## Weibull hazard kernel at the quadrature points, times weights wk.
Int <- wk * exp(log(sigma.t) + (sigma.t - 1) * log.st + eta.s)
sc2 <- numeric(ncx)
## Survival-part score per beta coordinate; rowsum() collapses the
## quadrature points back to subjects via id.GK.
for (i in 1:ncx) {
ki <- exp.eta.tw.P * switch(parameterization,
"value" = rowsum(Int * Ws.intF.vl.alph * Xs[, i], id.GK, reorder = FALSE),
"slope" = {ii <- match(i, indFixed);
if (is.na(ii)) 0 else rowsum(Int * Ws.intF.sl.alph * Xs.deriv[, ii], id.GK, reorder = FALSE)},
"both" = {ii <- match(i, indFixed);
rowsum(Int * (Ws.intF.vl.alph * Xs[, i] +
Ws.intF.sl.alph * if (is.na(ii)) 0 else Xs.deriv[, ii]), id.GK, reorder = FALSE)}
)
## Integrate over the random effects with weights wGH.
kii <- c((p.byt * ki) %*% wGH)
sc2[i] <- switch(parameterization,
"value" = - sum(d * WintF.vl.alph * Xtime[, i] - kii, na.rm = TRUE),
"slope" = {ii <- match(i, indFixed);
if (is.na(ii)) 0 else - sum(d * WintF.sl.alph * Xtime.deriv[, ii] - kii, na.rm = TRUE)},
"both" = {ii <- match(i, indFixed);
- sum(d * (WintF.vl.alph * Xtime[, i] +
WintF.sl.alph * if (is.na(ii)) 0 else Xtime.deriv[, ii]) - kii, na.rm = TRUE)}
)
}
## Total score: longitudinal plus survival contributions.
c(sc1 + sc2)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/FrissMessageBox.R
\name{showConfirm}
\alias{showConfirm}
\title{Shows a confirm message.}
\usage{
showConfirm(session, inputName, message, okText = "OK",
cancelText = "Cancel")
}
\arguments{
\item{session}{Shiny session object from the server.R page of the app that will use this function.}
\item{inputName}{Name of the input to return the result to.}
\item{message}{Message to display on the confirm box.}
\item{okText}{Label for the confirm button; defaults to \code{"OK"}.}
\item{cancelText}{Label for the cancel button; defaults to \code{"Cancel"}.}
}
\description{
Shows a confirm message in the browser and returns result in input$inputName
}
\keyword{alert}
\keyword{confirm}
\keyword{dhtmlx}
\keyword{modal}
\keyword{popup}
\keyword{show}
| /man/showConfirm.Rd | no_license | ttraboue/FrissMsgBox | R | false | false | 689 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/FrissMessageBox.R
\name{showConfirm}
\alias{showConfirm}
\title{Shows a confirm message.}
\usage{
showConfirm(session, inputName, message, okText = "OK",
cancelText = "Cancel")
}
\arguments{
\item{session}{Shiny session object from the server.R page of the app that will use this function.}
\item{inputName}{Name of the input to return the result to.}
\item{message}{Message to display on the confirm box.}
\item{okText}{Label for the confirm button; defaults to \code{"OK"}.}
\item{cancelText}{Label for the cancel button; defaults to \code{"Cancel"}.}
}
\description{
Shows a confirm message in the browser and returns result in input$inputName
}
\keyword{alert}
\keyword{confirm}
\keyword{dhtmlx}
\keyword{modal}
\keyword{popup}
\keyword{show}
|
rga$methods(
list(
getData = function(ids, start.date = format(Sys.Date() - 8, "%Y-%m-%d"),
end.date = format(Sys.Date() - 1, "%Y-%m-%d"), date.format = "%Y-%m-%d",
metrics = "ga:users,ga:sessions,ga:pageviews", dimensions = "ga:date",
sort = "", filters = "", segment = "", fields = "",
start = 1, max, messages = TRUE, batch, walk = FALSE,
output.raw, output.formats, return.url = FALSE, rbr = FALSE, envir = .GlobalEnv,
samplingLevel = "HIGHER_PRECISION") {
if (missing(ids)) {
stop("please enter a profile id")
}
if (missing(batch) || batch == FALSE) {
isBatch <- FALSE
if (missing(max)) {
# standard
max <- 1000
}
} else {
isBatch <- TRUE
if (!is.numeric(batch)) {
if (!missing(max) && max < 10000) {
# no need
batch <- max
} else {
# max batch size
batch <- 10000
}
} else {
if (batch > 10000) {
# as per https://developers.google.com/analytics/devguides/reporting/core/v3/reference#maxResults
stop("batch size can be set to max of 10000")
}
}
adjustMax <- TRUE
# arbitrary target, adjust later
max <- 10000
}
# ensure that profile id begings with 'ga:'
if (!grepl("ga:", ids)) {
ids <- paste("ga:", ids, sep = "")
}
# remove whitespaces from metrics and dimensions
metrics <- gsub("\\s", "", metrics)
dimensions <- gsub("\\s", "", dimensions)
# build url with variables
url <- "https://www.googleapis.com/analytics/v3/data/ga"
query <- paste(paste("access_token", .self$getToken()$access_token, sep = "="),
paste("ids", ids, sep = "="),
paste("start-date", start.date, sep = "="),
paste("end-date", end.date, sep = "="),
paste("metrics", metrics, sep = "="),
paste("dimensions", dimensions, sep = "="),
paste("start-index", start, sep = "="),
paste("max-results", max, sep = "="),
paste("samplingLevel", samplingLevel, sep = "="),
sep = "&")
if (sort != "") {
query <- paste(query, paste("sort", sort, sep = "="), sep = "&")
}
if (segment != "") {
query <- paste(query, paste("segment", segment, sep = "="), sep = "&")
}
if (fields != "") {
query <- paste(query, paste("fields", fields, sep = "="), sep = "&")
}
if (filters != "") {
# available operators
ops <- c("==", "!=", ">", "<", ">=", "<=", "=@", "!@", "=-", "!-", "\\|\\|", "&&", "OR", "AND")
# make pattern for gsub
opsw <- paste("(\\ )+(", paste(ops, collapse = "|"), ")(\\ )+", sep = "")
# remove whitespaces around operators
filters <- gsub(opsw, "\\2", filters)
# replace logical operators
filters <- gsub("OR|\\|\\|", ",", filters)
filters <- gsub("AND|&&", ";", filters)
query <- paste(query, paste("filters", curlEscape(filters), sep = "="), sep = "&", collapse = "")
}
url <- paste(url, query = query, sep = "?")
if (return.url) {
return(url)
}
# thanks to Schaun Wheeler this will not provoke the weird SSL-bug
if (.Platform$OS.type == "windows") {
options(RCurlOptions = list(
verbose = FALSE,
capath = system.file("CurlSSL", "cacert.pem",
package = "RCurl"), ssl.verifypeer = FALSE))
}
# get data and convert from json to list-format
# switched to use httr and jsonlite
request <- GET(url)
ga.data <- jsonlite::fromJSON(content(request, "text"))
# possibility to extract the raw data
if (!missing(output.raw)) {
assign(output.raw, ga.data, envir = envir)
}
# output error and stop
if (!is.null(ga.data$error)) {
stop(paste("error in fetching data: ", ga.data$error$message, sep = ""))
}
if (ga.data$containsSampledData == "TRUE") {
isSampled <- TRUE
if (!walk) {
message(sprintf("Notice: Data set sampled from %s sessions (%d%% of all sessions)",
format(as.numeric(ga.data$sampleSize), big.mark=",", scientific=FALSE),
round((as.numeric(ga.data$sampleSize) / as.numeric(ga.data$sampleSpace) * 100))))
}
} else {
isSampled <- FALSE
}
if (isSampled && walk) {
return(.self$getDataInWalks(total = ga.data$totalResults, max = max, batch = batch,
ids = ids, start.date = start.date, end.date = end.date, date.format = date.format,
metrics = metrics, dimensions = dimensions, sort = sort, filters = filters,
segment = segment, fields = fields, envir = envir, samplingLevel = samplingLevel))
}
# check if all data is being extracted
if (NROW(ga.data$rows) < ga.data$totalResults && (messages || isBatch)) {
if (!isBatch) {
message(paste("Only pulling", NROW(ga.data$rows), "observations of", ga.data$totalResults, "total (set batch = TRUE to get all observations)"))
} else {
if (adjustMax) {
max <- ga.data$totalResults
}
message(paste("Batch: pulling", max, "observations in batches of", batch))
# pass variables to batch-function
return(.self$getDataInBatches(total = ga.data$totalResults, max = max, batchSize = batch,
ids = ids, start.date = start.date, end.date = end.date, date.format = date.format,
metrics = metrics, dimensions = dimensions, sort = sort, filters = filters,
segment = segment, fields = fields, envir = envir, samplingLevel = samplingLevel))
}
}
# get column names
ga.headers <- ga.data$columnHeaders
# remove ga: from column headers
ga.headers$name <- sub("ga:", "", ga.headers$name)
# did not return any results
if (!inherits(ga.data$rows, "matrix") && !rbr) {
stop(paste("no results:", ga.data$totalResults))
} else if (!inherits(ga.data$rows, "matrix") && rbr) {
# If row-by-row is true, return NULL
return(NULL)
}
# convert to data.frame
ga.data.df <- as.data.frame(ga.data$rows, stringsAsFactors = FALSE)
# insert column names
names(ga.data.df) <- ga.headers$name
# check if sampled; add attributes if so
if (isSampled) {
attr(ga.data.df, "containsSampledData") <- TRUE
attr(ga.data.df, "sampleSize") <- as.numeric(ga.data$sampleSize)
attr(ga.data.df, "sampleSpace") <- as.numeric(ga.data$sampleSpace)
} else {
attr(ga.data.df, "containsSampledData") <- FALSE
}
# find formats
formats <- ga.headers
# convert to r friendly
formats$dataType[formats$dataType %in% c("INTEGER", "PERCENT", "TIME", "CURRENCY", "FLOAT")] <- "numeric"
formats$dataType[formats$dataType == "STRING"] <- "character"
# addition rules
formats$dataType[formats$name %in% c("latitude", "longitude")] <- "numeric"
formats$dataType[formats$name %in% c("year", "month", "week", "day", "hour", "minute", "nthMonth", "nthWeek", "nthDay", "nthHour", "nthMinute", "dayOfWeek", "sessionDurationBucket", "visitLength", "daysSinceLastVisit", "daysSinceLastSession", "visitCount", "sessionCount", "sessionsToTransaction", "daysToTransaction")] <- "ordered"
formats$dataType[formats$name == "date"] <- "Date"
if ("date" %in% ga.headers$name) {
ga.data.df$date <- format(as.Date(ga.data.df$date, "%Y%m%d"), date.format)
}
# looping through columns and setting classes
for (i in 1:nrow(formats)) {
column <- formats$name[i]
class <- formats$dataType[[i]]
if (!exists(paste("as.", class, sep = ""), mode = "function")) {
stop(paste("can't find function for class", class))
} else {
as.fun <- match.fun(paste("as.", class, sep = ""))
}
if (class == "ordered") {
ga.data.df[[column]] <- as.numeric(ga.data.df[[column]])
}
ga.data.df[[column]] <- as.fun(ga.data.df[[column]])
}
# mos-def optimize
if (!missing(output.formats)) {
assign(output.formats, formats, envir = envir)
}
# and we're done
return(ga.data.df)
},
# Return the date of the earliest observation with at least one hit for the
# given profile (queries from 2005-01-01, capped at a single result row).
getFirstDate = function(ids) {
earliest <- .self$getData(ids, start.date = "2005-01-01", filters = "ga:hits!=0", max = 1, messages = FALSE)
earliest$date
},
# Page through a result set in chunks of `batchSize` rows by issuing repeated
# getData() calls with increasing start indices, then rbind the chunks.
#
# batchSize  rows per request (API limit is 10000)
# total      reported total row count (informational)
# max        total number of observations to pull
# Remaining arguments are forwarded unchanged to getData().
getDataInBatches = function(batchSize, total, ids, start.date, end.date, date.format,
metrics, max, dimensions, sort, filters, segment, fields, envir,
samplingLevel) {
runs.max <- ceiling(max/batchSize)
chunk.list <- vector("list", runs.max)
for (i in 0:(runs.max - 1)) {
start <- i * batchSize + 1
end <- start + batchSize - 1
# adjust batch size if we're pulling the last (partial) batch
if (end > max) {
# BUG FIX: the remaining row count is max - start + 1; the previous
# expression "max - batchSize" was only correct when runs.max == 2.
batchSize <- max - start + 1
end <- max
}
message(paste("Batch: run (", i + 1, "/", runs.max, "), observations [", start, ":", end, "]. Batch size: ", batchSize, sep = ""))
chunk <- .self$getData(ids = ids, start.date = start.date, end.date = end.date, metrics = metrics, dimensions = dimensions, sort = sort,
filters = filters, segment = segment, fields = fields, date.format = date.format, envir = envir, messages = FALSE, return.url = FALSE,
batch = FALSE, start = start, max = batchSize, samplingLevel = samplingLevel)
message(paste("Batch: received", NROW(chunk), "observations"))
chunk.list[[i + 1]] <- chunk
}
return(do.call(rbind, chunk.list, envir = envir))
},
# Pull the query one day at a time (one getData() call per calendar date) to
# avoid session sampling, then stack the daily chunks into one data.frame.
getDataInWalks = function(total, max, batch, ids, start.date, end.date, date.format,
metrics, dimensions, sort, filters, segment, fields, envir,
samplingLevel) {
n.days <- ceiling(as.numeric(difftime(as.Date(end.date), as.Date(start.date), units = "days")))
daily.chunks <- vector("list", n.days + 1)
for (day.offset in 0:(n.days)) {
walk.date <- format(as.Date(start.date) + day.offset, "%Y-%m-%d")
message(paste("Walk: run (", day.offset + 1, "/", n.days + 1, ") for date ", walk.date, sep = ""))
daily <- .self$getData(ids = ids, start.date = walk.date, end.date = walk.date, date.format = date.format,
metrics = metrics, dimensions = dimensions, sort = sort, filters = filters,
segment = segment, fields = fields, envir = envir, max = max,
rbr = TRUE, messages = FALSE, return.url = FALSE, batch = batch, samplingLevel = samplingLevel)
message(paste("Walk: received", NROW(daily), "observations"))
daily.chunks[[day.offset + 1]] <- daily
}
do.call(rbind, daily.chunks, envir = envir)
}
)
)
| /R/core.R | no_license | tedconf/rga | R | false | false | 13,230 | r | rga$methods(
list(
# Fetch a result set from the Google Analytics Core Reporting API (v3) and
# return it as a type-converted data.frame.
#
# ids          profile id; a "ga:" prefix is added when missing
# start.date/end.date  query window ("%Y-%m-%d"); defaults: 8 and 1 days ago
# date.format  output format applied to the returned "date" column
# metrics/dimensions   GA metric/dimension strings (whitespace is stripped)
# sort/filters/segment/fields  optional query refinements ("" = omitted)
# start/max    pagination start index and result cap per request
# batch        TRUE or a numeric chunk size (<= 10000) to page through rows
# walk         fetch day-by-day via getDataInWalks() to avoid sampling
# output.raw/output.formats  variable names that receive the raw JSON list /
#              the column-format table, assigned into `envir`
# return.url   if TRUE, return the request URL instead of fetching data
# rbr          "row by row": return NULL instead of stopping on empty results
getData = function(ids, start.date = format(Sys.Date() - 8, "%Y-%m-%d"),
end.date = format(Sys.Date() - 1, "%Y-%m-%d"), date.format = "%Y-%m-%d",
metrics = "ga:users,ga:sessions,ga:pageviews", dimensions = "ga:date",
sort = "", filters = "", segment = "", fields = "",
start = 1, max, messages = TRUE, batch, walk = FALSE,
output.raw, output.formats, return.url = FALSE, rbr = FALSE, envir = .GlobalEnv,
samplingLevel = "HIGHER_PRECISION") {
if (missing(ids)) {
stop("please enter a profile id")
}
# choose single-request mode vs. batched paging; a numeric "batch" is the
# chunk size, TRUE picks one automatically
if (missing(batch) || batch == FALSE) {
isBatch <- FALSE
if (missing(max)) {
# standard single-request cap
max <- 1000
}
} else {
isBatch <- TRUE
if (!is.numeric(batch)) {
if (!missing(max) && max < 10000) {
# requested fewer rows than one full batch; no need to page
batch <- max
} else {
# max batch size
batch <- 10000
}
} else {
if (batch > 10000) {
# as per https://developers.google.com/analytics/devguides/reporting/core/v3/reference#maxResults
stop("batch size can be set to max of 10000")
}
}
adjustMax <- TRUE
# arbitrary target, adjusted later once totalResults is known
max <- 10000
}
# ensure that profile id begins with 'ga:'
# (note: grepl matches "ga:" anywhere in the string, not only at the start)
if (!grepl("ga:", ids)) {
ids <- paste("ga:", ids, sep = "")
}
# remove whitespaces from metrics and dimensions
metrics <- gsub("\\s", "", metrics)
dimensions <- gsub("\\s", "", dimensions)
# build url with variables
url <- "https://www.googleapis.com/analytics/v3/data/ga"
query <- paste(paste("access_token", .self$getToken()$access_token, sep = "="),
paste("ids", ids, sep = "="),
paste("start-date", start.date, sep = "="),
paste("end-date", end.date, sep = "="),
paste("metrics", metrics, sep = "="),
paste("dimensions", dimensions, sep = "="),
paste("start-index", start, sep = "="),
paste("max-results", max, sep = "="),
paste("samplingLevel", samplingLevel, sep = "="),
sep = "&")
# optional query parameters are only appended when non-empty
if (sort != "") {
query <- paste(query, paste("sort", sort, sep = "="), sep = "&")
}
if (segment != "") {
query <- paste(query, paste("segment", segment, sep = "="), sep = "&")
}
if (fields != "") {
query <- paste(query, paste("fields", fields, sep = "="), sep = "&")
}
if (filters != "") {
# available operators
ops <- c("==", "!=", ">", "<", ">=", "<=", "=@", "!@", "=-", "!-", "\\|\\|", "&&", "OR", "AND")
# make pattern for gsub
opsw <- paste("(\\ )+(", paste(ops, collapse = "|"), ")(\\ )+", sep = "")
# remove whitespaces around operators
filters <- gsub(opsw, "\\2", filters)
# replace logical operators with the API's "," (OR) and ";" (AND) syntax
filters <- gsub("OR|\\|\\|", ",", filters)
filters <- gsub("AND|&&", ";", filters)
query <- paste(query, paste("filters", curlEscape(filters), sep = "="), sep = "&", collapse = "")
}
url <- paste(url, query = query, sep = "?")
if (return.url) {
return(url)
}
# thanks to Schaun Wheeler this will not provoke the weird SSL-bug
if (.Platform$OS.type == "windows") {
options(RCurlOptions = list(
verbose = FALSE,
capath = system.file("CurlSSL", "cacert.pem",
package = "RCurl"), ssl.verifypeer = FALSE))
}
# get data and convert from json to list-format
# switched to use httr and jsonlite
request <- GET(url)
ga.data <- jsonlite::fromJSON(content(request, "text"))
# possibility to extract the raw data
if (!missing(output.raw)) {
assign(output.raw, ga.data, envir = envir)
}
# output error and stop
if (!is.null(ga.data$error)) {
stop(paste("error in fetching data: ", ga.data$error$message, sep = ""))
}
# NOTE(review): if the response lacks containsSampledData, NULL == "TRUE"
# yields logical(0) and this if() errors -- confirm against the API.
if (ga.data$containsSampledData == "TRUE") {
isSampled <- TRUE
if (!walk) {
message(sprintf("Notice: Data set sampled from %s sessions (%d%% of all sessions)",
format(as.numeric(ga.data$sampleSize), big.mark=",", scientific=FALSE),
round((as.numeric(ga.data$sampleSize) / as.numeric(ga.data$sampleSpace) * 100))))
}
} else {
isSampled <- FALSE
}
# sampled result + walk requested: re-fetch day-by-day instead
if (isSampled && walk) {
return(.self$getDataInWalks(total = ga.data$totalResults, max = max, batch = batch,
ids = ids, start.date = start.date, end.date = end.date, date.format = date.format,
metrics = metrics, dimensions = dimensions, sort = sort, filters = filters,
segment = segment, fields = fields, envir = envir, samplingLevel = samplingLevel))
}
# check if all data is being extracted
if (NROW(ga.data$rows) < ga.data$totalResults && (messages || isBatch)) {
if (!isBatch) {
message(paste("Only pulling", NROW(ga.data$rows), "observations of", ga.data$totalResults, "total (set batch = TRUE to get all observations)"))
} else {
if (adjustMax) {
max <- ga.data$totalResults
}
message(paste("Batch: pulling", max, "observations in batches of", batch))
# pass variables to batch-function
return(.self$getDataInBatches(total = ga.data$totalResults, max = max, batchSize = batch,
ids = ids, start.date = start.date, end.date = end.date, date.format = date.format,
metrics = metrics, dimensions = dimensions, sort = sort, filters = filters,
segment = segment, fields = fields, envir = envir, samplingLevel = samplingLevel))
}
}
# get column names
ga.headers <- ga.data$columnHeaders
# remove ga: from column headers
ga.headers$name <- sub("ga:", "", ga.headers$name)
# did not return any results (jsonlite parses row data as a matrix)
if (!inherits(ga.data$rows, "matrix") && !rbr) {
stop(paste("no results:", ga.data$totalResults))
} else if (!inherits(ga.data$rows, "matrix") && rbr) {
# If row-by-row is true, return NULL
return(NULL)
}
# convert to data.frame
ga.data.df <- as.data.frame(ga.data$rows, stringsAsFactors = FALSE)
# insert column names
names(ga.data.df) <- ga.headers$name
# check if sampled; add attributes if so
if (isSampled) {
attr(ga.data.df, "containsSampledData") <- TRUE
attr(ga.data.df, "sampleSize") <- as.numeric(ga.data$sampleSize)
attr(ga.data.df, "sampleSpace") <- as.numeric(ga.data$sampleSpace)
} else {
attr(ga.data.df, "containsSampledData") <- FALSE
}
# map the API's declared column data types onto R classes
formats <- ga.headers
# convert to r friendly
formats$dataType[formats$dataType %in% c("INTEGER", "PERCENT", "TIME", "CURRENCY", "FLOAT")] <- "numeric"
formats$dataType[formats$dataType == "STRING"] <- "character"
# addition rules
formats$dataType[formats$name %in% c("latitude", "longitude")] <- "numeric"
formats$dataType[formats$name %in% c("year", "month", "week", "day", "hour", "minute", "nthMonth", "nthWeek", "nthDay", "nthHour", "nthMinute", "dayOfWeek", "sessionDurationBucket", "visitLength", "daysSinceLastVisit", "daysSinceLastSession", "visitCount", "sessionCount", "sessionsToTransaction", "daysToTransaction")] <- "ordered"
formats$dataType[formats$name == "date"] <- "Date"
if ("date" %in% ga.headers$name) {
ga.data.df$date <- format(as.Date(ga.data.df$date, "%Y%m%d"), date.format)
}
# looping through columns and setting classes via the matching as.<class>()
# coercion function; note the local "class" shadows base::class() here
# (assumes formats has at least one row -- TODO confirm for empty header sets)
for (i in 1:nrow(formats)) {
column <- formats$name[i]
class <- formats$dataType[[i]]
if (!exists(paste("as.", class, sep = ""), mode = "function")) {
stop(paste("can't find function for class", class))
} else {
as.fun <- match.fun(paste("as.", class, sep = ""))
}
if (class == "ordered") {
ga.data.df[[column]] <- as.numeric(ga.data.df[[column]])
}
ga.data.df[[column]] <- as.fun(ga.data.df[[column]])
}
# TODO: optimize this formats handling (original note: "mos-def optimize")
if (!missing(output.formats)) {
assign(output.formats, formats, envir = envir)
}
# and we're done
return(ga.data.df)
},
# Return the date of the earliest observation with at least one hit for the
# given profile (queries from 2005-01-01, capped at a single result row).
getFirstDate = function(ids) {
earliest <- .self$getData(ids, start.date = "2005-01-01", filters = "ga:hits!=0", max = 1, messages = FALSE)
earliest$date
},
# Page through a result set in chunks of `batchSize` rows by issuing repeated
# getData() calls with increasing start indices, then rbind the chunks.
#
# batchSize  rows per request (API limit is 10000)
# total      reported total row count (informational)
# max        total number of observations to pull
# Remaining arguments are forwarded unchanged to getData().
getDataInBatches = function(batchSize, total, ids, start.date, end.date, date.format,
metrics, max, dimensions, sort, filters, segment, fields, envir,
samplingLevel) {
runs.max <- ceiling(max/batchSize)
chunk.list <- vector("list", runs.max)
for (i in 0:(runs.max - 1)) {
start <- i * batchSize + 1
end <- start + batchSize - 1
# adjust batch size if we're pulling the last (partial) batch
if (end > max) {
# BUG FIX: the remaining row count is max - start + 1; the previous
# expression "max - batchSize" was only correct when runs.max == 2.
batchSize <- max - start + 1
end <- max
}
message(paste("Batch: run (", i + 1, "/", runs.max, "), observations [", start, ":", end, "]. Batch size: ", batchSize, sep = ""))
chunk <- .self$getData(ids = ids, start.date = start.date, end.date = end.date, metrics = metrics, dimensions = dimensions, sort = sort,
filters = filters, segment = segment, fields = fields, date.format = date.format, envir = envir, messages = FALSE, return.url = FALSE,
batch = FALSE, start = start, max = batchSize, samplingLevel = samplingLevel)
message(paste("Batch: received", NROW(chunk), "observations"))
chunk.list[[i + 1]] <- chunk
}
return(do.call(rbind, chunk.list, envir = envir))
},
# Pull the query one day at a time (one getData() call per calendar date) to
# avoid session sampling, then stack the daily chunks into one data.frame.
getDataInWalks = function(total, max, batch, ids, start.date, end.date, date.format,
metrics, dimensions, sort, filters, segment, fields, envir,
samplingLevel) {
n.days <- ceiling(as.numeric(difftime(as.Date(end.date), as.Date(start.date), units = "days")))
daily.chunks <- vector("list", n.days + 1)
for (day.offset in 0:(n.days)) {
walk.date <- format(as.Date(start.date) + day.offset, "%Y-%m-%d")
message(paste("Walk: run (", day.offset + 1, "/", n.days + 1, ") for date ", walk.date, sep = ""))
daily <- .self$getData(ids = ids, start.date = walk.date, end.date = walk.date, date.format = date.format,
metrics = metrics, dimensions = dimensions, sort = sort, filters = filters,
segment = segment, fields = fields, envir = envir, max = max,
rbr = TRUE, messages = FALSE, return.url = FALSE, batch = batch, samplingLevel = samplingLevel)
message(paste("Walk: received", NROW(daily), "observations"))
daily.chunks[[day.offset + 1]] <- daily
}
do.call(rbind, daily.chunks, envir = envir)
}
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-snsp.R
\docType{class}
\name{snsp-class}
\alias{snsp-class}
\alias{snsp}
\title{snsp-class}
\description{
snsp-class
}
| /man/snsp-class.Rd | no_license | ABS-dev/DiagTestKit | R | false | true | 204 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class-snsp.R
\docType{class}
\name{snsp-class}
\alias{snsp-class}
\alias{snsp}
\title{snsp-class}
\description{
snsp-class
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/omega.r
\name{omega}
\alias{omega}
\title{Compute 1 to k-step transition proportions}
\usage{
omega(x, k = NULL, labels = NULL)
}
\arguments{
\item{x}{either a transition matrix or 3-d array ( a set of transition matrices)}
\item{k}{if x is a transition matrix, this is number of steps 1 to k}
\item{labels}{labels for the states, excluding the last ("dead") state, which is always appended at the end}
}
\description{
Computes 1 to k-step forward transition proportions in each state with a single transition matrix or a 3-d array of transition matrices.
}
\author{
Jeff Laake
}
\keyword{utility}
| /marked/man/omega.Rd | no_license | bmcclintock/marked | R | false | false | 691 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/omega.r
\name{omega}
\alias{omega}
\title{Compute 1 to k-step transition proportions}
\usage{
omega(x, k = NULL, labels = NULL)
}
\arguments{
\item{x}{either a transition matrix or 3-d array ( a set of transition matrices)}
\item{k}{if x is a transition matrix, this is number of steps 1 to k}
\item{labels}{labels for the states, excluding the last ("dead") state, which is always appended at the end}
}
\description{
Computes 1 to k-step forward transition proportions in each state with a single transition matrix or a 3-d array of transition matrices.
}
\author{
Jeff Laake
}
\keyword{utility}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{reorder_data}
\alias{reorder_data}
\title{Reorder fragments names represented as factor levels based on a specified metric}
\usage{
reorder_data(df, by_model, by_prop, FUN = median, frag_col_name = "full_name")
}
\arguments{
\item{df}{input data.frame.}
\item{by_model}{character name of a selected model name in column}
\item{by_prop}{character name of a selected property}
\item{FUN}{metric functions applied to order different fragments}
\item{frag_col_name}{character name of column containing desired fragments names to reorder}
}
\value{
data.frame.
}
\description{
Reorder fragments names represented as factor levels based on a specified metric
}
\details{
This function can be applied only to the melted input data.frame.
}
\examples{
file_name <- system.file("extdata", "free-wilson_frag_contributions.txt", package = "rspci")
df <- load_data(file_name)
df <- reorder_data(df, "consensus", "overall", frag_col_name = "FragID")
}
| /man/reorder_data.Rd | no_license | DrrDom/rspci | R | false | true | 1,039 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{reorder_data}
\alias{reorder_data}
\title{Reorder fragments names represented as factor levels based on a specified metric}
\usage{
reorder_data(df, by_model, by_prop, FUN = median, frag_col_name = "full_name")
}
\arguments{
\item{df}{input data.frame.}
\item{by_model}{character; name of the selected model (a value in the model-name column)}
\item{by_prop}{character name of a selected property}
\item{FUN}{metric functions applied to order different fragments}
\item{frag_col_name}{character name of column containing desired fragments names to reorder}
}
\value{
data.frame.
}
\description{
Reorder fragments names represented as factor levels based on a specified metric
}
\details{
This function can be applied only to the melted input data.frame.
}
\examples{
file_name <- system.file("extdata", "free-wilson_frag_contributions.txt", package = "rspci")
df <- load_data(file_name)
df <- reorder_data(df, "consensus", "overall", frag_col_name = "FragID")
}
|
# Hand-built one-row transition table (K52.9 -> A09.9 across the 2009->2010
# ICD-10-GM revision), shaped like the package's transition data.
# NOTE(review): this object is not referenced by any test in this file --
# presumably scaffolding for a future custom-transition test; confirm or remove.
dat_custom_transition <- data.frame(
year_from = 2009,
year_to = 2010,
icd_from = "K52.9",
icd_to = "A09.9",
automatic_forward = "A",
automatic_backward = "A",
stringsAsFactors = FALSE
)
# Fixtures: expand single ICD specifications into their full code sets for
# specific catalogue years; consumed by the icd_history() tests below.
icd_k52_2009 <- ICD10gm::icd_expand(data.frame(icd_spec = "K52.9"),
year = 2009, col_icd = "icd_spec")
icd_k52_2010 <- ICD10gm::icd_expand(data.frame(icd_spec = "K52.9"),
year = 2010, col_icd = "icd_spec")
icd_k58_2019 <- ICD10gm::icd_expand(data.frame(icd_spec = "K58.2"),
year = 2019, col_icd = "icd_spec")
# Identity case: when the requested year range is exactly the year the codes
# were expanded for, icd_history() should return its input unchanged.
test_that("icd_history returns input if years == year", {
expect_identical(icd_k52_2010,
ICD10gm::icd_history(icd_k52_2010, years = 2010))
})
# Coding break: K52.9's 2009 expansion gains no 2010 rows, so extending the
# year range to 2010 leaves the result equal to the 2009 input.
test_that("Coding break: Check that K52.9 specified for 2009 is removed for 2010", {
expect_identical(
# explicit namespace for consistency with the first test in this file
ICD10gm::icd_history(icd_k52_2009, years = 2009:2010), icd_k52_2009
)
})
# Simple transition: the 2019 code K58.2 maps back to K58.9 for 2018, so
# both codes appear, in chronological order, in the history.
test_that("Simple transition: Check that K58.2 for 2019 is translated to K58.9 for 2018", {
expect_identical(
# explicit namespace for consistency with the first test in this file
ICD10gm::icd_history(icd_k58_2019, years = 2018:2019)$icd_code,
c("K58.9", "K58.2")
)
})
| /tests/testthat/test-icd_history.R | permissive | edonnachie/ICD10gm | R | false | false | 1,185 | r | dat_custom_transition <- data.frame(
year_from = 2009,
year_to = 2010,
icd_from = "K52.9",
icd_to = "A09.9",
automatic_forward = "A",
automatic_backward = "A",
stringsAsFactors = FALSE
)
# Fixtures: expand single ICD specifications into their full code sets for
# specific catalogue years; consumed by the icd_history() tests below.
icd_k52_2009 <- ICD10gm::icd_expand(data.frame(icd_spec = "K52.9"),
year = 2009, col_icd = "icd_spec")
icd_k52_2010 <- ICD10gm::icd_expand(data.frame(icd_spec = "K52.9"),
year = 2010, col_icd = "icd_spec")
icd_k58_2019 <- ICD10gm::icd_expand(data.frame(icd_spec = "K58.2"),
year = 2019, col_icd = "icd_spec")
# Identity case: when the requested year range is exactly the year the codes
# were expanded for, icd_history() should return its input unchanged.
test_that("icd_history returns input if years == year", {
expect_identical(icd_k52_2010,
ICD10gm::icd_history(icd_k52_2010, years = 2010))
})
# Coding break: K52.9's 2009 expansion gains no 2010 rows, so extending the
# year range to 2010 leaves the result equal to the 2009 input.
test_that("Coding break: Check that K52.9 specified for 2009 is removed for 2010", {
expect_identical(
# explicit namespace for consistency with the first test in this file
ICD10gm::icd_history(icd_k52_2009, years = 2009:2010), icd_k52_2009
)
})
# Simple transition: the 2019 code K58.2 maps back to K58.9 for 2018, so
# both codes appear, in chronological order, in the history.
test_that("Simple transition: Check that K58.2 for 2019 is translated to K58.9 for 2018", {
expect_identical(
# explicit namespace for consistency with the first test in this file
ICD10gm::icd_history(icd_k58_2019, years = 2018:2019)$icd_code,
c("K58.9", "K58.2")
)
})
|
# Plot6: motor-vehicle PM2.5 emissions, Baltimore City vs Los Angeles County.
library(dplyr)
library(ggplot2)  # needed for ggplot()/aes()/geom_line() below

# NEI summary data (columns used below: fips, SCC, Emissions, year).
df <- readRDS("summary.rds")
str(df)
sum(is.na(df))

# Source classification table, used to identify motor-vehicle SCC codes.
# NOTE(review): "map" was used below but never defined in the original script;
# presumably the NEI Source_Classification_Code table -- confirm the path.
map <- readRDS("Source_Classification_Code.rds")

# City subsets: 24510 = Baltimore City, 06037 = Los Angeles County.
# NOTE(review): "baltimore" was also used without being defined -- confirm fips.
baltimore <- df[df$fips == "24510", ]
LA <- df[df$fips == "06037", ]

# Keep only motor-vehicle-related source classification codes.
motor <- grepl("Vehicle", map$SCC.Level.Two)
sub_map <- map[motor, ]

# Label each city's motor-vehicle records and stack them into one frame.
motor_merge_baltimore <- merge(sub_map, baltimore, by = "SCC")
motor_merge_baltimore$fips <- "baltimore"
motor_merge_LA <- merge(sub_map, LA, by = "SCC")
motor_merge_LA$fips <- "los angales"
r <- rbind(motor_merge_baltimore, motor_merge_LA)

# BUG FIX: the original called aggregate() on "agg" before it existed;
# aggregate the combined frame "r" instead.
agg <- aggregate(Emissions ~ year + fips, r, sum)

jpeg("Plot6.png")
ggplot(agg, aes(year, Emissions, color = fips)) + geom_line()
dev.off() | /Plot6.R | no_license | zigzagktz/Air-Pollutant-Analysis-for-US--1999-2008- | R | false | false | 528 | r | df<- readRDS("summary.rds")
str(df)
sum(is.na(df))
# Plot6: motor-vehicle PM2.5 emissions, Baltimore City vs Los Angeles County.
library(dplyr)
library(ggplot2)  # needed for ggplot()/aes()/geom_line() below

# Source classification table, used to identify motor-vehicle SCC codes.
# NOTE(review): "map" was used below but never defined in the original script;
# presumably the NEI Source_Classification_Code table -- confirm the path.
map <- readRDS("Source_Classification_Code.rds")

# City subsets: 24510 = Baltimore City, 06037 = Los Angeles County.
# NOTE(review): "baltimore" was also used without being defined -- confirm fips.
baltimore <- df[df$fips == "24510", ]
LA <- df[df$fips == "06037", ]

# Keep only motor-vehicle-related source classification codes.
motor <- grepl("Vehicle", map$SCC.Level.Two)
sub_map <- map[motor, ]

# Label each city's motor-vehicle records and stack them into one frame.
motor_merge_baltimore <- merge(sub_map, baltimore, by = "SCC")
motor_merge_baltimore$fips <- "baltimore"
motor_merge_LA <- merge(sub_map, LA, by = "SCC")
motor_merge_LA$fips <- "los angales"
r <- rbind(motor_merge_baltimore, motor_merge_LA)

# BUG FIX: the original called aggregate() on "agg" before it existed;
# aggregate the combined frame "r" instead.
agg <- aggregate(Emissions ~ year + fips, r, sum)

jpeg("Plot6.png")
ggplot(agg, aes(year, Emissions, color = fips)) + geom_line()
dev.off() |
# Scrape the table of US constitutional amendments from Wikipedia with rvest.
#install.packages("rvest")
library("rvest")
# Download and parse the article's HTML.
webpage<-read_html('https://en.wikipedia.org/wiki/List_of_amendments_to_the_United_States_Constitution')
# Collect every <table> node on the page.
tbls <- html_nodes(webpage, "table")
# Inspect the third table (position-based selection -- NOTE(review): brittle
# if the page layout changes; confirm the index against the live page).
tbls[3]
# Parse that third table into a data frame; fill = TRUE pads ragged rows.
tbls_ls <- webpage %>%
html_nodes("table") %>%
.[3] %>%
html_table(fill = TRUE)
tbls_ls
df<-as.data.frame(tbls_ls)
| /Wikipedia Table Scrapping.R | no_license | Akulm26/Webscrapping101 | R | false | false | 319 | r | #install.packages("rvest")
library("rvest")
# Download and parse the Wikipedia article listing US constitutional amendments.
webpage<-read_html('https://en.wikipedia.org/wiki/List_of_amendments_to_the_United_States_Constitution')
# Collect every <table> node on the page.
tbls <- html_nodes(webpage, "table")
# Inspect the third table (position-based selection -- NOTE(review): brittle
# if the page layout changes; confirm the index against the live page).
tbls[3]
# Parse that third table into a data frame; fill = TRUE pads ragged rows.
tbls_ls <- webpage %>%
html_nodes("table") %>%
.[3] %>%
html_table(fill = TRUE)
tbls_ls
df<-as.data.frame(tbls_ls)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{OrdwayBirds}
\alias{OrdwayBirds}
\title{Birds captured and released at Ordway, complete and uncleaned}
\format{A data frame with 15,829 observations on the bird's species, size,
date found, and band number.
\describe{
\item{\code{bogus}}{a character vector}
\item{\code{Timestamp}}{Timestamp indicates when the data were entered into an electronic record,
not anything about the bird being described}
\item{\code{Year}}{a character vector}
\item{\code{Day}}{a character vector}
\item{\code{Month}}{a character vector}
\item{\code{CaptureTime}}{a character vector}
\item{\code{SpeciesName}}{a character vector}
\item{\code{Sex}}{a character vector}
\item{\code{Age}}{a character vector}
\item{\code{BandNumber}}{a character vector}
\item{\code{TrapID}}{a character vector}
\item{\code{Weather}}{a character vector}
\item{\code{BandingReport}}{a character vector}
\item{\code{RecaptureYN}}{a character vector}
\item{\code{RecaptureMonth}}{a character vector}
\item{\code{RecaptureDay}}{a character vector}
\item{\code{Condition}}{a character vector}
\item{\code{Release}}{a character vector}
\item{\code{Comments}}{a character vector}
\item{\code{DataEntryPerson}}{a character vector}
\item{\code{Weight}}{a character vector}
\item{\code{WingChord}}{a character vector}
\item{\code{Temperature}}{a character vector}
\item{\code{RecaptureOriginal}}{a character vector}
\item{\code{RecapturePrevious}}{a character vector}
\item{\code{TailLength}}{a character vector}
}
Timestamp indicates when the data were entered into an electronic record,
not anything about the bird being described.}
\source{
Jerald Dosch, Dept. of Biology, Macalester College: the manager of
the Study Area.
}
\usage{
OrdwayBirds
}
\description{
The historical record of birds captured and released at the Katharine Ordway
Natural History Study Area, a 278-acre preserve in Inver Grove Heights,
Minnesota, owned and managed by Macalester College.
}
\details{
There are many extraneous levels of variables such as species. Part of the
purpose of this data set is to teach about data cleaning.
}
\examples{
data(OrdwayBirds)
}
\keyword{datasets}
| /man/OrdwayBirds.Rd | no_license | mesamiked/mdsr | R | false | true | 2,255 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{OrdwayBirds}
\alias{OrdwayBirds}
\title{Birds captured and released at Ordway, complete and uncleaned}
\format{A data frame with 15,829 observations on the bird's species, size,
date found, and band number.
\describe{
\item{\code{bogus}}{a character vector}
\item{\code{Timestamp}}{Timestamp indicates when the data were entered into an electronic record,
not anything about the bird being described}
\item{\code{Year}}{a character vector}
\item{\code{Day}}{a character vector}
\item{\code{Month}}{a character vector}
\item{\code{CaptureTime}}{a character vector}
\item{\code{SpeciesName}}{a character vector}
\item{\code{Sex}}{a character vector}
\item{\code{Age}}{a character vector}
\item{\code{BandNumber}}{a character vector}
\item{\code{TrapID}}{a character vector}
\item{\code{Weather}}{a character vector}
\item{\code{BandingReport}}{a character vector}
\item{\code{RecaptureYN}}{a character vector}
\item{\code{RecaptureMonth}}{a character vector}
\item{\code{RecaptureDay}}{a character vector}
\item{\code{Condition}}{a character vector}
\item{\code{Release}}{a character vector}
\item{\code{Comments}}{a character vector}
\item{\code{DataEntryPerson}}{a character vector}
\item{\code{Weight}}{a character vector}
\item{\code{WingChord}}{a character vector}
\item{\code{Temperature}}{a character vector}
\item{\code{RecaptureOriginal}}{a character vector}
\item{\code{RecapturePrevious}}{a character vector}
\item{\code{TailLength}}{a character vector}
}
Timestamp indicates when the data were entered into an electronic record,
not anything about the bird being described.}
\source{
Jerald Dosch, Dept. of Biology, Macalester College: the manager of
the Study Area.
}
\usage{
OrdwayBirds
}
\description{
The historical record of birds captured and released at the Katharine Ordway
Natural History Study Area, a 278-acre preserve in Inver Grove Heights,
Minnesota, owned and managed by Macalester College.
}
\details{
There are many extraneous levels of variables such as species. Part of the
purpose of this data set is to teach about data cleaning.
}
\examples{
data(OrdwayBirds)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-methods.R
\name{plot_placebos}
\alias{plot_placebos}
\title{plot_placebos}
\usage{
plot_placebos(data, time_window = NULL, prune = TRUE)
}
\arguments{
\item{data}{nested data of type \code{tbl_df}.}
\item{time_window}{time window of the tbl_df plot.}
\item{prune}{boolean flag; if TRUE, then all placebo cases with a pre-period
RMSPE exceeding two times the treated unit pre-period RMSPE are pruned;
Default is TRUE.}
}
\value{
\code{ggplot} object of the difference between the observed and synthetic
trends for the treated and placebo units.
}
\description{
Plot the difference between the observed and synthetic control unit for the
treated and the placebo units. The difference captures the causal quantity
(i.e. the magnitude of the difference between the observed and counterfactual
case). Plotting the actual treated observation against the placebos captures
the likelihood (or rarity) of the observed differenced trend.
}
\details{
The function provides a pruning rule where all placebo cases with a
pre-period root mean squared predictive error (RMSPE) exceeding two times the
treated unit pre-period RMSPE are pruned. This helps overcome scale issues
when a particular placebo case has poor fit in the pre-period.
See documentation on \code{?synthetic_control} on how to generate placebo cases.
When initializing a synth pipeline, set the \code{generate_placebos} argument to
\code{TRUE}. The processing pipeline remains the same.
}
\examples{
\donttest{
# Smoking example data
data(smoking)
smoking_out <-
smoking \%>\%
# initial the synthetic control object
synthetic_control(outcome = cigsale,
unit = state,
time = year,
i_unit = "California",
i_time = 1988,
generate_placebos=TRUE) \%>\%
# Generate the aggregate predictors used to generate the weights
generate_predictor(time_window=1980:1988,
lnincome = mean(lnincome, na.rm = TRUE),
retprice = mean(retprice, na.rm = TRUE),
age15to24 = mean(age15to24, na.rm = TRUE)) \%>\%
generate_predictor(time_window=1984:1988,
beer = mean(beer, na.rm = TRUE)) \%>\%
generate_predictor(time_window=1975,
cigsale_1975 = cigsale) \%>\%
generate_predictor(time_window=1980,
cigsale_1980 = cigsale) \%>\%
generate_predictor(time_window=1988,
cigsale_1988 = cigsale) \%>\%
# Generate the fitted weights for the synthetic control
generate_weights(optimization_window =1970:1988,
Margin.ipop=.02,Sigf.ipop=7,Bound.ipop=6) \%>\%
# Generate the synthetic control
generate_control()
# Plot the observed and synthetic trend
smoking_out \%>\% plot_placebos(time_window = 1970:2000)
}
}
| /man/plot_placebos.Rd | no_license | cran/tidysynth | R | false | true | 2,902 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-methods.R
\name{plot_placebos}
\alias{plot_placebos}
\title{plot_placebos}
\usage{
plot_placebos(data, time_window = NULL, prune = TRUE)
}
\arguments{
\item{data}{nested data of type \code{tbl_df}.}
\item{time_window}{time window of the tbl_df plot.}
\item{prune}{boolean flag; if TRUE, then all placebo cases with a pre-period
RMSPE exceeding two times the treated unit pre-period RMSPE are pruned;
Default is TRUE.}
}
\value{
\code{ggplot} object of the difference between the observed and synthetic
trends for the treated and placebo units.
}
\description{
Plot the difference between the observed and synthetic control unit for the
treated and the placebo units. The difference captures the causal quantity
(i.e. the magnitude of the difference between the observed and counterfactual
case). Plotting the actual treated observation against the placebos captures
the likelihood (or rarity) of the observed differenced trend.
}
\details{
The function provides a pruning rule where all placebo cases with a
pre-period root mean squared predictive error (RMSPE) exceeding two times the
treated unit pre-period RMSPE are pruned. This helps overcome scale issues
when a particular placebo case has poor fit in the pre-period.
See documentation on \code{?synthetic_control} on how to generate placebo cases.
When initializing a synth pipeline, set the \code{generate_placebos} argument to
\code{TRUE}. The processing pipeline remains the same.
}
\examples{
\donttest{
# Smoking example data
data(smoking)
smoking_out <-
smoking \%>\%
# initialize the synthetic control object
synthetic_control(outcome = cigsale,
unit = state,
time = year,
i_unit = "California",
i_time = 1988,
generate_placebos=TRUE) \%>\%
# Generate the aggregate predictors used to generate the weights
generate_predictor(time_window=1980:1988,
lnincome = mean(lnincome, na.rm = TRUE),
retprice = mean(retprice, na.rm = TRUE),
age15to24 = mean(age15to24, na.rm = TRUE)) \%>\%
generate_predictor(time_window=1984:1988,
beer = mean(beer, na.rm = TRUE)) \%>\%
generate_predictor(time_window=1975,
cigsale_1975 = cigsale) \%>\%
generate_predictor(time_window=1980,
cigsale_1980 = cigsale) \%>\%
generate_predictor(time_window=1988,
cigsale_1988 = cigsale) \%>\%
# Generate the fitted weights for the synthetic control
generate_weights(optimization_window =1970:1988,
Margin.ipop=.02,Sigf.ipop=7,Bound.ipop=6) \%>\%
# Generate the synthetic control
generate_control()
# Plot the observed and synthetic trend
smoking_out \%>\% plot_placebos(time_window = 1970:2000)
}
}
|
# Day 30: two histograms of engine displacement (ggplot2::mpg), comparing
# automatic binning via a fixed binwidth with a fixed number of bins,
# displayed side by side.
library(ggplot2)
library(gridExtra)  # was require(): require() returns FALSE instead of erroring when the package is missing
theme_set(theme_classic())
# Histogram on a Continuous (Numeric) Variable
# Base plot: displacement on x; vehicle classes filled with a Spectral palette.
g <- ggplot(mpg, aes(displ)) + scale_fill_brewer(palette = "Spectral")
# Fixed binwidth of 0.1; thin black outlines separate the stacked classes.
plot1 <- g + geom_histogram(aes(fill=class),
                            binwidth = .1,
                            col="black",
                            size=.1) + # change binwidth
  labs(title="Histogram with Auto Binning",
       subtitle="Engine Displacement across Vehicle Classes")
# Fixed count of 5 bins.
plot2 <- g + geom_histogram(aes(fill=class),
                            bins=5,
                            col="black",
                            size=.1) + # change number of bins
  labs(title="Histogram with Fixed Bins",
       subtitle="Engine Displacement across Vehicle Classes")
# Side-by-side comparison of the two binning strategies.
grid.arrange(plot1, plot2, ncol=2)
| /Day30_Histogram.R | no_license | sharifshohan/One_Figure_A_Day_Challenge | R | false | false | 774 | r | library(ggplot2)
# Duplicate copy of the Day-30 histogram script: two histograms of engine
# displacement (ggplot2::mpg), one with a fixed binwidth and one with a fixed
# number of bins, arranged side by side. (The leading library(ggplot2) line of
# this copy is fused into the preceding metadata row of the dump.)
theme_set(theme_classic())
# Histogram on a Continuous (Numeric) Variable
g <- ggplot(mpg, aes(displ)) + scale_fill_brewer(palette = "Spectral")
# NOTE(review): require() returns FALSE instead of erroring when gridExtra is
# missing; library() would fail fast here.
require(gridExtra)
plot1<- g + geom_histogram(aes(fill=class),
binwidth = .1,
col="black",
size=.1) + # change binwidth
labs(title="Histogram with Auto Binning",
subtitle="Engine Displacement across Vehicle Classes")
plot2<- g + geom_histogram(aes(fill=class),
bins=5,
col="black",
size=.1) + # change number of bins
labs(title="Histogram with Fixed Bins",
subtitle="Engine Displacement across Vehicle Classes")
# Side-by-side comparison of the two binning strategies.
grid.arrange(plot1, plot2, ncol=2)
|
# Compare the original TEI-based species-occurrence model (loaded from disk)
# against a model built on raw climate variables, and report the AIC
# difference.
suppressMessages(suppressWarnings(library(StatisticalModels)))
suppressMessages(suppressWarnings(library(raster)))
# Pipeline directory layout.
inDir <- "2_PrepareDiversityData/"
modelsDir <- "3_RunSpeciesLevelModels/"
climDir <- "20_PrepareRawClimateData/"
outDir <- "21_CompareRawCimateModels/"  # NOTE(review): unused below; kept for pipeline consistency
# Loads the `diversity` data frame prepared earlier in the pipeline.
load(paste0(inDir, "diversity_data.Rd"))  # paste0 for consistency with the raster paths below
# +2 keeps the argument of log() strictly positive.
diversity$LogElevation <- log(diversity$Elevation + 2)
# Keep only the columns used by the models, dropping incomplete records.
modelData <- diversity[, c('occur', 'LandUse', 'TEI_BL',
                           'TEI_delta', 'LogElevation', 'SS', 'SSBS',
                           'Taxon_name_entered', 'Longitude', 'Latitude')]
modelData <- na.omit(modelData)
# Raw climate layers: baseline and change (delta) rasters for maximum
# temperature and annual precipitation.
max.temp.bl <- raster(paste0(climDir, "MaxTempBL.tif"))
max.temp.delta <- raster(paste0(climDir, "MaxTempDelta.tif"))
precip.bl <- raster(paste0(climDir, "AnnPrecipBL.tif"))
precip.delta <- raster(paste0(climDir, "AnnPrecipDelta.tif"))
# Attach raster values at each record's coordinates.
modelData$MaxTempBL <- raster::extract(x = max.temp.bl, y = modelData[, c('Longitude', 'Latitude')])
modelData$MaxTempDelta <- raster::extract(x = max.temp.delta, y = modelData[, c('Longitude', 'Latitude')])
modelData$PrecipBL <- raster::extract(x = precip.bl, y = modelData[, c('Longitude', 'Latitude')])
modelData$PrecipDelta <- raster::extract(x = precip.delta, y = modelData[, c('Longitude', 'Latitude')])
# Loads the fitted TEI models, including `m_full` -- presumably a model object
# with a $model element; confirm against 3_RunSpeciesLevelModels.
load(paste0(modelsDir, "TemperatureModels.rd"))
# Raw-climate analogue of the TEI model: land use crossed with second-order
# polynomials of baseline and delta temperature, plus random intercepts for
# study (SS), site (SSBS) and taxon.
m_raw_clim <- glmer(formula = occur ~ LandUse + poly(MaxTempBL, 2) + poly(MaxTempDelta, 2) +
                      LandUse:poly(MaxTempBL, 2) + LandUse:poly(MaxTempDelta, 2) +
                      LandUse:poly(MaxTempBL, 2):poly(MaxTempDelta, 2) + LogElevation +
                      (1 | SS) + (1 | SSBS) + (1 | Taxon_name_entered), data = modelData,
                    family = "binomial")
# Report AICs; lower is better, so a positive difference favours the TEI model.
cat("AIC - original TEI model:\n")
cat(paste0(round(AIC(m_full$model), 1), "\n"))
cat("AIC - raw climate model:\n")
cat(paste0(round(AIC(m_raw_clim), 1), "\n"))
cat("AIC difference:\n")
cat(paste0(round(AIC(m_raw_clim) - AIC(m_full$model), 1), "\n"))
| /21_CompareRawCimateModels.R | no_license | timnewbold/BumblebeesLandUseAnalysisPublic | R | false | false | 1,924 | r | suppressMessages(suppressWarnings(library(StatisticalModels)))
# Duplicate copy of the raw-climate vs TEI model comparison script (its first
# line, loading StatisticalModels, is fused into the preceding metadata row).
suppressMessages(suppressWarnings(library(raster)))
# Pipeline directory layout; outDir is not used below.
inDir <- "2_PrepareDiversityData/"
modelsDir <- "3_RunSpeciesLevelModels/"
climDir <- "20_PrepareRawClimateData/"
outDir <- "21_CompareRawCimateModels/"
# Loads the `diversity` data frame prepared earlier in the pipeline.
load(paste(inDir,"diversity_data.Rd",sep=""))
# +2 keeps the argument of log() strictly positive.
diversity$LogElevation <- log(diversity$Elevation+2)
# Keep only the columns used by the models, dropping incomplete records.
modelData <- diversity[,c('occur','LandUse','TEI_BL',
'TEI_delta','LogElevation','SS','SSBS',
'Taxon_name_entered','Longitude','Latitude')]
modelData <- na.omit(modelData)
# Baseline and change (delta) rasters for max temperature and precipitation.
max.temp.bl <- raster(paste0(climDir,"MaxTempBL.tif"))
max.temp.delta <- raster(paste0(climDir,"MaxTempDelta.tif"))
precip.bl <- raster(paste0(climDir,"AnnPrecipBL.tif"))
precip.delta <- raster(paste0(climDir,"AnnPrecipDelta.tif"))
# Attach raster values at each record's coordinates.
modelData$MaxTempBL <- raster::extract(x = max.temp.bl,y = modelData[,c('Longitude','Latitude')])
modelData$MaxTempDelta <- raster::extract(x = max.temp.delta,y = modelData[,c('Longitude','Latitude')])
modelData$PrecipBL <- raster::extract(x = precip.bl,y = modelData[,c('Longitude','Latitude')])
modelData$PrecipDelta <- raster::extract(x = precip.delta,y = modelData[,c('Longitude','Latitude')])
# Loads the fitted TEI models, including `m_full` -- presumably a model object
# with a $model element; confirm against 3_RunSpeciesLevelModels.
load(paste0(modelsDir,"TemperatureModels.rd"))
# Raw-climate analogue of the TEI model with random intercepts for study,
# site and taxon.
m_raw_clim <- glmer(formula = occur ~ LandUse + poly(MaxTempBL, 2) + poly(MaxTempDelta, 2) +
LandUse:poly(MaxTempBL, 2) + LandUse:poly(MaxTempDelta, 2) +
LandUse:poly(MaxTempBL, 2):poly(MaxTempDelta, 2) + LogElevation +
(1 | SS) + (1 | SSBS) + (1 | Taxon_name_entered), data = modelData,
family = "binomial")
# Report AICs; lower is better.
cat("AIC - original TEI model:\n")
cat(paste0(round(AIC(m_full$model),1),"\n"))
cat("AIC - raw climate model:\n")
cat(paste0(round(AIC(m_raw_clim),1),"\n"))
cat("AIC difference:\n")
cat(paste0(round(AIC(m_raw_clim) - AIC(m_full$model),1),"\n"))
|
# NOTE(review): install.packages() at the top of a script reinstalls the
# packages on every run; installs are usually done once, outside the script.
install.packages("rgl")
install.packages("sna")
install.packages("network")
install.packages("igraph")
library(network)
library(sna)
library(rgl)
library(readxl)
library(igraph)
# Works from a random network (original comment, translated; the data is in
# fact read from a CSV of a two-mode network).
rede <- read.table("Rede Two Mode_Tarefa Aula 1_Paulista T4.csv",header=TRUE,sep = ",")
dim(rede)
# Adapt the data.frame so it can be used to build the network: columns 2-12
# are the relation matrix, column 1 holds the row names.
grede <- rede[,2:12]
rownames(grede) <- rede[,1]
dim(grede)
# Plot the network built from the 0/1 relation matrix (translated from
# Portuguese). Node sizes/labels are scaled by degree, closeness and
# betweenness centrality in turn.
# FIX: the last four calls passed gmode="grede" (the data object's name, not a
# valid gmode); sna::gplot expects "digraph", "graph" or "twomode". "graph"
# matches the sibling calls here. NOTE(review): since this is a two-mode
# network, gmode="twomode" may be the analytically correct choice -- confirm.
gplot(grede)
gplot(grede, gmode="graph", displaylabels = TRUE)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE)
# Sized / labelled by indegree
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, vertex.cex=degree(grede,gmode="graph",cmode="indegree")/3)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, label=degree(grede,gmode="graph",cmode="indegree"))
# Sized / labelled by closeness
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, vertex.cex=closeness(grede,gmode="graph")*2)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, label=round(closeness(grede,gmode="graph"),digits=2))
# Sized / labelled by betweenness
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, vertex.cex=betweenness(grede,gmode="graph")/3+1)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, label=betweenness(grede,gmode="graph"))
# Explore the network: centrality scores, components, CUG tests, clustering.
sna::degree(grede,gmode="graph",cmode="indegree")
sna::closeness(grede,gmode="graph")
sna::betweenness(grede,gmode="graph")
sna::bicomponent.dist(grede) # returns the bicomponents of an input graph, along with the size distribution and membership information
sna::bicomponent.dist(grede, symmetrize = c("strong", "weak"))
sna::components(grede,connected="weak")
sna::components(grede,connected="strong")
# Conditional-uniform-graph tests of transitivity under three null models.
sna::cug.test(grede,gtrans,cmode="size")
sna::cug.test(grede,gtrans,cmode="edges")
sna::cug.test(grede,gtrans,cmode="dyad.census")
sna::diag.remove(grede)
sna::efficiency(grede)
sna::gden(grede)
# Plot the equivalence clustering as a dendrogram.
eq <- equiv.clust(grede)
plot(eq)
gplot(diag.remove(grede))
# identifies the cutpoints of an input graph. Depending on mode, either a directed or undirected notion of "cutpoint" can be used.
weak <- cutpoints(grede,connected="weak",return.indicator=TRUE)
gplot(grede,vertex.col=2+weak)
strong <- cutpoints(grede,connected="strong",return.indicator=TRUE)
gplot(grede,vertex.col=2+strong)
# Given a set of equivalence classes and one or more graphs, blockmodel will form a blockmodel of the input
# graph(s) based on the classes in question, using the specified block content type.
eq<-equiv.clust(grede)
b<-blockmodel(grede,eq, h=2)
plot(b)
# Grand finale... to impress (translated): 3D and interactive plots.
gplot3d(grede)
gplot(grede, interactive = TRUE)
######################### SNA
# Find the Central Graph of a Labeled Graph Stack
cg <- centralgraph(grede)
gplot3d(cg)
# Compute The Closeness Centrality Scores Of Network Positions
closeness(grede)
# Find weak components
components(grede,connected="weak")
# Find strong components
components(grede,connected="strong")
# Compute Graph Efficiency Scores
efficiency(grede, g=NULL, diag=FALSE)
# Compute The Degree Centrality Scores Of Network Positions
degree(grede, g=1, nodes=NULL, gmode="digraph", diag=FALSE, tmaxdev=FALSE, cmode="freeman", rescale=FALSE, ignore.eval=FALSE)
# Find The Density Of A Graph
gden(grede)
# FROM HERE DOWN DOES NOT WORK FOR 2-MODE (translated from Portuguese)
# Create an igraph graph object from the adjacency matrix.
g1 <- graph.adjacency(as.matrix(grede), weighted=NULL, mode = "directed",add.colnames = colnames(grede), add.rownames = rownames(grede))
summary(g1)
# Show whether the graph has strong or weak connectivity (translated).
is_connected(g1, mode = "weak")
count_components(g1, mode = "weak")
is_connected(g1, mode = "strong")
count_components(g1, mode = "strong")
######################### igraph
# Precompute several layout algorithms for comparison.
# NOTE(review): the layout.* dot-names are legacy igraph API; newer code uses
# layout_with_fr(), layout_in_circle(), etc. -- confirm the installed version.
layout1 <- layout.fruchterman.reingold(g1)
layout2 <- layout.circle(g1)
layout3 <- layout.sphere(g1)
layout4 <- layout.random(g1)
layout5 <- layout.reingold.tilford(g1)
layout6 <- layout.kamada.kawai(g1)
layout7 <- layout.lgl(g1)
# Plot the graph once per precomputed layout.
plot(g1, layout=layout1)
plot(g1, layout=layout2)
plot(g1, layout=layout3)
plot(g1, layout=layout4)
plot(g1, layout=layout5)
plot(g1, layout=layout6)
plot(g1, layout=layout7)
| /ARSTM/Tarefa1/script-2-mode.R | permissive | squassina/Trabalhos | R | false | false | 4,354 | r | install.packages("rgl")
# Duplicate copy of the setup section (its first install, rgl, is fused into
# the preceding metadata row of this dump).
install.packages("sna")
install.packages("network")
install.packages("igraph")
library(network)
library(sna)
library(rgl)
library(readxl)
library(igraph)
# Works from a random network (original comment, translated; the data is in
# fact read from a CSV of a two-mode network).
rede <- read.table("Rede Two Mode_Tarefa Aula 1_Paulista T4.csv",header=TRUE,sep = ",")
dim(rede)
# Adapt the data.frame: columns 2-12 are the relation matrix, column 1 names.
grede <- rede[,2:12]
rownames(grede) <- rede[,1]
dim(grede)
# Plot the network built from the 0/1 relation matrix (translated from
# Portuguese). Node sizes/labels are scaled by degree, closeness and
# betweenness centrality in turn.
# FIX: the last four calls passed gmode="grede" (the data object's name, not a
# valid gmode); sna::gplot expects "digraph", "graph" or "twomode". "graph"
# matches the sibling calls here. NOTE(review): since this is a two-mode
# network, gmode="twomode" may be the analytically correct choice -- confirm.
gplot(grede)
gplot(grede, gmode="graph", displaylabels = TRUE)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE)
# Sized / labelled by indegree
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, vertex.cex=degree(grede,gmode="graph",cmode="indegree")/3)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, label=degree(grede,gmode="graph",cmode="indegree"))
# Sized / labelled by closeness
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, vertex.cex=closeness(grede,gmode="graph")*2)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, label=round(closeness(grede,gmode="graph"),digits=2))
# Sized / labelled by betweenness
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, vertex.cex=betweenness(grede,gmode="graph")/3+1)
gplot(grede, gmode="graph", displaylabels = TRUE, edge.col="gray", usearrows=FALSE, label=betweenness(grede,gmode="graph"))
# Explore the network: centrality scores, components, CUG tests, clustering.
sna::degree(grede,gmode="graph",cmode="indegree")
sna::closeness(grede,gmode="graph")
sna::betweenness(grede,gmode="graph")
sna::bicomponent.dist(grede) # returns the bicomponents of an input graph, along with the size distribution and membership information
sna::bicomponent.dist(grede, symmetrize = c("strong", "weak"))
sna::components(grede,connected="weak")
sna::components(grede,connected="strong")
# Conditional-uniform-graph tests of transitivity under three null models.
sna::cug.test(grede,gtrans,cmode="size")
sna::cug.test(grede,gtrans,cmode="edges")
sna::cug.test(grede,gtrans,cmode="dyad.census")
sna::diag.remove(grede)
sna::efficiency(grede)
sna::gden(grede)
# Plot the equivalence clustering as a dendrogram.
eq <- equiv.clust(grede)
plot(eq)
gplot(diag.remove(grede))
# identifies the cutpoints of an input graph. Depending on mode, either a directed or undirected notion of "cutpoint" can be used.
weak <- cutpoints(grede,connected="weak",return.indicator=TRUE)
gplot(grede,vertex.col=2+weak)
strong <- cutpoints(grede,connected="strong",return.indicator=TRUE)
gplot(grede,vertex.col=2+strong)
# Given a set of equivalence classes and one or more graphs, blockmodel will form a blockmodel of the input
# graph(s) based on the classes in question, using the specified block content type.
eq<-equiv.clust(grede)
b<-blockmodel(grede,eq, h=2)
plot(b)
# Grand finale... to impress (translated): 3D and interactive plots.
gplot3d(grede)
gplot(grede, interactive = TRUE)
######################### SNA
# Find the Central Graph of a Labeled Graph Stack
cg <- centralgraph(grede)
gplot3d(cg)
# Compute The Closeness Centrality Scores Of Network Positions
closeness(grede)
# Find weak components
components(grede,connected="weak")
# Find strong components
components(grede,connected="strong")
# Compute Graph Efficiency Scores
efficiency(grede, g=NULL, diag=FALSE)
# Compute The Degree Centrality Scores Of Network Positions
degree(grede, g=1, nodes=NULL, gmode="digraph", diag=FALSE, tmaxdev=FALSE, cmode="freeman", rescale=FALSE, ignore.eval=FALSE)
# Find The Density Of A Graph
gden(grede)
# FROM HERE DOWN DOES NOT WORK FOR 2-MODE (translated from Portuguese)
# Create an igraph graph object from the adjacency matrix.
g1 <- graph.adjacency(as.matrix(grede), weighted=NULL, mode = "directed",add.colnames = colnames(grede), add.rownames = rownames(grede))
summary(g1)
# Show whether the graph has strong or weak connectivity (translated).
is_connected(g1, mode = "weak")
count_components(g1, mode = "weak")
is_connected(g1, mode = "strong")
count_components(g1, mode = "strong")
######################### igraph
# Precompute several layout algorithms for comparison.
# NOTE(review): the layout.* dot-names are legacy igraph API; newer code uses
# layout_with_fr(), layout_in_circle(), etc. -- confirm the installed version.
layout1 <- layout.fruchterman.reingold(g1)
layout2 <- layout.circle(g1)
layout3 <- layout.sphere(g1)
layout4 <- layout.random(g1)
layout5 <- layout.reingold.tilford(g1)
layout6 <- layout.kamada.kawai(g1)
layout7 <- layout.lgl(g1)
# Plot the graph once per precomputed layout.
plot(g1, layout=layout1)
plot(g1, layout=layout2)
plot(g1, layout=layout3)
plot(g1, layout=layout4)
plot(g1, layout=layout5)
plot(g1, layout=layout6)
plot(g1, layout=layout7)
|
# Phylogenetics preprocessing: read a Newick-format gene tree and remove its
# root; the unrooted tree is written out by the script's following line.
library(ape)
testtree <- read.tree("9623_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9623_0_unrooted.txt") | /codeml_files/newick_trees_processed/9623_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Duplicate copy: re-read the Newick tree and unroot it.
testtree <- read.tree("9623_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9623_0_unrooted.txt") |
library(rpsychi)
# Function to display the possible ranges of the F (or t) statistic from a
# one- or two-way ANOVA, given summary statistics whose means and SDs were
# rounded to `dp` decimal places for publication.
#
# Arguments:
#   m.p    - vector (one-way) or matrix (two-way) of reported cell means
#   sd.p   - reported cell standard deviations, same shape as m.p
#   n.p    - cell sizes, same shape as m.p
#   title  - optional label printed before the results (FALSE = no label)
#   u.p    - passed through to rpsychi as `unbiased`
#   show.t - one-way designs only: report t = sqrt(F) instead of F
#   dp     - number of decimal places the means/SDs were rounded to
#   labels - optional labels for the reported statistics
#
# Prints one line per call: the nominal statistic(s) plus the min/max values
# consistent with the rounding error.
f_range <- function (m.p, sd.p, n.p, title=FALSE, u.p=TRUE, show.t=FALSE, dp=2, labels=c()) {
  m.ok <- m.p
  # FIX: use is.matrix(). Under R >= 4.0 a matrix has class
  # c("matrix", "array"), so the original `class(m.ok) == "matrix"` produced
  # a length-2 logical condition, which if() warns about (and errors on from
  # R 4.2).
  if (is.matrix(m.ok)) {
    func <- ind.twoway.second
    useF <- c(3, 2, 4)
    default_labels <- c("col F", "row F", "inter F")
  }
  else {
    m.ok <- matrix(m.p)
    func <- ind.oneway.second
    useF <- 1
    default_labels <- c("F")
    if (show.t) {
      default_labels <- c("t")
    }
  }
  if (length(labels) == 0) {
    labels <- default_labels
  }
  # Calculate the nominal test statistic(s) (i.e., assuming no rounding error)
  f.nom <- func(m=m.ok, sd=sd.p, n=n.p, unbiased=u.p)$anova.table$F
  # We correct for rounding in reported numbers by allowing for the maximum possible rounding error.
  # For the maximum F estimate, we subtract .005 from all SDs; for minimum F estimate, we add .005.
  # We then add or subtract .005 to every mean, in all possible permutations.
  # (".005" is an example, based on 2 decimal places of precision.)
  delta <- (0.1 ^ dp) / 2 #typically 0.005
  sd.hi <- sd.p - delta
  sd.lo <- sd.p + delta
  # Start the running max/min at the nominal statistics and widen from there.
  # (The original also pre-initialised these to -1/999999; those assignments
  # were dead stores, immediately overwritten.)
  f.hi <- f.nom
  f.lo <- f.nom
  # Generate every possible combination of +/- maximum rounding error to add to each mean.
  # NOTE: the number of combinations grows combinatorially with the number of
  # cells, so this is only practical for small designs.
  l <- length(m.ok)
  rawcomb <- combn(rep(c(-delta, delta), l), l)
  comb <- rawcomb[, !duplicated(t(rawcomb)), drop=FALSE]  # drop=FALSE keeps a matrix even with one column
  # Generate every possible set of test statistics within the bounds of rounding error,
  # and retain the largest and smallest.
  for (i in seq_len(ncol(comb))) {
    m.adj <- m.ok + comb[,i]
    f.hi <- pmax(f.hi, func(m=m.adj, sd=sd.hi, n=n.p, unbiased=u.p)$anova.table$F)
    f.lo <- pmin(f.lo, func(m=m.adj, sd=sd.lo, n=n.p, unbiased=u.p)$anova.table$F)
  }
  if (show.t) {
    # With 1 numerator df, t is the square root of F.
    f.nom <- sqrt(f.nom)
    f.hi <- sqrt(f.hi)
    f.lo <- sqrt(f.lo)
  }
  if (!identical(title, FALSE)) {
    cat(title)
  }
  sp <- " "
  dpf <- paste("%.", dp, "f", sep="")
  for (i in seq_along(useF)) {
    j <- useF[i]
    cat(sp, labels[i], ": ", sprintf(dpf, f.nom[j]),
        " (min=", sprintf(dpf, f.lo[j]),
        ", max=", sprintf(dpf, f.hi[j]), ")",
        sep="")
    sp <- " "
  }
  cat("\n", sep="")
}
# Article 1 ("Lower Buffet Prices"): possible F ranges for each reported row
# of the article's Tables 1 and 2.
# NOTE(review): the m=/sd=/n= argument names rely on R's partial matching
# against f_range's m.p/sd.p/n.p parameters.
cat("Article 1 - Lower Buffet Prices - Table 1\n")
n.lbp.t1 <- c(62, 60)
m.lbp.t1.l1 <- c(44.16, 46.08)
sd.lbp.t1.l1 <- c(18.99, 14.46)
f_range(m = m.lbp.t1.l1, sd = sd.lbp.t1.l1, n = n.lbp.t1, title="Age")
m.lbp.t1.l3 <- c(68.52, 67.91)
sd.lbp.t1.l3 <- c(3.95, 3.93)
f_range(m = m.lbp.t1.l3, sd = sd.lbp.t1.l3, n = n.lbp.t1, title="Height")
m.lbp.t1.l4 <- c(180.84, 182.31)
sd.lbp.t1.l4 <- c(48.37, 48.41)
f_range(m = m.lbp.t1.l4, sd = sd.lbp.t1.l4, n = n.lbp.t1, title="Weight")
m.lbp.t1.l5 <- c(3.00, 3.28)
sd.lbp.t1.l5 <- c(1.55, 1.29)
f_range(m = m.lbp.t1.l5, sd = sd.lbp.t1.l5, n = n.lbp.t1, title="Group size")
# Next line gives an F too small for rpsychi to calculate
#m.lbp.t1.l6 <- c(6.62, 6.64)
#sd.lbp.t1.l6 <- c(1.85, 2.06)
#f_range(m = m.lbp.t1.l6, sd = sd.lbp.t1.l6, n = n.lbp.t1, title="Hungry then")
m.lbp.t1.l7 <- c(1.88, 1.85)
sd.lbp.t1.l7 <- c(1.34, 1.75)
f_range(m = m.lbp.t1.l7, sd = sd.lbp.t1.l7, n = n.lbp.t1, title="Hungry now")
cat("\n")
cat("Article 1 - Lower Buffet Prices - Table 2\n")
n.lbp.t2.1 <- c(62, 60) #Lines 1-4
n.lbp.t2.2 <- c(41, 26) #Lines 5-7
n.lbp.t2.3 <- c(47, 38) #Lines 8-10 (original comment said 5-7, but this n is used for the Piece 3 rows below)
m.lbp.t2.l1 <- c(6.89, 7.44)
sd.lbp.t2.l1 <- c(1.39, 1.60)
f_range(m = m.lbp.t2.l1, sd = sd.lbp.t2.l1, n = n.lbp.t2.1, title="Overall taste")
m.lbp.t2.l2 <- c(7.08, 7.45)
sd.lbp.t2.l2 <- c(1.30, 1.60)
f_range(m = m.lbp.t2.l2, sd = sd.lbp.t2.l2, n = n.lbp.t2.1, title="Piece 1, taste")
m.lbp.t2.l3 <- c(7.08, 7.34)
sd.lbp.t2.l3 <- c(1.37, 1.70)
f_range(m = m.lbp.t2.l3, sd = sd.lbp.t2.l3, n = n.lbp.t2.1, title="Piece 1, satisfying")
m.lbp.t2.l4 <- c(7.05, 7.47)
sd.lbp.t2.l4 <- c(1.40, 1.55)
f_range(m = m.lbp.t2.l4, sd = sd.lbp.t2.l4, n = n.lbp.t2.1, title="Piece 1, enjoyable")
m.lbp.t2.l5 <- c(6.68, 7.97)
sd.lbp.t2.l5 <- c(1.49, 1.21)
f_range(m = m.lbp.t2.l5, sd = sd.lbp.t2.l5, n = n.lbp.t2.2, title="Piece 2, taste")
m.lbp.t2.l6 <- c(6.68, 7.97)
sd.lbp.t2.l6 <- c(1.49, 1.21)
f_range(m = m.lbp.t2.l6, sd = sd.lbp.t2.l6, n = n.lbp.t2.2, title="Piece 2, satisfying")
m.lbp.t2.l7 <- c(6.64, 7.81)
sd.lbp.t2.l7 <- c(1.48, 1.22)
f_range(m = m.lbp.t2.l7, sd = sd.lbp.t2.l7, n = n.lbp.t2.2, title="Piece 2, enjoyable")
m.lbp.t2.l8 <- c(6.15, 7.58)
sd.lbp.t2.l8 <- c(1.89, 1.39)
f_range(m = m.lbp.t2.l8, sd = sd.lbp.t2.l8, n = n.lbp.t2.3, title="Piece 3, taste")
m.lbp.t2.l9 <- c(6.16, 7.41)
sd.lbp.t2.l9 <- c(1.87, 1.55)
f_range(m = m.lbp.t2.l9, sd = sd.lbp.t2.l9, n = n.lbp.t2.3, title="Piece 3, satisfying")
m.lbp.t2.l10 <- c(5.98, 7.45)
sd.lbp.t2.l10 <- c(1.86, 1.52)
f_range(m = m.lbp.t2.l10, sd = sd.lbp.t2.l10, n = n.lbp.t2.3, title="Piece 3, enjoyable")
cat("\n")
# Article 3 ("Eating Heavily"), Table 1: two-group comparisons reported as t
# (show.t=TRUE), split by gender.
cat("Article 3 - Eating Heavily - Table 1\n")
n.eh.t1.men <- c(40, 20)
n.eh.t1.women <- c(35, 10)
m.eh.t1.l1.men <- c(44, 43)
sd.eh.t1.l1.men <- c(18.86, 11.19)
f_range(m = m.eh.t1.l1.men, sd = sd.eh.t1.l1.men, n = n.eh.t1.men, title="Line 1, men", show.t=TRUE)
m.eh.t1.l2.men <- c(178.02, 181.11)
sd.eh.t1.l2.men <- c(7.72, 7.32)
f_range(m = m.eh.t1.l2.men, sd = sd.eh.t1.l2.men, n = n.eh.t1.men, title="Line 2, men", show.t=TRUE)
m.eh.t1.l3.men <- c(86.35, 100.80)
sd.eh.t1.l3.men <- c(17.92, 21.33)
f_range(m = m.eh.t1.l3.men, sd = sd.eh.t1.l3.men, n = n.eh.t1.men, title="Line 3, men", show.t=TRUE)
m.eh.t1.l4.men <- c(27.20, 30.96)
sd.eh.t1.l4.men <- c(5.13, 6.62)
f_range(m = m.eh.t1.l4.men, sd = sd.eh.t1.l4.men, n = n.eh.t1.men, title="Line 4, men", show.t=TRUE)
m.eh.t1.l1.women <- c(44.52, 48.18)
sd.eh.t1.l1.women <- c(17.09, 16.49)
f_range(m = m.eh.t1.l1.women, sd = sd.eh.t1.l1.women, n = n.eh.t1.women, title="Line 1, women", show.t=TRUE)
m.eh.t1.l2.women <- c(165.83, 164.82)
sd.eh.t1.l2.women <- c(7.71, 5.88)
f_range(m = m.eh.t1.l2.women, sd = sd.eh.t1.l2.women, n = n.eh.t1.women, title="Line 2, women", show.t=TRUE)
m.eh.t1.l3.women <- c(64.63, 75.54)
sd.eh.t1.l3.women <- c(10.95, 12.42)
f_range(m = m.eh.t1.l3.women, sd = sd.eh.t1.l3.women, n = n.eh.t1.women, title="Line 3, women", show.t=TRUE)
m.eh.t1.l4.women <- c(23.46, 27.77)
sd.eh.t1.l4.women <- c(3.53, 3.68)
f_range(m = m.eh.t1.l4.women, sd = sd.eh.t1.l4.women, n = n.eh.t1.women, title="Line 4, women", show.t=TRUE)
cat("\n")
# Table 2: 2x2 (gender x group) design, so means/SDs/ns are matrices and
# f_range reports three F statistics per line.
cat("Article 3 - Eating Heavily - Table 2\n")
lab.eh.t2 <- c("gender", "group", "gender x group")
n.eh.t2 <- matrix(c(40, 20, 35, 10), ncol=2)
m.eh.t2.l1 <- matrix(c(5.00, 2.69, 4.83, 5.54), ncol=2)
sd.eh.t2.l1 <- matrix(c(2.99, 2.57, 2.71, 1.84), ncol=2)
f_range(m = m.eh.t2.l1, sd = sd.eh.t2.l1, n = n.eh.t2, title="Line 1", labels=lab.eh.t2)
m.eh.t2.l2 <- matrix(c(2.99, 1.55, 1.33, 1.05), ncol=2)
sd.eh.t2.l2 <- matrix(c(1.75, 1.07, 0.83, 1.38), ncol=2)
f_range(m = m.eh.t2.l2, sd = sd.eh.t2.l2, n = n.eh.t2, title="Line 2", labels=lab.eh.t2)
m.eh.t2.l3 <- matrix(c(2.67, 2.76, 2.73, 1.00), ncol=2)
sd.eh.t2.l3 <- matrix(c(2.04, 2.18, 2.16, 0.00), ncol=2)
f_range(m = m.eh.t2.l3, sd = sd.eh.t2.l3, n = n.eh.t2, title="Line 3", labels=lab.eh.t2)
m.eh.t2.l4 <- matrix(c(1.46, 1.90, 2.29, 1.18), ncol=2)
sd.eh.t2.l4 <- matrix(c(1.07, 1.48, 2.28, 0.40), ncol=2)
f_range(m = m.eh.t2.l4, sd = sd.eh.t2.l4, n = n.eh.t2, title="Line 4", labels=lab.eh.t2)
m.eh.t2.l5 <- matrix(c(478.75, 397.5, 463.61, 111.71), ncol=2)
sd.eh.t2.l5 <- matrix(c(290.67, 191.37, 264.25, 109.57), ncol=2)
f_range(m = m.eh.t2.l5, sd = sd.eh.t2.l5, n = n.eh.t2, title="Line 5", labels=lab.eh.t2)
m.eh.t2.l6 <- matrix(c(2.11, 2.27, 2.20, 1.91), ncol=2)
sd.eh.t2.l6 <- matrix(c(1.54, 1.75, 1.71, 2.12), ncol=2)
f_range(m = m.eh.t2.l6, sd = sd.eh.t2.l6, n = n.eh.t2, title="Line 6", labels=lab.eh.t2)
cat("\n")
# Article 3, Table 3: three-group one-way comparisons.
cat("Article 3 - Eating Heavily - Table 3\n")
n.eh.t3 <- c(20, 21, 19)
m.eh.t3.l1 <- c(2.69, 5.55, 4.33)
sd.eh.t3.l1 <- c(2.57, 2.66, 3.31)
f_range(m = m.eh.t3.l1, sd = sd.eh.t3.l1, n = n.eh.t3, title="Line 1")
m.eh.t3.l2 <- c(1.55, 2.79, 3.13)
sd.eh.t3.l2 <- c(1.07, 1.54, 2.18)
f_range(m = m.eh.t3.l2, sd = sd.eh.t3.l2, n = n.eh.t3, title="Line 2")
m.eh.t3.l3 <- c(2.76, 2.92, 2.53)
sd.eh.t3.l3 <- c(2.19, 2.3, 1.81)
f_range(m = m.eh.t3.l3, sd = sd.eh.t3.l3, n = n.eh.t3, title="Line 3")
m.eh.t3.l4 <- c(1.9, 1.65, 1.47)
sd.eh.t3.l4 <- c(1.48, 1.34, 1.23)
f_range(m = m.eh.t3.l4, sd = sd.eh.t3.l4, n = n.eh.t3, title="Line 4")
m.eh.t3.l5 <- c(397.5, 409.52, 555.26)
sd.eh.t3.l5 <- c(191.38, 246.87, 321.84)
f_range(m = m.eh.t3.l5, sd = sd.eh.t3.l5, n = n.eh.t3, title="Line 5")
m.eh.t3.l6 <- c(2.27, 2.32, 1.95)
sd.eh.t3.l6 <- c(1.75, 1.77, 1.24)
f_range(m = m.eh.t3.l6, sd = sd.eh.t3.l6, n = n.eh.t3, title="Line 6")
cat("\n")
# Article 4 ("Low Prices and High Regret"): Table 1 two-group t comparisons,
# Table 2 a 3x2 (pieces x price) design, Table 3 pairwise comparisons by
# number of pieces.
cat("Article 4 - Low Prices and High Regret - Table 1\n")
n.lp.t1 <- c(43, 52)
m.lp.t1.l1 <- c(43.67, 44.55)
sd.lp.t1.l1 <- c(18.50, 14.30)
f_range(m = m.lp.t1.l1, sd = sd.lp.t1.l1, n = n.lp.t1, title="Age", show.t=TRUE)
m.lp.t1.l2 <- c(68.65, 66.51)
sd.lp.t1.l2 <- c(3.67, 9.44)
f_range(m = m.lp.t1.l2, sd = sd.lp.t1.l2, n = n.lp.t1, title="Height", show.t=TRUE)
m.lp.t1.l3 <- c(184.83, 178.38)
sd.lp.t1.l3 <- c(63.70, 45.71)
f_range(m = m.lp.t1.l3, sd = sd.lp.t1.l3, n = n.lp.t1, title="Weight", show.t=TRUE)
cat("\n")
cat("Article 4 - Low Prices and High Regret - Table 2\n")
lab.lp.t2 <- c("price", "pieces", "price x pieces")
n.lp.t2 <- matrix(c(18, 18, 7, 17, 19, 10), ncol=2)
m.lp.t2.l1 <- matrix(c(2.63, 4.82, 6.00, 1.76, 3.53, 4.40), ncol=2)
sd.lp.t2.l1 <- matrix(c(2.06, 2.55, 2.00, 1.82, 2.39, 3.24), ncol=2)
f_range(m = m.lp.t2.l1, sd = sd.lp.t2.l1, n = n.lp.t2, title="Line 1", labels=lab.lp.t2)
m.lp.t2.l2 <- matrix(c(2.39, 3.44, 3.71, 2.26, 1.68, 2.90), ncol=2)
sd.lp.t2.l2 <- matrix(c(1.94, 2.47, 1.49, 1.79, 1.42, 2.08), ncol=2)
f_range(m = m.lp.t2.l2, sd = sd.lp.t2.l2, n = n.lp.t2, title="Line 2", labels=lab.lp.t2)
m.lp.t2.l3 <- matrix(c(2.17, 2.94, 2.43, 1.97, 1.45, 2.25), ncol=2)
sd.lp.t2.l3 <- matrix(c(1.88, 2.12, 1.51, 1.68, 0.94, 1.81), ncol=2)
f_range(m = m.lp.t2.l3, sd = sd.lp.t2.l3, n = n.lp.t2, title="Line 3", labels=lab.lp.t2)
m.lp.t2.l4 <- matrix(c(2.11, 3.89, 3.71, 1.67, 1.67, 3.50), ncol=2)
sd.lp.t2.l4 <- matrix(c(1.81, 2.59, 1.79, 1.28, 1.24, 2.74), ncol=2)
f_range(m = m.lp.t2.l4, sd = sd.lp.t2.l4, n = n.lp.t2, title="Line 4", labels=lab.lp.t2)
m.lp.t2.l5 <- matrix(c(2.50, 4.28, 4.57, 2.00, 2.14, 3.92), ncol=2)
sd.lp.t2.l5 <- matrix(c(2.20, 2.44, 2.22, 1.45, 1.77, 2.81), ncol=2)
f_range(m = m.lp.t2.l5, sd = sd.lp.t2.l5, n = n.lp.t2, title="Line 5", labels=lab.lp.t2)
cat("\n")
cat("Article 4 - Low Prices and High Regret - Table 3\n")
n.lp.t3.1p <- c(18, 19)
n.lp.t3.2p <- c(18, 21)
n.lp.t3.3p <- c(7, 12)
m.lp.t3.l1.1p <- c(2.63, 1.76)
sd.lp.t3.l1.1p <- c(2.06, 1.82)
f_range(m = m.lp.t3.l1.1p, sd = sd.lp.t3.l1.1p, n = n.lp.t3.1p, title="Line 1, 1 piece")
m.lp.t3.l1.2p <- c(4.82, 3.53)
sd.lp.t3.l1.2p <- c(2.55, 2.39)
f_range(m = m.lp.t3.l1.2p, sd = sd.lp.t3.l1.2p, n = n.lp.t3.2p, title="Line 1, 2 pieces")
m.lp.t3.l1.3p <- c(6.00, 4.40)
sd.lp.t3.l1.3p <- c(2.00, 3.24)
f_range(m = m.lp.t3.l1.3p, sd = sd.lp.t3.l1.3p, n = n.lp.t3.3p, title="Line 1, 3 pieces")
m.lp.t3.l2.1p <- c(2.39, 2.26)
sd.lp.t3.l2.1p <- c(1.94, 1.79)
f_range(m = m.lp.t3.l2.1p, sd = sd.lp.t3.l2.1p, n = n.lp.t3.1p, title="Line 2, 1 piece")
m.lp.t3.l2.2p <- c(3.44, 1.68)
sd.lp.t3.l2.2p <- c(2.48, 1.42)
f_range(m = m.lp.t3.l2.2p, sd = sd.lp.t3.l2.2p, n = n.lp.t3.2p, title="Line 2, 2 pieces")
m.lp.t3.l2.3p <- c(3.71, 2.90)
sd.lp.t3.l2.3p <- c(1.50, 2.08)
f_range(m = m.lp.t3.l2.3p, sd = sd.lp.t3.l2.3p, n = n.lp.t3.3p, title="Line 2, 3 pieces")
# NOTE(review): 1.955 below has three decimals while f_range assumes dp=2 --
# verify against the source article.
m.lp.t3.l3.1p <- c(2.17, 1.955)
sd.lp.t3.l3.1p <- c(1.89, 1.68)
f_range(m = m.lp.t3.l3.1p, sd = sd.lp.t3.l3.1p, n = n.lp.t3.1p, title="Line 3, 1 piece")
m.lp.t3.l3.2p <- c(2.94, 1.28)
sd.lp.t3.l3.2p <- c(2.13, 0.46)
f_range(m = m.lp.t3.l3.2p, sd = sd.lp.t3.l3.2p, n = n.lp.t3.2p, title="Line 3, 2 pieces")
m.lp.t3.l3.3p <- c(2.43, 2.10)
sd.lp.t3.l3.3p <- c(1.51, 1.91)
f_range(m = m.lp.t3.l3.3p, sd = sd.lp.t3.l3.3p, n = n.lp.t3.3p, title="Line 3, 3 pieces")
m.lp.t3.l4.1p <- c(2.11, 1.67)
sd.lp.t3.l4.1p <- c(1.81, 1.28)
f_range(m = m.lp.t3.l4.1p, sd = sd.lp.t3.l4.1p, n = n.lp.t3.1p, title="Line 4, 1 piece")
m.lp.t3.l4.2p <- c(3.89, 1.53)
sd.lp.t3.l4.2p <- c(2.59, 1.02)
f_range(m = m.lp.t3.l4.2p, sd = sd.lp.t3.l4.2p, n = n.lp.t3.2p, title="Line 4, 2 pieces")
m.lp.t3.l4.3p <- c(3.71, 3.50)
sd.lp.t3.l4.3p <- c(1.79, 2.95)
f_range(m = m.lp.t3.l4.3p, sd = sd.lp.t3.l4.3p, n = n.lp.t3.3p, title="Line 4, 3 pieces")
m.lp.t3.l5.1p <- c(2.50, 2.00)
sd.lp.t3.l5.1p <- c(2.20, 1.45)
f_range(m = m.lp.t3.l5.1p, sd = sd.lp.t3.l5.1p, n = n.lp.t3.1p, title="Line 5, 1 piece")
m.lp.t3.l5.2p <- c(4.28, 2.05)
sd.lp.t3.l5.2p <- c(2.44, 1.72)
f_range(m = m.lp.t3.l5.2p, sd = sd.lp.t3.l5.2p, n = n.lp.t3.2p, title="Line 5, 2 pieces")
m.lp.t3.l5.3p <- c(4.57, 4.00)
sd.lp.t3.l5.3p <- c(2.23, 3.02)
f_range(m = m.lp.t3.l5.3p, sd = sd.lp.t3.l5.3p, n = n.lp.t3.3p, title="Line 5, 3 pieces")
| /R/Wansink-reanalyses.R | permissive | OmnesRes/pizzapizza | R | false | false | 13,150 | r | library(rpsychi)
# Function to display the possible ranges of the F (or t) statistic from a
# one- or two-way ANOVA, given summary statistics whose means and SDs were
# rounded to `dp` decimal places for publication.
#
# Arguments:
#   m.p    - vector (one-way) or matrix (two-way) of reported cell means
#   sd.p   - reported cell standard deviations, same shape as m.p
#   n.p    - cell sizes, same shape as m.p
#   title  - optional label printed before the results (FALSE = no label)
#   u.p    - passed through to rpsychi as `unbiased`
#   show.t - one-way designs only: report t = sqrt(F) instead of F
#   dp     - number of decimal places the means/SDs were rounded to
#   labels - optional labels for the reported statistics
#
# Prints one line per call: the nominal statistic(s) plus the min/max values
# consistent with the rounding error.
f_range <- function (m.p, sd.p, n.p, title=FALSE, u.p=TRUE, show.t=FALSE, dp=2, labels=c()) {
  m.ok <- m.p
  # FIX: use is.matrix(). Under R >= 4.0 a matrix has class
  # c("matrix", "array"), so the original `class(m.ok) == "matrix"` produced
  # a length-2 logical condition, which if() warns about (and errors on from
  # R 4.2).
  if (is.matrix(m.ok)) {
    func <- ind.twoway.second
    useF <- c(3, 2, 4)
    default_labels <- c("col F", "row F", "inter F")
  }
  else {
    m.ok <- matrix(m.p)
    func <- ind.oneway.second
    useF <- 1
    default_labels <- c("F")
    if (show.t) {
      default_labels <- c("t")
    }
  }
  if (length(labels) == 0) {
    labels <- default_labels
  }
  # Calculate the nominal test statistic(s) (i.e., assuming no rounding error)
  f.nom <- func(m=m.ok, sd=sd.p, n=n.p, unbiased=u.p)$anova.table$F
  # We correct for rounding in reported numbers by allowing for the maximum possible rounding error.
  # For the maximum F estimate, we subtract .005 from all SDs; for minimum F estimate, we add .005.
  # We then add or subtract .005 to every mean, in all possible permutations.
  # (".005" is an example, based on 2 decimal places of precision.)
  delta <- (0.1 ^ dp) / 2 #typically 0.005
  sd.hi <- sd.p - delta
  sd.lo <- sd.p + delta
  # Start the running max/min at the nominal statistics and widen from there.
  # (The original also pre-initialised these to -1/999999; those assignments
  # were dead stores, immediately overwritten.)
  f.hi <- f.nom
  f.lo <- f.nom
  # Generate every possible combination of +/- maximum rounding error to add to each mean.
  # NOTE: the number of combinations grows combinatorially with the number of
  # cells, so this is only practical for small designs.
  l <- length(m.ok)
  rawcomb <- combn(rep(c(-delta, delta), l), l)
  comb <- rawcomb[, !duplicated(t(rawcomb)), drop=FALSE]  # drop=FALSE keeps a matrix even with one column
  # Generate every possible set of test statistics within the bounds of rounding error,
  # and retain the largest and smallest.
  for (i in seq_len(ncol(comb))) {
    m.adj <- m.ok + comb[,i]
    f.hi <- pmax(f.hi, func(m=m.adj, sd=sd.hi, n=n.p, unbiased=u.p)$anova.table$F)
    f.lo <- pmin(f.lo, func(m=m.adj, sd=sd.lo, n=n.p, unbiased=u.p)$anova.table$F)
  }
  if (show.t) {
    # With 1 numerator df, t is the square root of F.
    f.nom <- sqrt(f.nom)
    f.hi <- sqrt(f.hi)
    f.lo <- sqrt(f.lo)
  }
  if (!identical(title, FALSE)) {
    cat(title)
  }
  sp <- " "
  dpf <- paste("%.", dp, "f", sep="")
  for (i in seq_along(useF)) {
    j <- useF[i]
    cat(sp, labels[i], ": ", sprintf(dpf, f.nom[j]),
        " (min=", sprintf(dpf, f.lo[j]),
        ", max=", sprintf(dpf, f.hi[j]), ")",
        sep="")
    sp <- " "
  }
  cat("\n", sep="")
}
# Duplicate copy of the Article 1 ("Lower Buffet Prices") analysis: possible F
# ranges for each reported row of Tables 1 and 2.
# NOTE(review): the m=/sd=/n= argument names rely on R's partial matching
# against f_range's m.p/sd.p/n.p parameters.
cat("Article 1 - Lower Buffet Prices - Table 1\n")
n.lbp.t1 <- c(62, 60)
m.lbp.t1.l1 <- c(44.16, 46.08)
sd.lbp.t1.l1 <- c(18.99, 14.46)
f_range(m = m.lbp.t1.l1, sd = sd.lbp.t1.l1, n = n.lbp.t1, title="Age")
m.lbp.t1.l3 <- c(68.52, 67.91)
sd.lbp.t1.l3 <- c(3.95, 3.93)
f_range(m = m.lbp.t1.l3, sd = sd.lbp.t1.l3, n = n.lbp.t1, title="Height")
m.lbp.t1.l4 <- c(180.84, 182.31)
sd.lbp.t1.l4 <- c(48.37, 48.41)
f_range(m = m.lbp.t1.l4, sd = sd.lbp.t1.l4, n = n.lbp.t1, title="Weight")
m.lbp.t1.l5 <- c(3.00, 3.28)
sd.lbp.t1.l5 <- c(1.55, 1.29)
f_range(m = m.lbp.t1.l5, sd = sd.lbp.t1.l5, n = n.lbp.t1, title="Group size")
# Next line gives an F too small for rpsychi to calculate
#m.lbp.t1.l6 <- c(6.62, 6.64)
#sd.lbp.t1.l6 <- c(1.85, 2.06)
#f_range(m = m.lbp.t1.l6, sd = sd.lbp.t1.l6, n = n.lbp.t1, title="Hungry then")
m.lbp.t1.l7 <- c(1.88, 1.85)
sd.lbp.t1.l7 <- c(1.34, 1.75)
f_range(m = m.lbp.t1.l7, sd = sd.lbp.t1.l7, n = n.lbp.t1, title="Hungry now")
cat("\n")
cat("Article 1 - Lower Buffet Prices - Table 2\n")
n.lbp.t2.1 <- c(62, 60) #Lines 1-4
n.lbp.t2.2 <- c(41, 26) #Lines 5-7
n.lbp.t2.3 <- c(47, 38) #Lines 8-10 (original comment said 5-7, but this n is used for the Piece 3 rows below)
m.lbp.t2.l1 <- c(6.89, 7.44)
sd.lbp.t2.l1 <- c(1.39, 1.60)
f_range(m = m.lbp.t2.l1, sd = sd.lbp.t2.l1, n = n.lbp.t2.1, title="Overall taste")
m.lbp.t2.l2 <- c(7.08, 7.45)
sd.lbp.t2.l2 <- c(1.30, 1.60)
f_range(m = m.lbp.t2.l2, sd = sd.lbp.t2.l2, n = n.lbp.t2.1, title="Piece 1, taste")
m.lbp.t2.l3 <- c(7.08, 7.34)
sd.lbp.t2.l3 <- c(1.37, 1.70)
f_range(m = m.lbp.t2.l3, sd = sd.lbp.t2.l3, n = n.lbp.t2.1, title="Piece 1, satisfying")
m.lbp.t2.l4 <- c(7.05, 7.47)
sd.lbp.t2.l4 <- c(1.40, 1.55)
f_range(m = m.lbp.t2.l4, sd = sd.lbp.t2.l4, n = n.lbp.t2.1, title="Piece 1, enjoyable")
m.lbp.t2.l5 <- c(6.68, 7.97)
sd.lbp.t2.l5 <- c(1.49, 1.21)
f_range(m = m.lbp.t2.l5, sd = sd.lbp.t2.l5, n = n.lbp.t2.2, title="Piece 2, taste")
m.lbp.t2.l6 <- c(6.68, 7.97)
sd.lbp.t2.l6 <- c(1.49, 1.21)
f_range(m = m.lbp.t2.l6, sd = sd.lbp.t2.l6, n = n.lbp.t2.2, title="Piece 2, satisfying")
m.lbp.t2.l7 <- c(6.64, 7.81)
sd.lbp.t2.l7 <- c(1.48, 1.22)
f_range(m = m.lbp.t2.l7, sd = sd.lbp.t2.l7, n = n.lbp.t2.2, title="Piece 2, enjoyable")
m.lbp.t2.l8 <- c(6.15, 7.58)
sd.lbp.t2.l8 <- c(1.89, 1.39)
f_range(m = m.lbp.t2.l8, sd = sd.lbp.t2.l8, n = n.lbp.t2.3, title="Piece 3, taste")
m.lbp.t2.l9 <- c(6.16, 7.41)
sd.lbp.t2.l9 <- c(1.87, 1.55)
f_range(m = m.lbp.t2.l9, sd = sd.lbp.t2.l9, n = n.lbp.t2.3, title="Piece 3, satisfying")
m.lbp.t2.l10 <- c(5.98, 7.45)
sd.lbp.t2.l10 <- c(1.86, 1.52)
f_range(m = m.lbp.t2.l10, sd = sd.lbp.t2.l10, n = n.lbp.t2.3, title="Piece 3, enjoyable")
cat("\n")
cat("Article 3 - Eating Heavily - Table 1\n")
n.eh.t1.men <- c(40, 20)
n.eh.t1.women <- c(35, 10)
m.eh.t1.l1.men <- c(44, 43)
sd.eh.t1.l1.men <- c(18.86, 11.19)
f_range(m = m.eh.t1.l1.men, sd = sd.eh.t1.l1.men, n = n.eh.t1.men, title="Line 1, men", show.t=TRUE)
m.eh.t1.l2.men <- c(178.02, 181.11)
sd.eh.t1.l2.men <- c(7.72, 7.32)
f_range(m = m.eh.t1.l2.men, sd = sd.eh.t1.l2.men, n = n.eh.t1.men, title="Line 2, men", show.t=TRUE)
m.eh.t1.l3.men <- c(86.35, 100.80)
sd.eh.t1.l3.men <- c(17.92, 21.33)
f_range(m = m.eh.t1.l3.men, sd = sd.eh.t1.l3.men, n = n.eh.t1.men, title="Line 3, men", show.t=TRUE)
m.eh.t1.l4.men <- c(27.20, 30.96)
sd.eh.t1.l4.men <- c(5.13, 6.62)
f_range(m = m.eh.t1.l4.men, sd = sd.eh.t1.l4.men, n = n.eh.t1.men, title="Line 4, men", show.t=TRUE)
m.eh.t1.l1.women <- c(44.52, 48.18)
sd.eh.t1.l1.women <- c(17.09, 16.49)
f_range(m = m.eh.t1.l1.women, sd = sd.eh.t1.l1.women, n = n.eh.t1.women, title="Line 1, women", show.t=TRUE)
m.eh.t1.l2.women <- c(165.83, 164.82)
sd.eh.t1.l2.women <- c(7.71, 5.88)
f_range(m = m.eh.t1.l2.women, sd = sd.eh.t1.l2.women, n = n.eh.t1.women, title="Line 2, women", show.t=TRUE)
m.eh.t1.l3.women <- c(64.63, 75.54)
sd.eh.t1.l3.women <- c(10.95, 12.42)
f_range(m = m.eh.t1.l3.women, sd = sd.eh.t1.l3.women, n = n.eh.t1.women, title="Line 3, women", show.t=TRUE)
m.eh.t1.l4.women <- c(23.46, 27.77)
sd.eh.t1.l4.women <- c(3.53, 3.68)
f_range(m = m.eh.t1.l4.women, sd = sd.eh.t1.l4.women, n = n.eh.t1.women, title="Line 4, women", show.t=TRUE)
cat("\n")
cat("Article 3 - Eating Heavily - Table 2\n")
lab.eh.t2 <- c("gender", "group", "gender x group")
n.eh.t2 <- matrix(c(40, 20, 35, 10), ncol=2)
m.eh.t2.l1 <- matrix(c(5.00, 2.69, 4.83, 5.54), ncol=2)
sd.eh.t2.l1 <- matrix(c(2.99, 2.57, 2.71, 1.84), ncol=2)
f_range(m = m.eh.t2.l1, sd = sd.eh.t2.l1, n = n.eh.t2, title="Line 1", labels=lab.eh.t2)
m.eh.t2.l2 <- matrix(c(2.99, 1.55, 1.33, 1.05), ncol=2)
sd.eh.t2.l2 <- matrix(c(1.75, 1.07, 0.83, 1.38), ncol=2)
f_range(m = m.eh.t2.l2, sd = sd.eh.t2.l2, n = n.eh.t2, title="Line 2", labels=lab.eh.t2)
m.eh.t2.l3 <- matrix(c(2.67, 2.76, 2.73, 1.00), ncol=2)
sd.eh.t2.l3 <- matrix(c(2.04, 2.18, 2.16, 0.00), ncol=2)
f_range(m = m.eh.t2.l3, sd = sd.eh.t2.l3, n = n.eh.t2, title="Line 3", labels=lab.eh.t2)
m.eh.t2.l4 <- matrix(c(1.46, 1.90, 2.29, 1.18), ncol=2)
sd.eh.t2.l4 <- matrix(c(1.07, 1.48, 2.28, 0.40), ncol=2)
f_range(m = m.eh.t2.l4, sd = sd.eh.t2.l4, n = n.eh.t2, title="Line 4", labels=lab.eh.t2)
m.eh.t2.l5 <- matrix(c(478.75, 397.5, 463.61, 111.71), ncol=2)
sd.eh.t2.l5 <- matrix(c(290.67, 191.37, 264.25, 109.57), ncol=2)
f_range(m = m.eh.t2.l5, sd = sd.eh.t2.l5, n = n.eh.t2, title="Line 5", labels=lab.eh.t2)
m.eh.t2.l6 <- matrix(c(2.11, 2.27, 2.20, 1.91), ncol=2)
sd.eh.t2.l6 <- matrix(c(1.54, 1.75, 1.71, 2.12), ncol=2)
f_range(m = m.eh.t2.l6, sd = sd.eh.t2.l6, n = n.eh.t2, title="Line 6", labels=lab.eh.t2)
cat("\n")
cat("Article 3 - Eating Heavily - Table 3\n")
n.eh.t3 <- c(20, 21, 19)
m.eh.t3.l1 <- c(2.69, 5.55, 4.33)
sd.eh.t3.l1 <- c(2.57, 2.66, 3.31)
f_range(m = m.eh.t3.l1, sd = sd.eh.t3.l1, n = n.eh.t3, title="Line 1")
m.eh.t3.l2 <- c(1.55, 2.79, 3.13)
sd.eh.t3.l2 <- c(1.07, 1.54, 2.18)
f_range(m = m.eh.t3.l2, sd = sd.eh.t3.l2, n = n.eh.t3, title="Line 2")
m.eh.t3.l3 <- c(2.76, 2.92, 2.53)
sd.eh.t3.l3 <- c(2.19, 2.3, 1.81)
f_range(m = m.eh.t3.l3, sd = sd.eh.t3.l3, n = n.eh.t3, title="Line 3")
m.eh.t3.l4 <- c(1.9, 1.65, 1.47)
sd.eh.t3.l4 <- c(1.48, 1.34, 1.23)
f_range(m = m.eh.t3.l4, sd = sd.eh.t3.l4, n = n.eh.t3, title="Line 4")
m.eh.t3.l5 <- c(397.5, 409.52, 555.26)
sd.eh.t3.l5 <- c(191.38, 246.87, 321.84)
f_range(m = m.eh.t3.l5, sd = sd.eh.t3.l5, n = n.eh.t3, title="Line 5")
m.eh.t3.l6 <- c(2.27, 2.32, 1.95)
sd.eh.t3.l6 <- c(1.75, 1.77, 1.24)
f_range(m = m.eh.t3.l6, sd = sd.eh.t3.l6, n = n.eh.t3, title="Line 6")
cat("\n")
cat("Article 4 - Low Prices and High Regret - Table 1\n")
n.lp.t1 <- c(43, 52)
m.lp.t1.l1 <- c(43.67, 44.55)
sd.lp.t1.l1 <- c(18.50, 14.30)
f_range(m = m.lp.t1.l1, sd = sd.lp.t1.l1, n = n.lp.t1, title="Age", show.t=TRUE)
m.lp.t1.l2 <- c(68.65, 66.51)
sd.lp.t1.l2 <- c(3.67, 9.44)
f_range(m = m.lp.t1.l2, sd = sd.lp.t1.l2, n = n.lp.t1, title="Height", show.t=TRUE)
m.lp.t1.l3 <- c(184.83, 178.38)
sd.lp.t1.l3 <- c(63.70, 45.71)
f_range(m = m.lp.t1.l3, sd = sd.lp.t1.l3, n = n.lp.t1, title="Weight", show.t=TRUE)
cat("\n")
cat("Article 4 - Low Prices and High Regret - Table 2\n")
lab.lp.t2 <- c("price", "pieces", "price x pieces")
n.lp.t2 <- matrix(c(18, 18, 7, 17, 19, 10), ncol=2)
m.lp.t2.l1 <- matrix(c(2.63, 4.82, 6.00, 1.76, 3.53, 4.40), ncol=2)
sd.lp.t2.l1 <- matrix(c(2.06, 2.55, 2.00, 1.82, 2.39, 3.24), ncol=2)
f_range(m = m.lp.t2.l1, sd = sd.lp.t2.l1, n = n.lp.t2, title="Line 1", labels=lab.lp.t2)
m.lp.t2.l2 <- matrix(c(2.39, 3.44, 3.71, 2.26, 1.68, 2.90), ncol=2)
sd.lp.t2.l2 <- matrix(c(1.94, 2.47, 1.49, 1.79, 1.42, 2.08), ncol=2)
f_range(m = m.lp.t2.l2, sd = sd.lp.t2.l2, n = n.lp.t2, title="Line 2", labels=lab.lp.t2)
m.lp.t2.l3 <- matrix(c(2.17, 2.94, 2.43, 1.97, 1.45, 2.25), ncol=2)
sd.lp.t2.l3 <- matrix(c(1.88, 2.12, 1.51, 1.68, 0.94, 1.81), ncol=2)
f_range(m = m.lp.t2.l3, sd = sd.lp.t2.l3, n = n.lp.t2, title="Line 3", labels=lab.lp.t2)
m.lp.t2.l4 <- matrix(c(2.11, 3.89, 3.71, 1.67, 1.67, 3.50), ncol=2)
sd.lp.t2.l4 <- matrix(c(1.81, 2.59, 1.79, 1.28, 1.24, 2.74), ncol=2)
f_range(m = m.lp.t2.l4, sd = sd.lp.t2.l4, n = n.lp.t2, title="Line 4", labels=lab.lp.t2)
m.lp.t2.l5 <- matrix(c(2.50, 4.28, 4.57, 2.00, 2.14, 3.92), ncol=2)
sd.lp.t2.l5 <- matrix(c(2.20, 2.44, 2.22, 1.45, 1.77, 2.81), ncol=2)
f_range(m = m.lp.t2.l5, sd = sd.lp.t2.l5, n = n.lp.t2, title="Line 5", labels=lab.lp.t2)
cat("\n")
cat("Article 4 - Low Prices and High Regret - Table 3\n")
n.lp.t3.1p <- c(18, 19)
n.lp.t3.2p <- c(18, 21)
n.lp.t3.3p <- c(7, 12)
m.lp.t3.l1.1p <- c(2.63, 1.76)
sd.lp.t3.l1.1p <- c(2.06, 1.82)
f_range(m = m.lp.t3.l1.1p, sd = sd.lp.t3.l1.1p, n = n.lp.t3.1p, title="Line 1, 1 piece")
m.lp.t3.l1.2p <- c(4.82, 3.53)
sd.lp.t3.l1.2p <- c(2.55, 2.39)
f_range(m = m.lp.t3.l1.2p, sd = sd.lp.t3.l1.2p, n = n.lp.t3.2p, title="Line 1, 2 pieces")
m.lp.t3.l1.3p <- c(6.00, 4.40)
sd.lp.t3.l1.3p <- c(2.00, 3.24)
f_range(m = m.lp.t3.l1.3p, sd = sd.lp.t3.l1.3p, n = n.lp.t3.3p, title="Line 1, 3 pieces")
m.lp.t3.l2.1p <- c(2.39, 2.26)
sd.lp.t3.l2.1p <- c(1.94, 1.79)
f_range(m = m.lp.t3.l2.1p, sd = sd.lp.t3.l2.1p, n = n.lp.t3.1p, title="Line 2, 1 piece")
m.lp.t3.l2.2p <- c(3.44, 1.68)
sd.lp.t3.l2.2p <- c(2.48, 1.42)
f_range(m = m.lp.t3.l2.2p, sd = sd.lp.t3.l2.2p, n = n.lp.t3.2p, title="Line 2, 2 pieces")
m.lp.t3.l2.3p <- c(3.71, 2.90)
sd.lp.t3.l2.3p <- c(1.50, 2.08)
f_range(m = m.lp.t3.l2.3p, sd = sd.lp.t3.l2.3p, n = n.lp.t3.3p, title="Line 2, 3 pieces")
m.lp.t3.l3.1p <- c(2.17, 1.955)
sd.lp.t3.l3.1p <- c(1.89, 1.68)
f_range(m = m.lp.t3.l3.1p, sd = sd.lp.t3.l3.1p, n = n.lp.t3.1p, title="Line 3, 1 piece")
m.lp.t3.l3.2p <- c(2.94, 1.28)
sd.lp.t3.l3.2p <- c(2.13, 0.46)
f_range(m = m.lp.t3.l3.2p, sd = sd.lp.t3.l3.2p, n = n.lp.t3.2p, title="Line 3, 2 pieces")
m.lp.t3.l3.3p <- c(2.43, 2.10)
sd.lp.t3.l3.3p <- c(1.51, 1.91)
f_range(m = m.lp.t3.l3.3p, sd = sd.lp.t3.l3.3p, n = n.lp.t3.3p, title="Line 3, 3 pieces")
m.lp.t3.l4.1p <- c(2.11, 1.67)
sd.lp.t3.l4.1p <- c(1.81, 1.28)
f_range(m = m.lp.t3.l4.1p, sd = sd.lp.t3.l4.1p, n = n.lp.t3.1p, title="Line 4, 1 piece")
m.lp.t3.l4.2p <- c(3.89, 1.53)
sd.lp.t3.l4.2p <- c(2.59, 1.02)
f_range(m = m.lp.t3.l4.2p, sd = sd.lp.t3.l4.2p, n = n.lp.t3.2p, title="Line 4, 2 pieces")
m.lp.t3.l4.3p <- c(3.71, 3.50)
sd.lp.t3.l4.3p <- c(1.79, 2.95)
f_range(m = m.lp.t3.l4.3p, sd = sd.lp.t3.l4.3p, n = n.lp.t3.3p, title="Line 4, 3 pieces")
m.lp.t3.l5.1p <- c(2.50, 2.00)
sd.lp.t3.l5.1p <- c(2.20, 1.45)
f_range(m = m.lp.t3.l5.1p, sd = sd.lp.t3.l5.1p, n = n.lp.t3.1p, title="Line 5, 1 piece")
m.lp.t3.l5.2p <- c(4.28, 2.05)
sd.lp.t3.l5.2p <- c(2.44, 1.72)
f_range(m = m.lp.t3.l5.2p, sd = sd.lp.t3.l5.2p, n = n.lp.t3.2p, title="Line 5, 2 pieces")
m.lp.t3.l5.3p <- c(4.57, 4.00)
sd.lp.t3.l5.3p <- c(2.23, 3.02)
f_range(m = m.lp.t3.l5.3p, sd = sd.lp.t3.l5.3p, n = n.lp.t3.3p, title="Line 5, 3 pieces")
|
#!/usr/bin/env Rscript
# Decode hidden states for a sequence with a discrete HMM (HMM package).
#
# Inputs (tab-delimited; first column of the probability tables = state name):
#   transitions.txt - state-to-state transition probabilities
#   emissions.txt   - per-state emission probabilities over the bases
#   seq.fa          - the observed sequence
#
# NOTE(review): this block was an interactive history: it read the files
# before setwd(), repeated every command, called install.packages() mid-run,
# and contained a typo `wd(...)` (undefined function -> error). Consolidated
# into a single top-to-bottom run. The history used both "H:/Asignment-2"
# and "~/Asignment-2" as the data directory -- confirm which is correct.
setwd("H:/Asignment-2")

library(HMM)  # install.packages("HMM") once, outside the script

tr <- read.delim("transitions.txt", sep = "\t", header = TRUE)
em <- read.delim("emissions.txt", sep = "\t", header = TRUE)

# First column holds the state labels; keep them as rownames and drop it.
states <- tr[, 1]
tr <- tr[, -1]
rownames(tr) <- states
em <- em[, -1]
rownames(em) <- states
bases <- colnames(em)

# Read the sequence and split it into individual characters.
seq <- read.delim("seq.fa")
seq <- seq[, 1]
s <- unlist(strsplit(paste(seq), ""))

# Build the HMM and print the Viterbi path (raise max.print so the full
# decoded path is shown).
hmm <- initHMM(states, bases, transProbs = data.matrix(tr),
               emissionProbs = data.matrix(em))
options(max.print = 9999999)
viterbi(hmm, s)

# Per-state most-likely-emission summaries kept from the original session.
maxPr <- apply(em, 1, max)[1:14]
paste(bases[apply(em, 1, which.max)][2:15], collapse = "")
paste(bases[apply(em, 1, which.max)][1:15], collapse = "")
| /.Rhistory | no_license | LachlanD/Asignment-2 | R | false | false | 1,200 | rhistory | #!/usr/bin/env Rscript
# Decode hidden states for a sequence with a discrete HMM (HMM package).
# (Duplicate copy of the same interactive history; consolidated identically.)
#
# Inputs (tab-delimited; first column of the probability tables = state name):
#   transitions.txt - state-to-state transition probabilities
#   emissions.txt   - per-state emission probabilities over the bases
#   seq.fa          - the observed sequence
#
# NOTE(review): the original history read files before setwd(), repeated
# every command, called install.packages() mid-run, and had the typo
# `wd(...)` (undefined function -> error). The history used both
# "H:/Asignment-2" and "~/Asignment-2" -- confirm the correct directory.
setwd("H:/Asignment-2")

library(HMM)  # install.packages("HMM") once, outside the script

tr <- read.delim("transitions.txt", sep = "\t", header = TRUE)
em <- read.delim("emissions.txt", sep = "\t", header = TRUE)

# First column holds the state labels; keep them as rownames and drop it.
states <- tr[, 1]
tr <- tr[, -1]
rownames(tr) <- states
em <- em[, -1]
rownames(em) <- states
bases <- colnames(em)

# Read the sequence and split it into individual characters.
seq <- read.delim("seq.fa")
seq <- seq[, 1]
s <- unlist(strsplit(paste(seq), ""))

# Build the HMM and print the Viterbi path.
hmm <- initHMM(states, bases, transProbs = data.matrix(tr),
               emissionProbs = data.matrix(em))
options(max.print = 9999999)
viterbi(hmm, s)

# Per-state most-likely-emission summaries kept from the original session.
maxPr <- apply(em, 1, max)[1:14]
paste(bases[apply(em, 1, which.max)][2:15], collapse = "")
paste(bases[apply(em, 1, which.max)][1:15], collapse = "")
|
#' Style as a ribbon image
#'
#' Takes simulation output (one row per particle per time step) and renders
#' it as a ggplot2 image: each particle leaves a trail of translucent line
#' segments, optionally with the time == 1 "seed" shape filled or outlined.
#'
#' @param data data frame with x, y, order, id, time
#' @param burnin how many iterations should we discard as burnin?
#' @param alpha_init how transparent is each line?
#' @param alpha_decay how does transparency decay over iterations?
#' @param seed_col what colour to draw the seed? (default: NULL)
#' @param seed_fill what colour to fill the seed? (default: NULL)
#' @param palette function generating a palette
#' @param background colour of the background in the plot
#'
#' @return Returns a ggplot2 object
#' @export
style_ribbon <- function(
  data,
  burnin = 0, # how many of the iterations do we not draw?
  alpha_init = .3, # transparency of each line
  alpha_decay = 0, # rate of decay
  seed_col = NULL,
  seed_fill = NULL,
  palette = palette_viridis(), # function to generate palette (args: n, alpha)
  background = "black"
) {
  ribbon <- data
  # min, max of the raw coordinates
  xmin <- min(ribbon$x)
  xmax <- max(ribbon$x)
  ymin <- min(ribbon$y)
  ymax <- max(ribbon$y)
  # force to the same scale: one shared range for x and y so normalisation
  # preserves the original aspect ratio
  xmin <- min(xmin, ymin)
  xmax <- max(xmax, ymax)
  ymin <- xmin
  ymax <- xmax
  # normalise coordinates to [0, 1] and precompute the per-segment alpha,
  # which decays geometrically with the time step
  ribbon <- ribbon %>%
    dplyr::mutate(
      x = (x - xmin) / (xmax - xmin),
      y = (y - ymin) / (ymax - ymin),
      al = alpha_init * (1 - alpha_decay)^(time - 1)
    )
  # the seed is the first element of the ribbon
  seed <- ribbon %>% dplyr::filter(time == 1)
  # ribbon2 is the ribbon shifted back one step: its (xend, yend) at time t
  # are the positions from time t + 1, giving each segment its endpoint
  ribbon2 <- ribbon %>%
    dplyr::rename(xend = x, yend = y) %>%
    dplyr::mutate(time = time - 1) %>%
    dplyr::filter(time > 0)
  ribbon <- ribbon %>%
    dplyr::filter(time < max(time))
  # NOTE(review): this positional copy assumes ribbon and ribbon2 have
  # identical row order and row count after the two filters -- true when
  # every particle appears at every time step; confirm for ragged input.
  ribbon$xend <- ribbon2$xend
  ribbon$yend <- ribbon2$yend
  ribbon$order <- ribbon2$order
  #return(ribbon)
  # get colour values: one colour per "order" level
  col_set <- palette(n = max(ribbon$order))
  # create basic object: one segment per consecutive pair of positions,
  # coloured by order, alpha taken as-is via scale_alpha_identity
  pic <- ggplot2::ggplot(
    data = ribbon %>% dplyr::filter(time > burnin),
    mapping = ggplot2::aes(
      x = x,
      y = y,
      xend = xend,
      yend = yend,
      alpha = al,
      colour = factor(order)
    )
  ) +
    ggplot2::geom_segment(show.legend = FALSE) +
    ggplot2::scale_color_manual(values = col_set) +
    ggplot2::scale_alpha_identity() +
    theme_mono(background) +
    ggplot2::coord_equal(
      xlim = c(-.05, 1.05),
      ylim = c(-.05, 1.05)
    )
  # add hollow fill for seed if requested (one polygon per particle id)
  if(!is.null(seed_fill)) {
    pic <- pic +
      ggplot2::geom_polygon(
        data = seed,
        mapping = ggplot2::aes(x = x, y = y, group = factor(id)),
        inherit.aes = FALSE,
        colour = seed_fill,
        fill = seed_fill,
        show.legend = FALSE)
  }
  # add outline for seed if requested (one path per particle id)
  if(!is.null(seed_col)) {
    pic <- pic +
      ggplot2::geom_path(
        data = seed,
        mapping = ggplot2::aes(x = x, y = y, group = factor(id)),
        inherit.aes = FALSE,
        colour = seed_col,
        show.legend = FALSE)
  }
  return(pic)
}
# # fill in the seed shape if requested
# if(!is.null(seed_fill)) {
# for(i in 1:max(seed$id)) {
# s <- seed[seed$id == i,]
# graphics::polygon(
# x = (s$x - xmin)/(xmax - xmin),
# y = (s$y - ymin)/(ymax - ymin),
# col = seed_fill,
# lwd= 1
# )
# }
# }
#
# # draw outline of seed shape if requested
# if(!is.null(seed_col)) {
# for(i in 1:max(seed$id)) {
# s <- seed[seed$id == i,]
# graphics::lines(
# x = (s$x - xmin)/(xmax - xmin),
# y = (s$y - ymin)/(ymax - ymin),
# col = seed_col,
# lwd= line_width * 1.5
# )
# }
# }
# }
# }
#
# # create the png device if requested
# if(!is.null(file)) {
# grDevices::png(
# filename = file,
# width = width,
# height = height,
# bg = background
# )
# }
#
# # setup the plot area
# op <- graphics::par(bg = background, pty = "s", mar = c(0,0,0,0))
# graphics::plot.new()
# graphics::plot.window(xlim = box[1:2], ylim = box[3:4])
#
# # plot a series of curl iterations
# for(i in 1:iterations) {
# if(i > burnin) {
#
# last <- ribbon %>% dplyr::filter(time == i)
# this <- ribbon %>% dplyr::filter(time == i+1)
#
# # if palette is a single function...
# if(class(palette) == "function") {
#
# # create a colour palette for this iteration
# cols <- palette(nrow(seed), (alpha_init) * (1 - alpha_decay)^(i-1))
#
# # supply a default order if the input lacks one
# if(!("order" %in% names(this))) {
# this$order <- 1:length(this$x)
# }
#
# # draw the segments
# graphics::segments(
# x0 = (last$x - xmin) / (xmax - xmin),
# y0 = (last$y - ymin) / (ymax - ymin),
# x1 = (this$x - xmin) / (xmax - xmin),
# y1 = (this$y - ymin) / (ymax - ymin),
# col = cols[this$order],
# lwd = line_width,
# )
#
# # if it is a list of functions...
# } else {
#
# # create the list of colour palettes for this iteration
# cols <- list();
# for(j in 1:max(seed$id)) {
# cols[[j]] <- palette[[j]](nrow(seed), (alpha_init) * (1 - alpha_decay)^(i-1))
# }
#
# # draw each object separately with its own palette
# for(j in 1:max(seed$id)) {
#
# last <- ribbon %>% dplyr::filter(time == i & id == j)
# this <- ribbon %>% dplyr::filter(time == i+1 & id == j)
#
# # supply a default order if the input lacks one
# if(!("order" %in% names(this))) {
# this$order <- 1:length(this$x)
# }
#
# # draw the segments
# graphics::segments(
# x0 = (last$x - xmin) / (xmax - xmin),
# y0 = (last$y - ymin) / (ymax - ymin),
# x1 = (this$x - xmin) / (xmax - xmin),
# y1 = (this$y - ymin) / (ymax - ymin),
# col = cols[[j]][this$order],
# lwd = line_width,
# )
# }
# }
# }
# }
#
#
| /R/style_ribbon.R | permissive | sneakers-the-rat/jasmines | R | false | false | 5,986 | r |
#' Style as a ribbon image
#'
#' Takes simulation output (one row per particle per time step) and renders
#' it as a ggplot2 image: each particle leaves a trail of translucent line
#' segments, optionally with the time == 1 "seed" shape filled or outlined.
#'
#' @param data data frame with x, y, order, id, time
#' @param burnin how many iterations should we discard as burnin?
#' @param alpha_init how transparent is each line?
#' @param alpha_decay how does transparency decay over iterations?
#' @param seed_col what colour to draw the seed? (default: NULL)
#' @param seed_fill what colour to fill the seed? (default: NULL)
#' @param palette function generating a palette
#' @param background colour of the background in the plot
#'
#' @return Returns a ggplot2 object
#' @export
style_ribbon <- function(
  data,
  burnin = 0, # how many of the iterations do we not draw?
  alpha_init = .3, # transparency of each line
  alpha_decay = 0, # rate of decay
  seed_col = NULL,
  seed_fill = NULL,
  palette = palette_viridis(), # function to generate palette (args: n, alpha)
  background = "black"
) {
  ribbon <- data
  # min, max of the raw coordinates
  xmin <- min(ribbon$x)
  xmax <- max(ribbon$x)
  ymin <- min(ribbon$y)
  ymax <- max(ribbon$y)
  # force to the same scale: one shared range for x and y so normalisation
  # preserves the original aspect ratio
  xmin <- min(xmin, ymin)
  xmax <- max(xmax, ymax)
  ymin <- xmin
  ymax <- xmax
  # normalise coordinates to [0, 1] and precompute the per-segment alpha,
  # which decays geometrically with the time step
  ribbon <- ribbon %>%
    dplyr::mutate(
      x = (x - xmin) / (xmax - xmin),
      y = (y - ymin) / (ymax - ymin),
      al = alpha_init * (1 - alpha_decay)^(time - 1)
    )
  # the seed is the first element of the ribbon
  seed <- ribbon %>% dplyr::filter(time == 1)
  # ribbon2 is the ribbon shifted back one step: its (xend, yend) at time t
  # are the positions from time t + 1, giving each segment its endpoint
  ribbon2 <- ribbon %>%
    dplyr::rename(xend = x, yend = y) %>%
    dplyr::mutate(time = time - 1) %>%
    dplyr::filter(time > 0)
  ribbon <- ribbon %>%
    dplyr::filter(time < max(time))
  # NOTE(review): this positional copy assumes ribbon and ribbon2 have
  # identical row order and row count after the two filters -- true when
  # every particle appears at every time step; confirm for ragged input.
  ribbon$xend <- ribbon2$xend
  ribbon$yend <- ribbon2$yend
  ribbon$order <- ribbon2$order
  #return(ribbon)
  # get colour values: one colour per "order" level
  col_set <- palette(n = max(ribbon$order))
  # create basic object: one segment per consecutive pair of positions,
  # coloured by order, alpha taken as-is via scale_alpha_identity
  pic <- ggplot2::ggplot(
    data = ribbon %>% dplyr::filter(time > burnin),
    mapping = ggplot2::aes(
      x = x,
      y = y,
      xend = xend,
      yend = yend,
      alpha = al,
      colour = factor(order)
    )
  ) +
    ggplot2::geom_segment(show.legend = FALSE) +
    ggplot2::scale_color_manual(values = col_set) +
    ggplot2::scale_alpha_identity() +
    theme_mono(background) +
    ggplot2::coord_equal(
      xlim = c(-.05, 1.05),
      ylim = c(-.05, 1.05)
    )
  # add hollow fill for seed if requested (one polygon per particle id)
  if(!is.null(seed_fill)) {
    pic <- pic +
      ggplot2::geom_polygon(
        data = seed,
        mapping = ggplot2::aes(x = x, y = y, group = factor(id)),
        inherit.aes = FALSE,
        colour = seed_fill,
        fill = seed_fill,
        show.legend = FALSE)
  }
  # add outline for seed if requested (one path per particle id)
  if(!is.null(seed_col)) {
    pic <- pic +
      ggplot2::geom_path(
        data = seed,
        mapping = ggplot2::aes(x = x, y = y, group = factor(id)),
        inherit.aes = FALSE,
        colour = seed_col,
        show.legend = FALSE)
  }
  return(pic)
}
# # fill in the seed shape if requested
# if(!is.null(seed_fill)) {
# for(i in 1:max(seed$id)) {
# s <- seed[seed$id == i,]
# graphics::polygon(
# x = (s$x - xmin)/(xmax - xmin),
# y = (s$y - ymin)/(ymax - ymin),
# col = seed_fill,
# lwd= 1
# )
# }
# }
#
# # draw outline of seed shape if requested
# if(!is.null(seed_col)) {
# for(i in 1:max(seed$id)) {
# s <- seed[seed$id == i,]
# graphics::lines(
# x = (s$x - xmin)/(xmax - xmin),
# y = (s$y - ymin)/(ymax - ymin),
# col = seed_col,
# lwd= line_width * 1.5
# )
# }
# }
# }
# }
#
# # create the png device if requested
# if(!is.null(file)) {
# grDevices::png(
# filename = file,
# width = width,
# height = height,
# bg = background
# )
# }
#
# # setup the plot area
# op <- graphics::par(bg = background, pty = "s", mar = c(0,0,0,0))
# graphics::plot.new()
# graphics::plot.window(xlim = box[1:2], ylim = box[3:4])
#
# # plot a series of curl iterations
# for(i in 1:iterations) {
# if(i > burnin) {
#
# last <- ribbon %>% dplyr::filter(time == i)
# this <- ribbon %>% dplyr::filter(time == i+1)
#
# # if palette is a single function...
# if(class(palette) == "function") {
#
# # create a colour palette for this iteration
# cols <- palette(nrow(seed), (alpha_init) * (1 - alpha_decay)^(i-1))
#
# # supply a default order if the input lacks one
# if(!("order" %in% names(this))) {
# this$order <- 1:length(this$x)
# }
#
# # draw the segments
# graphics::segments(
# x0 = (last$x - xmin) / (xmax - xmin),
# y0 = (last$y - ymin) / (ymax - ymin),
# x1 = (this$x - xmin) / (xmax - xmin),
# y1 = (this$y - ymin) / (ymax - ymin),
# col = cols[this$order],
# lwd = line_width,
# )
#
# # if it is a list of functions...
# } else {
#
# # create the list of colour palettes for this iteration
# cols <- list();
# for(j in 1:max(seed$id)) {
# cols[[j]] <- palette[[j]](nrow(seed), (alpha_init) * (1 - alpha_decay)^(i-1))
# }
#
# # draw each object separately with its own palette
# for(j in 1:max(seed$id)) {
#
# last <- ribbon %>% dplyr::filter(time == i & id == j)
# this <- ribbon %>% dplyr::filter(time == i+1 & id == j)
#
# # supply a default order if the input lacks one
# if(!("order" %in% names(this))) {
# this$order <- 1:length(this$x)
# }
#
# # draw the segments
# graphics::segments(
# x0 = (last$x - xmin) / (xmax - xmin),
# y0 = (last$y - ymin) / (ymax - ymin),
# x1 = (this$x - xmin) / (xmax - xmin),
# y1 = (this$y - ymin) / (ymax - ymin),
# col = cols[[j]][this$order],
# lwd = line_width,
# )
# }
# }
# }
# }
#
#
|
## 2. Have total emissions from PM2.5 decreased in the Baltimore City,
## Maryland (fips == "24510") from 1999 to 2008?
## Use the base plotting system to make a plot answering this question.

# Load the NEI / SCC datasets only if they are not already in the session.
if (!exists("NEI")) {
  NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("Source_Classification_Code.rds")
}

# Restrict the records to Baltimore City, then total emissions per year.
baltimore <- NEI[NEI$fips == "24510", ]
totals_by_year <- aggregate(Emissions ~ year, baltimore, sum)

# Render the yearly totals as a bar chart into a PNG file.
png('plot2.png')
barplot(
  height = totals_by_year$Emissions,
  names.arg = totals_by_year$year,
  xlab = "years",
  ylab = expression('total PM'[2.5]*' emission'),
  main = expression('Total PM'[2.5]*' in the Baltimore City, MD emissions at various years')
)
dev.off()
| /plot2.R | no_license | dataperito/ExploratoryDataAnalysisAssignmentCourseProject2 | R | false | false | 763 | r | ##2.Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
##Use the base plotting system to make a plot answering this question.
#Load data
# Read the datasets only if a previous run already loaded them into the
# session. SCC is loaded for parity with the other plots; it is not used
# below.
if(!exists("NEI")){NEI <- readRDS("summarySCC_PM25.rds")}
if(!exists("SCC")){SCC <- readRDS("Source_Classification_Code.rds")}
# select fips="24510" as subset of NEI
subsetNEI <- NEI[NEI$fips=="24510", ]
# Sum the Emissions column within each year for the Baltimore subset.
aggregatedTotalByYear <- aggregate(Emissions ~ year, subsetNEI, sum)
# Write the bar chart of yearly totals to plot2.png.
png('plot2.png')
barplot(height=aggregatedTotalByYear$Emissions, names.arg=aggregatedTotalByYear$year, xlab="years", ylab=expression('total PM'[2.5]*' emission'),main=expression('Total PM'[2.5]*' in the Baltimore City, MD emissions at various years'))
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.