content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Generate Extra Slidify Layouts
#'
#' A collection of additional slidify slide layouts to extend the slidify
#' framework.
#'
#' @param path The path to the layout directory where the html files should be
#' generated. Default is ~assets/layouts.
#'
#' @return Creates the layout html files to extend the slidify slide layout
#' framework. Currently \code{slidify_layouts} generates a thankyou.html and
#' youtube.html. See the references for additional information.
#' @note If \code{\link[reports]{new_report}} or
#' \code{\link[reports]{presentation}} was utilized to generate the slidify
#' presentation directory \code{\link[reports]{slidify_layouts}} has already
#' created the extra slidify slide layouts in: ~PRESENTATION/assets/layouts
#' @references INSERT LINK TO slidify tricks and tips section
#' @export
slidify_layouts <- function(path = file.path(getwd(), "assets/layouts")){
    # Copy the extra slidify layout templates shipped with the reports
    # package into the presentation's layout directory.
    src_dir <- system.file("extdata/slidify_layouts", package = "reports")
    # Fail early when the destination directory is missing.
    if (!file.exists(path)) stop("Supply a valid path argument")
    layout_files <- dir(src_dir)
    # file.copy returns a logical vector; suppress it from printing.
    invisible(file.copy(file.path(src_dir, layout_files), path))
    separator <- paste(rep("=", 50), collapse="")
    # Report the generated files plus a pointer to further documentation.
    message(paste0("The following files have been generated ", ":\n\n",
        paste(file.path(path, layout_files), collapse="\n"),
        "\n", separator,
        "\n\nSee slidify tricks and tips section for more:\n\nINSERT LINK HERE\n"))
}
| /R/slidify_layouts.R | no_license | riverlee/reports | R | false | false | 1,367 | r | #' Generate Extra Slidify Layouts
#'
#' A collection of additional slidify slide layouts to extend the slidify
#' framework.
#'
#' @param path The path to the layout directory where the html files should be
#' generated. Default is ~assets/layouts.
#'
#' @return Creates the layout html files to extend the slidify slide layout
#' framework. Currently \code{slidify_layouts} generates a thankyou.html and
#' youtube.html. See the references for additional information.
#' @note If \code{\link[reports]{new_report}} or
#' \code{\link[reports]{presentation}} was utilized to generate the slidify
#' presentation directory \code{\link[reports]{slidify_layouts}} has already
#' created the extra slidify slide layouts in: ~PRESENTATION/assets/layouts
#' @references INSERT LINK TO slidify tricks and tips section
#' @export
slidify_layouts <- function(path = file.path(getwd(), "assets/layouts")){
# Locate the extra layout templates shipped inside the reports package.
root <- system.file("extdata/slidify_layouts", package = "reports")
# Fail early if the destination layout directory does not exist.
if (!file.exists(path)) stop("Supply a valid path argument")
# Copy every template file into the destination; invisible() suppresses
# file.copy's logical return vector.
invisible(file.copy(file.path(root, dir(root)), path))
# Report which files were generated, followed by a 50-character separator
# and a pointer to further documentation.
message(paste0("The following files have been generated ", ":\n\n",
paste(file.path(path, dir(root)), collapse="\n"),
"\n", paste(rep("=", 50), collapse=""),
"\n\nSee slidify tricks and tips section for more:\n\nINSERT LINK HERE\n"))
}
|
library(data.table)
library(dplyr)
library(ggplot2)
read.cistrome.peak <- function(filepath, span = -1){
# We need to remove rows with the extra chromosomes
allowed_chr = c(paste("chr",1:22,sep=""),"chrX","chrY")
df <- read.table(filepath)[ , c("V1", "V2", "V3", "V5")]
colnames(df) <- c("chr","start","end","intensity")
df <- filter(df, chr %in% allowed_chr)
if(span != -1){
# update start and end to use peak center, if span != -1
peak_center <- (df$start + df$end) %/% 2 # integer division
df$start <- peak_center - span
df$end <- peak_center + span
}
setDT(df, key = names(df))
setkey(df, "chr", "start","end")
return(df)
}
get.bsite.cist.count <- function(peak_df, imads_df){
sites_peak <- foverlaps(peak_df,imads_df)[, .(
chr, peak.start = i.start, peak.end = i.end,
intensity,
pref
)]
sites_peak$count <- ifelse(is.na(sites_peak$pref), 0, 1)
counted <- aggregate(sites_peak$count, by=list(sites_peak$chr, sites_peak$peak.start,
sites_peak$peak.end, sites_peak$intensity), FUN=sum)
colnames(counted) <- c("chr","peak.start","peak.end","intensity","count")
counted <- counted %>% arrange(chr,peak.start,peak.end)
return(counted)
}
make.peaklen.dist.cistrome.plot <- function(lenvec1, outdir, chip_name=""){
# hardcode upper limit to be 1500
peaklen_hist_plot1 <- ggplot() +
geom_histogram(aes(x = lenvec1), binwidth=50, colour = "white", fill = "cornflowerblue", size = 0.1) +
labs(title="ChIP-seq peak length distribution",
subtitle=chip_name,
x="peak length") +
coord_cartesian(xlim=c(min(lenvec1),1500)) +
scale_x_continuous(breaks=seq(0, 1500, by = 100))
ggsave(paste(outdir,"/",chip_name,"_peaklen_hist.pdf",sep=''), plot=peaklen_hist_plot1)
peaklen_dist_plot <- ggplot() +
geom_density(aes(lenvec1, fill="peaklen_dist"), alpha=0.5) +
labs(title="ChIP-seq peak length distribution",
subtitle=chip_name,
x="peak length") +
coord_cartesian(xlim=c(min(lenvec1),1500)) +
scale_x_continuous(breaks=seq(0, 1500, by = 100))
#theme(legend.position=c(.8,.8), legend.title=element_blank(), legend.spacing.x=unit(0.2,'cm')) # legend.title=element_blank()
ggsave(paste(outdir,"/",chip_name,"_peaklen_dist.pdf",sep=''), plot=peaklen_dist_plot)
}
make.intensity.dist.plot <- function(count_df, outpath, chip_name="", maxsitesnum=8){
# make boxplot, since we need pileup for the whole distribution, make a copy of the whole table
# as group "all"
merged_copy <- count_df
merged_copy$count <- "all"
merged_duplicated <- rbind(count_df,merged_copy)
countvec <- c("all",as.character(sort(unique(count_df$count)))) # need this to order xlabel
merged_duplicated$count <- factor(merged_duplicated$count, levels = countvec)
pudist_plot <- ggplot(merged_duplicated, aes(x=count, y=intensity, fill=count)) +
geom_boxplot() +
#geom_jitter(width=0.1,alpha=0.2) +
stat_summary(
fun.data = count.n,
geom = "text"
) +
labs(title="Intensity distributions",
subtitle=chip_name,
y="intensity",
x="#sites in a peak") +
coord_cartesian(xlim=c(0,maxsitesnum+2)) +
theme(legend.position = "none")
ggsave(outpath)
}
| /chip2probe/probe_generator/src_v1/R_analysis/chip.info/R/cistreader.R | permissive | vincentiusmartin/chip2probe | R | false | false | 3,275 | r | library(data.table)
library(dplyr)
library(ggplot2)
read.cistrome.peak <- function(filepath, span = -1){
# We need to remove rows with the extra chromosomes
allowed_chr = c(paste("chr",1:22,sep=""),"chrX","chrY")
df <- read.table(filepath)[ , c("V1", "V2", "V3", "V5")]
colnames(df) <- c("chr","start","end","intensity")
df <- filter(df, chr %in% allowed_chr)
if(span != -1){
# update start and end to use peak center, if span != -1
peak_center <- (df$start + df$end) %/% 2 # integer division
df$start <- peak_center - span
df$end <- peak_center + span
}
setDT(df, key = names(df))
setkey(df, "chr", "start","end")
return(df)
}
get.bsite.cist.count <- function(peak_df, imads_df){
sites_peak <- foverlaps(peak_df,imads_df)[, .(
chr, peak.start = i.start, peak.end = i.end,
intensity,
pref
)]
sites_peak$count <- ifelse(is.na(sites_peak$pref), 0, 1)
counted <- aggregate(sites_peak$count, by=list(sites_peak$chr, sites_peak$peak.start,
sites_peak$peak.end, sites_peak$intensity), FUN=sum)
colnames(counted) <- c("chr","peak.start","peak.end","intensity","count")
counted <- counted %>% arrange(chr,peak.start,peak.end)
return(counted)
}
make.peaklen.dist.cistrome.plot <- function(lenvec1, outdir, chip_name=""){
# hardcode upper limit to be 1500
peaklen_hist_plot1 <- ggplot() +
geom_histogram(aes(x = lenvec1), binwidth=50, colour = "white", fill = "cornflowerblue", size = 0.1) +
labs(title="ChIP-seq peak length distribution",
subtitle=chip_name,
x="peak length") +
coord_cartesian(xlim=c(min(lenvec1),1500)) +
scale_x_continuous(breaks=seq(0, 1500, by = 100))
ggsave(paste(outdir,"/",chip_name,"_peaklen_hist.pdf",sep=''), plot=peaklen_hist_plot1)
peaklen_dist_plot <- ggplot() +
geom_density(aes(lenvec1, fill="peaklen_dist"), alpha=0.5) +
labs(title="ChIP-seq peak length distribution",
subtitle=chip_name,
x="peak length") +
coord_cartesian(xlim=c(min(lenvec1),1500)) +
scale_x_continuous(breaks=seq(0, 1500, by = 100))
#theme(legend.position=c(.8,.8), legend.title=element_blank(), legend.spacing.x=unit(0.2,'cm')) # legend.title=element_blank()
ggsave(paste(outdir,"/",chip_name,"_peaklen_dist.pdf",sep=''), plot=peaklen_dist_plot)
}
make.intensity.dist.plot <- function(count_df, outpath, chip_name="", maxsitesnum=8){
# make boxplot, since we need pileup for the whole distribution, make a copy of the whole table
# as group "all"
merged_copy <- count_df
merged_copy$count <- "all"
merged_duplicated <- rbind(count_df,merged_copy)
countvec <- c("all",as.character(sort(unique(count_df$count)))) # need this to order xlabel
merged_duplicated$count <- factor(merged_duplicated$count, levels = countvec)
pudist_plot <- ggplot(merged_duplicated, aes(x=count, y=intensity, fill=count)) +
geom_boxplot() +
#geom_jitter(width=0.1,alpha=0.2) +
stat_summary(
fun.data = count.n,
geom = "text"
) +
labs(title="Intensity distributions",
subtitle=chip_name,
y="intensity",
x="#sites in a peak") +
coord_cartesian(xlim=c(0,maxsitesnum+2)) +
theme(legend.position = "none")
ggsave(outpath)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method-plot.R
\docType{methods}
\name{plotVolcano}
\alias{plotVolcano}
\alias{plotFunctions}
\alias{plotQQ}
\alias{plotExpressionRank}
\alias{plotCountCorHeatmap}
\alias{plotAberrantPerSample}
\alias{plotFPKM}
\alias{plotDispEsts}
\alias{plotPowerAnalysis}
\alias{plotExpectedVsObservedCounts}
\alias{plotCountGeneSampleHeatmap}
\alias{plotDispEsts,OutriderDataSet-method}
\alias{plotEncDimSearch}
\alias{plotExpressedGenes}
\alias{plotSizeFactors}
\title{Visualization functions for OUTRIDER}
\usage{
plotVolcano(ods, sampleID, main, padjCutoff = 0.05, zScoreCutoff = 0,
pch = 16, basePlot = FALSE, col = c("gray", "firebrick"))
plotQQ(ods, geneID, main, global = FALSE, padjCutoff = 0.05,
zScoreCutoff = 0, samplePoints = TRUE, legendPos = "topleft",
outlierRatio = 0.001, conf.alpha = 0.05, pch = 16, xlim = NULL,
ylim = NULL, col = NULL)
plotExpectedVsObservedCounts(ods, geneID, main, basePlot = FALSE,
log = TRUE, groups = c(), groupColSet = "Set1", ...)
plotExpressionRank(ods, geneID, main, padjCutoff = 0.05,
zScoreCutoff = 0, normalized = TRUE, basePlot = FALSE,
log = TRUE, col = c("gray", "firebrick"), groups = c(),
groupColSet = "Accent")
plotCountCorHeatmap(ods, normalized = TRUE, rowCentered = TRUE,
rowGroups = NA, rowColSet = NA, colGroups = NA, colColSet = NA,
nRowCluster = 4, nColCluster = 4,
main = "Count correlation heatmap", basePlot = TRUE, nBreaks = 50,
show_names = c("none", "row", "col", "both"), ...)
plotCountGeneSampleHeatmap(ods, normalized = TRUE, rowCentered = TRUE,
rowGroups = NA, rowColSet = NA, colGroups = NA, colColSet = NA,
nRowCluster = 4, nColCluster = 4,
main = "Count Gene vs Sample Heatmap", bcvQuantile = 0.9,
show_names = c("none", "col", "row", "both"), nGenes = 500,
nBreaks = 50, ...)
plotAberrantPerSample(ods, main, padjCutoff = 0.05, zScoreCutoff = 0,
outlierRatio = 0.001, col = brewer.pal(3, "Dark2")[c(1, 2)],
yadjust = c(1.2, 1.2), labLine = c(3.5, 3), ymax = NULL,
ylab = "#Aberrantly expressed genes", labCex = par()$cex, ...)
plotFPKM(ods, bins = 100)
\S4method{plotDispEsts}{OutriderDataSet}(object, compareDisp, xlim, ylim,
main = "Dispersion estimates versus mean expression", ...)
plotPowerAnalysis(ods)
plotEncDimSearch(ods)
plotExpressedGenes(ods, main = "Statistics of expressed genes")
plotSizeFactors(ods, basePlot = TRUE)
}
\arguments{
\item{ods, object}{An OutriderDataSet object.}
\item{sampleID, geneID}{A sample or gene ID, which should be plotted.
Can also be a vector. Integers are treated as indices.}
\item{main}{Title for the plot, if missing a default title will be used.}
\item{padjCutoff, zScoreCutoff}{Significance or Z-score cutoff
to mark outliers}
\item{pch}{Integer or character to be used for plotting the points}
\item{basePlot}{if \code{TRUE}, use the R base plot version, else use the
plotly framework, which is the default}
\item{col}{Set color for the points. If set, it must be a character vector
of length 2. (1. normal point; 2. outlier point)}
\item{global}{Flag to plot a global Q-Q plot, default FALSE}
\item{samplePoints}{Sample points for Q-Q plot, defaults to max 30k points}
\item{legendPos}{Set legendpos, by default topleft.}
\item{outlierRatio}{The fraction to be used for the outlier sample filtering}
\item{conf.alpha}{If set, a confidence interval is plotted, defaults to 0.05}
\item{xlim, ylim}{The x/y limits for the plot or NULL to use
the full data range}
\item{log}{If TRUE, the default, counts are plotted in log10.}
\item{groups}{A character vector containing either group assignments of
samples or sample IDs. Is empty by default. If group assignments
are given, the vector must have the same length as the number of
samples. If sample IDs are provided the assignment will result
in a binary group assignment.}
\item{groupColSet}{A color set from RColorBrewer or a manual vector of
colors, which length must match the number of categories
from groups.}
\item{...}{Additional parameters passed to plot() or plot_ly() if not stated
otherwise in the details for each plot function}
\item{normalized}{If TRUE, the normalized counts are used, the default,
otherwise the raw counts}
\item{rowCentered}{If TRUE, the counts are row-wise (gene-wise) centered}
\item{rowGroups, colGroups}{A vector of co-factors (colnames of colData)
for color coding the rows. It also accepts a data.frame of
dim = (#samples, #groups). Must have more than 2 groups.}
\item{rowColSet, colColSet}{A color set from RColorBrewer/colorRampPalette}
\item{nRowCluster, nColCluster}{Number of clusters to show in the row and
column dendrograms. If this argument is set the resulting
cluster assignments are added to the OutriderDataSet.}
\item{nBreaks}{number of breaks for the heatmap color scheme. Default to 50.}
\item{show_names}{character string indicating whether to show 'none', 'row',
'col', or 'both' names on the heatmap axes.}
\item{bcvQuantile}{quantile for choosing the cutoff for the biological
coefficient of variation (BCV)}
\item{nGenes}{upper limit of number of genes (defaults to 500). Subsets the
top n genes based on the BCV.}
\item{yadjust}{Option to adjust position of Median and 90 percentile labels.}
\item{labLine}{Option to move axis labels}
\item{ymax}{If set, ymax is the upper bound for the plot range on the y axis.}
\item{ylab}{The y axis label}
\item{labCex}{The label cex parameter}
\item{bins}{Number of bins used in the histogram. Defaults to 100.}
\item{compareDisp}{If TRUE, the default, and if the autoCorrect normalization
was used it computes the dispersion without autoCorrect and
plots it for comparison.}
}
\value{
If base R graphics are used nothing is returned else the plotly or
the gplot object is returned.
}
\description{
The OUTRIDER package provides multiple functions to visualize
the data and the results of a full data set analysis.
This is the list of all plotting function provided by OUTRIDER:
\itemize{
\item plotAberrantPerSample()
\item plotVolcano()
\item plotExpressionRank()
\item plotQQ()
\item plotExpectedVsObservedCounts()
\item plotCountCorHeatmap()
\item plotCountGeneSampleHeatmap()
\item plotSizeFactors()
\item plotFPKM()
\item plotExpressedGenes()
\item plotDispEsts()
\item plotPowerAnalysis()
\item plotEncDimSearch()
}
For a detailed description of each plot function please see the details.
Most of the functions share the same parameters.
}
\details{
\code{plotAberrantPerSample}: The number of aberrant events per sample are
plotted sorted by rank. The ... parameters are passed on to the
\code{\link{aberrant}} function.
\code{plotVolcano}: the volcano plot is sample-centric. It plots for a given
sample the negative log10 nominal P-values against the Z-scores for all
genes.
\code{plotExpressionRank}: This function plots for a given gene the
expression level against the expression rank for all samples. This can
be used with normalized and unnormalized expression values.
\code{plotQQ}: the quantile-quantile plot for a given gene or if
\code{global} is set to \code{TRUE} over the full data set. Here the
observed P-values are plotted against the expected ones in the negative
log10 space.
\code{plotExpectedVsObservedCounts}: A scatter plot of the observed counts
against the predicted expression for a given gene.
\code{plotCountCorHeatmap}: The correlation heatmap of the count data
of the full data set. Default the values are log transformed and
row centered. This function returns an OutriderDataSet with annotated
clusters if requested. The ... arguments are passed to the
\code{\link[pheatmap]{pheatmap}} function.
\code{plotCountGeneSampleHeatmap}: A gene x sample heatmap of the raw or
normalized counts. By default they are log transformed and row centered.
Only the top 500 viable genes based on the BCV (biological coefficient
of variation) is used by default.
\code{plotSizeFactors}: The sizefactor distribution within the dataset.
\code{plotFPKM}: The distribution of FPKM values. If the OutriderDataSet
object contains the \code{passedFilter} column, it will plot both FPKM
distributions for the expressed genes and for the filtered genes.
\code{plotExpressedGenes}: A summary statistic plot on the number of genes
expressed within this dataset. It plots the sample rank (based on the
number of expressed genes) against the accumulated statistics up to the
given sample.
\code{plotDispEsts}: Plots the dispersion of the OutriderDataSet
model against the normalized mean count. If autoCorrect is used it will also
estimate the dispersion without normalization for comparison.
\code{plotPowerAnalysis}: The power analysis plot should give the user a
rough estimate of the events that can be detected with OUTRIDER. Based on
the dispersion of the provided OUTRIDER data set the theoretical P-value
over the mean expression is plotted. This is done for different expression
levels. The curves are smoothed to make reading the plot easier.
\code{plotEncDimSearch}: Visualization of the hyperparameter optimization.
It plots the encoding dimension against the achieved loss (area under the
precision-recall curve). From this plot the optimum should be chosen for
the \code{q} in the fitting process.
}
\examples{
ods <- makeExampleOutriderDataSet(dataset="Kremer")
implementation <- 'autoencoder'
\dontshow{
# reduce the object size to speed up the calculations
ods <- ods[1:400,1:80]
implementation <- 'pca'
}
mcols(ods)$basepairs <- 300 # assign pseudo gene length for filtering
ods <- filterExpression(ods)
ods <- OUTRIDER(ods, implementation=implementation)
plotAberrantPerSample(ods)
plotVolcano(ods, 49)
plotVolcano(ods, 'MUC1365', basePlot=TRUE)
plotExpressionRank(ods, 35)
plotExpressionRank(ods, "NDUFS5", normalized=FALSE,
log=FALSE, main="Over expression outlier", basePlot=TRUE)
plotQQ(ods, 149)
plotQQ(ods, global=TRUE, outlierRatio=0.001)
plotExpectedVsObservedCounts(ods, 149)
plotExpectedVsObservedCounts(ods, "ATAD3C", basePlot=TRUE)
plotExpressedGenes(ods)
sex <- sample(c("female", "male"), dim(ods)[2], replace=TRUE)
colData(ods)$Sex <- sex
ods <- plotCountCorHeatmap(ods, nColCluster=4, normalized=FALSE)
ods <- plotCountCorHeatmap(ods, colGroup="Sex", colColSet="Set1")
table(colData(ods)$clusterNumber_4)
plotCountGeneSampleHeatmap(ods, normalized=FALSE)
plotCountGeneSampleHeatmap(ods, rowGroups="theta",
rowColSet=list(c("white", "darkgreen")))
plotSizeFactors(ods)
mcols(ods)$basepairs <- 1
mcols(ods)$passedFilter <- rowMeans(counts(ods)) > 10
plotFPKM(ods)
plotDispEsts(ods, compareDisp=FALSE)
plotPowerAnalysis(ods)
\dontrun{
# for speed reasons we only search for 5 different dimensions
ods <- findEncodingDim(ods, params=c(3, 10, 20, 35, 50),
implementation=implementation)
plotEncDimSearch(ods)
}
}
| /man/plotFunctions.Rd | permissive | AmrR101/OUTRIDER | R | false | true | 10,890 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method-plot.R
\docType{methods}
\name{plotVolcano}
\alias{plotVolcano}
\alias{plotFunctions}
\alias{plotQQ}
\alias{plotExpressionRank}
\alias{plotCountCorHeatmap}
\alias{plotAberrantPerSample}
\alias{plotFPKM}
\alias{plotDispEsts}
\alias{plotPowerAnalysis}
\alias{plotExpectedVsObservedCounts}
\alias{plotCountGeneSampleHeatmap}
\alias{plotDispEsts,OutriderDataSet-method}
\alias{plotEncDimSearch}
\alias{plotExpressedGenes}
\alias{plotSizeFactors}
\title{Visualization functions for OUTRIDER}
\usage{
plotVolcano(ods, sampleID, main, padjCutoff = 0.05, zScoreCutoff = 0,
pch = 16, basePlot = FALSE, col = c("gray", "firebrick"))
plotQQ(ods, geneID, main, global = FALSE, padjCutoff = 0.05,
zScoreCutoff = 0, samplePoints = TRUE, legendPos = "topleft",
outlierRatio = 0.001, conf.alpha = 0.05, pch = 16, xlim = NULL,
ylim = NULL, col = NULL)
plotExpectedVsObservedCounts(ods, geneID, main, basePlot = FALSE,
log = TRUE, groups = c(), groupColSet = "Set1", ...)
plotExpressionRank(ods, geneID, main, padjCutoff = 0.05,
zScoreCutoff = 0, normalized = TRUE, basePlot = FALSE,
log = TRUE, col = c("gray", "firebrick"), groups = c(),
groupColSet = "Accent")
plotCountCorHeatmap(ods, normalized = TRUE, rowCentered = TRUE,
rowGroups = NA, rowColSet = NA, colGroups = NA, colColSet = NA,
nRowCluster = 4, nColCluster = 4,
main = "Count correlation heatmap", basePlot = TRUE, nBreaks = 50,
show_names = c("none", "row", "col", "both"), ...)
plotCountGeneSampleHeatmap(ods, normalized = TRUE, rowCentered = TRUE,
rowGroups = NA, rowColSet = NA, colGroups = NA, colColSet = NA,
nRowCluster = 4, nColCluster = 4,
main = "Count Gene vs Sample Heatmap", bcvQuantile = 0.9,
show_names = c("none", "col", "row", "both"), nGenes = 500,
nBreaks = 50, ...)
plotAberrantPerSample(ods, main, padjCutoff = 0.05, zScoreCutoff = 0,
outlierRatio = 0.001, col = brewer.pal(3, "Dark2")[c(1, 2)],
yadjust = c(1.2, 1.2), labLine = c(3.5, 3), ymax = NULL,
ylab = "#Aberrantly expressed genes", labCex = par()$cex, ...)
plotFPKM(ods, bins = 100)
\S4method{plotDispEsts}{OutriderDataSet}(object, compareDisp, xlim, ylim,
main = "Dispersion estimates versus mean expression", ...)
plotPowerAnalysis(ods)
plotEncDimSearch(ods)
plotExpressedGenes(ods, main = "Statistics of expressed genes")
plotSizeFactors(ods, basePlot = TRUE)
}
\arguments{
\item{ods, object}{An OutriderDataSet object.}
\item{sampleID, geneID}{A sample or gene ID, which should be plotted.
Can also be a vector. Integers are treated as indices.}
\item{main}{Title for the plot, if missing a default title will be used.}
\item{padjCutoff, zScoreCutoff}{Significance or Z-score cutoff
to mark outliers}
\item{pch}{Integer or character to be used for plotting the points}
\item{basePlot}{if \code{TRUE}, use the R base plot version, else use the
plotly framework, which is the default}
\item{col}{Set color for the points. If set, it must be a character vector
of length 2. (1. normal point; 2. outlier point)}
\item{global}{Flag to plot a global Q-Q plot, default FALSE}
\item{samplePoints}{Sample points for Q-Q plot, defaults to max 30k points}
\item{legendPos}{Set legendpos, by default topleft.}
\item{outlierRatio}{The fraction to be used for the outlier sample filtering}
\item{conf.alpha}{If set, a confidence interval is plotted, defaults to 0.05}
\item{xlim, ylim}{The x/y limits for the plot or NULL to use
the full data range}
\item{log}{If TRUE, the default, counts are plotted in log10.}
\item{groups}{A character vector containing either group assignments of
samples or sample IDs. Is empty by default. If group assignments
are given, the vector must have the same length as the number of
samples. If sample IDs are provided the assignment will result
in a binary group assignment.}
\item{groupColSet}{A color set from RColorBrewer or a manual vector of
colors, which length must match the number of categories
from groups.}
\item{...}{Additional parameters passed to plot() or plot_ly() if not stated
otherwise in the details for each plot function}
\item{normalized}{If TRUE, the normalized counts are used, the default,
otherwise the raw counts}
\item{rowCentered}{If TRUE, the counts are row-wise (gene-wise) centered}
\item{rowGroups, colGroups}{A vector of co-factors (colnames of colData)
for color coding the rows. It also accepts a data.frame of
dim = (#samples, #groups). Must have more than 2 groups.}
\item{rowColSet, colColSet}{A color set from RColorBrewer/colorRampPalette}
\item{nRowCluster, nColCluster}{Number of clusters to show in the row and
column dendrograms. If this argument is set the resulting
cluster assignments are added to the OutriderDataSet.}
\item{nBreaks}{number of breaks for the heatmap color scheme. Default to 50.}
\item{show_names}{character string indicating whether to show 'none', 'row',
'col', or 'both' names on the heatmap axes.}
\item{bcvQuantile}{quantile for choosing the cutoff for the biological
coefficient of variation (BCV)}
\item{nGenes}{upper limit of number of genes (defaults to 500). Subsets the
top n genes based on the BCV.}
\item{yadjust}{Option to adjust position of Median and 90 percentile labels.}
\item{labLine}{Option to move axis labels}
\item{ymax}{If set, ymax is the upper bound for the plot range on the y axis.}
\item{ylab}{The y axis label}
\item{labCex}{The label cex parameter}
\item{bins}{Number of bins used in the histogram. Defaults to 100.}
\item{compareDisp}{If TRUE, the default, and if the autoCorrect normalization
was used it computes the dispersion without autoCorrect and
plots it for comparison.}
}
\value{
If base R graphics are used nothing is returned else the plotly or
the gplot object is returned.
}
\description{
The OUTRIDER package provides multiple functions to visualize
the data and the results of a full data set analysis.
This is the list of all plotting function provided by OUTRIDER:
\itemize{
\item plotAberrantPerSample()
\item plotVolcano()
\item plotExpressionRank()
\item plotQQ()
\item plotExpectedVsObservedCounts()
\item plotCountCorHeatmap()
\item plotCountGeneSampleHeatmap()
\item plotSizeFactors()
\item plotFPKM()
\item plotExpressedGenes()
\item plotDispEsts()
\item plotPowerAnalysis()
\item plotEncDimSearch()
}
For a detailed description of each plot function please see the details.
Most of the functions share the same parameters.
}
\details{
\code{plotAberrantPerSample}: The number of aberrant events per sample are
plotted sorted by rank. The ... parameters are passed on to the
\code{\link{aberrant}} function.
\code{plotVolcano}: the volcano plot is sample-centric. It plots for a given
sample the negative log10 nominal P-values against the Z-scores for all
genes.
\code{plotExpressionRank}: This function plots for a given gene the
expression level against the expression rank for all samples. This can
be used with normalized and unnormalized expression values.
\code{plotQQ}: the quantile-quantile plot for a given gene or if
\code{global} is set to \code{TRUE} over the full data set. Here the
observed P-values are plotted against the expected ones in the negative
log10 space.
\code{plotExpectedVsObservedCounts}: A scatter plot of the observed counts
against the predicted expression for a given gene.
\code{plotCountCorHeatmap}: The correlation heatmap of the count data
of the full data set. Default the values are log transformed and
row centered. This function returns an OutriderDataSet with annotated
clusters if requested. The ... arguments are passed to the
\code{\link[pheatmap]{pheatmap}} function.
\code{plotCountGeneSampleHeatmap}: A gene x sample heatmap of the raw or
normalized counts. By default they are log transformed and row centered.
Only the top 500 viable genes based on the BCV (biological coefficient
of variation) is used by default.
\code{plotSizeFactors}: The sizefactor distribution within the dataset.
\code{plotFPKM}: The distribution of FPKM values. If the OutriderDataSet
object contains the \code{passedFilter} column, it will plot both FPKM
distributions for the expressed genes and for the filtered genes.
\code{plotExpressedGenes}: A summary statistic plot on the number of genes
expressed within this dataset. It plots the sample rank (based on the
number of expressed genes) against the accumulated statistics up to the
given sample.
\code{plotDispEsts}: Plots the dispersion of the OutriderDataSet
model against the normalized mean count. If autoCorrect is used it will also
estimate the dispersion without normalization for comparison.
\code{plotPowerAnalysis}: The power analysis plot should give the user a
rough estimate of the events that can be detected with OUTRIDER. Based on
the dispersion of the provided OUTRIDER data set the theoretical P-value
over the mean expression is plotted. This is done for different expression
levels. The curves are smoothed to make reading the plot easier.
\code{plotEncDimSearch}: Visualization of the hyperparameter optimization.
It plots the encoding dimension against the achieved loss (area under the
precision-recall curve). From this plot the optimum should be chosen for
the \code{q} in the fitting process.
}
\examples{
ods <- makeExampleOutriderDataSet(dataset="Kremer")
implementation <- 'autoencoder'
\dontshow{
# reduce the object size to speed up the calculations
ods <- ods[1:400,1:80]
implementation <- 'pca'
}
mcols(ods)$basepairs <- 300 # assign pseudo gene length for filtering
ods <- filterExpression(ods)
ods <- OUTRIDER(ods, implementation=implementation)
plotAberrantPerSample(ods)
plotVolcano(ods, 49)
plotVolcano(ods, 'MUC1365', basePlot=TRUE)
plotExpressionRank(ods, 35)
plotExpressionRank(ods, "NDUFS5", normalized=FALSE,
log=FALSE, main="Over expression outlier", basePlot=TRUE)
plotQQ(ods, 149)
plotQQ(ods, global=TRUE, outlierRatio=0.001)
plotExpectedVsObservedCounts(ods, 149)
plotExpectedVsObservedCounts(ods, "ATAD3C", basePlot=TRUE)
plotExpressedGenes(ods)
sex <- sample(c("female", "male"), dim(ods)[2], replace=TRUE)
colData(ods)$Sex <- sex
ods <- plotCountCorHeatmap(ods, nColCluster=4, normalized=FALSE)
ods <- plotCountCorHeatmap(ods, colGroup="Sex", colColSet="Set1")
table(colData(ods)$clusterNumber_4)
plotCountGeneSampleHeatmap(ods, normalized=FALSE)
plotCountGeneSampleHeatmap(ods, rowGroups="theta",
rowColSet=list(c("white", "darkgreen")))
plotSizeFactors(ods)
mcols(ods)$basepairs <- 1
mcols(ods)$passedFilter <- rowMeans(counts(ods)) > 10
plotFPKM(ods)
plotDispEsts(ods, compareDisp=FALSE)
plotPowerAnalysis(ods)
\dontrun{
# for speed reasons we only search for 5 different dimensions
ods <- findEncodingDim(ods, params=c(3, 10, 20, 35, 50),
implementation=implementation)
plotEncDimSearch(ods)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggts_corona.R
\name{ggts_cum_daily}
\alias{ggts_cum_daily}
\title{Cumulative and daily data trend plot}
\usage{
ggts_cum_daily(
data,
y_cum = .data$Cases,
y_daily = .data$Daily_Cases,
mean_daily = .data$Mean_Daily_Cases,
country,
span = 7,
weeks = 12,
...
)
}
\arguments{
\item{data}{A data frame}
\item{y_cum}{Unquoted df data-variable of the cumulative cases}
\item{y_daily}{Unquoted df data-variable of the daily cases}
\item{mean_daily}{Unquoted df data-variable of the daily rolling mean data}
\item{country}{Unquoted df data-variable of the countries}
\item{span}{Numeric, span used for rolling mean calculation}
\item{weeks}{Numeric, number of weeks in the time range for the daily data,
dates are provided in column \code{default = Date}}
\item{...}{Other arguments passed on to \code{\link[=ggts_trend_facet]{ggts_trend_facet()}}.
Unquoted df data-variable to specify the time index variable (\code{default: DATE}).}
}
\value{
plot object of mode \code{plot}
}
\description{
Provide \strong{trend plot} for \emph{cumulative} and \emph{daily} cases with
facets of \code{vars_1 = Case_Type}
}
\examples{
# Corona data of "Germany")
ggts_cum_daily(corona_data, country = "Germany", weeks = 6)
}
\seealso{
\link{ggts_trend_facet}, \link{ggts_conf_deaths_facet} and \link[ggplot2:ggplot]{ggplot2::ggplot}
}
| /man/ggts_cum_daily.Rd | no_license | WoVollmer/pkgTS | R | false | true | 1,411 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggts_corona.R
\name{ggts_cum_daily}
\alias{ggts_cum_daily}
\title{Cumulative and daily data trend plot}
\usage{
ggts_cum_daily(
data,
y_cum = .data$Cases,
y_daily = .data$Daily_Cases,
mean_daily = .data$Mean_Daily_Cases,
country,
span = 7,
weeks = 12,
...
)
}
\arguments{
\item{data}{A data frame}
\item{y_cum}{Unquoted df data-variable of the cumulative cases}
\item{y_daily}{Unquoted df data-variable of the daily cases}
\item{mean_daily}{Unquoted df data-variable of the daily rolling mean data}
\item{country}{Unquoted df data-variable of the countries}
\item{span}{Numeric, span used for rolling mean calculation}
\item{weeks}{Numeric, number of weeks in the time range for the daily data,
dates are provided in column \code{default = Date}}
\item{...}{Other arguments passed on to \code{\link[=ggts_trend_facet]{ggts_trend_facet()}}.
Unquoted df data-variable to specify the time index variable (\code{default: DATE}).}
}
\value{
plot object of mode \code{plot}
}
\description{
Provide \strong{trend plot} for \emph{cumulative} and \emph{daily} cases with
facets of \code{vars_1 = Case_Type}
}
\examples{
# Corona data of "Germany")
ggts_cum_daily(corona_data, country = "Germany", weeks = 6)
}
\seealso{
\link{ggts_trend_facet}, \link{ggts_conf_deaths_facet} and \link[ggplot2:ggplot]{ggplot2::ggplot}
}
|
#Esta funcion acepta un array A de dimensiones pxkxn (n de landmarks, dimensiones, por ahora 2, y n de especimenes)
#Los landmarks de cada configuración deben estar en el siguiente orden: sagitales, izquierdos y derechos (o der e izq, eso es indiferente)
#Lee del dir de trabajo un archivo (por defecto llamado "pairs.txt") que es una lista de los pares de landmarks
#ctr puede tomar los valores "gmedian"(mediana espacial", "median" (mediana cac) y "mean" (media))
#opciones referencias: "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" and "center"
# Object-symmetry analysis for 2D landmark configurations.
#
# A:          p x 2 x n array of landmarks (p landmarks, 2 coordinates,
#             n specimens). Each configuration must list the midline
#             (sagittal) landmarks first, then the paired landmarks of one
#             side, then those of the other side.
# ctr:        centering method passed to center(): "gmedian" (spatial
#             median), "median" (coordinate-wise median) or "mean".
# prs.file:   text file in the working directory listing the paired-landmark
#             index pairs, one pair per row, space separated.
# proj.met:   "msum" or "mmedian" -- choose the sagittal axis that minimises
#             the sum or the median of the absolute projections of the
#             paired-landmark vectors.
# legend.loc: legend position forwarded to plot.result(), e.g. "topleft".
#
# Returns list(distances, T): the per-landmark contribution computed by
# dist.contrib(), and the array T of residuals between each centred
# configuration and its reflected/relabelled counterpart (halved), i.e. the
# asymmetric component. Also produces plots via plot.result() as a side
# effect. Relies on center(), dist.contrib() and plot.result() defined
# elsewhere in this project.
object.symm<-function(A,ctr="gmedian",prs.file="pairs.txt",proj.met="msum",legend.loc="topleft"){
library(Gmedian)
pares<-read.table(prs.file,sep=" ",header=FALSE)
ns<-length(A[,1,1])-(length(pares[,1])*2)#number of sagittal (midline) points
np<-length(A[,1,1])-ns #number of paired points
n<-dim(A)[3]
#--------------------------centering----------------
Ac<-center(A,cent=ctr)
#split the centred data into sagittal points and the two sides, left and
#right (or right and left -- whichever order the input file uses)
m0<-Ac[1:ns,,,drop=FALSE]
m1<-Ac[(ns+1):((ns+1)+(np/2)-1),,,drop=FALSE]
m2<-Ac[((ns+1)+(np/2)):((ns)+(np)),,,drop=FALSE]
#sagittal points
pares.sag<-t(combn(1:ns,2))# all combinations of sagittal landmark indices taken 2 at a time
vs<-m0[pares.sag[,1],,,drop=FALSE]-m0[pares.sag[,2],,,drop=FALSE]#candidate sagittal direction vectors, one per pair in pares.sag
mod<-as.matrix(sqrt((vs[,1,]^2)+(vs[,2,]^2)))#vector norms
N<-aperm(array(mod,c(dim(pares.sag)[1],n,2)),c(1,3,2))#norms replicated into an array shaped like vs, to divide elementwise (below)
vsn<-vs/N#unit sagittal vectors
#paired points
vp<-m1[pares[,1]-ns,,,drop=FALSE]-m2[pares[,2]-ns-(np/2),,,drop=FALSE]
# project the paired vectors onto the candidate sagittal directions
P<-array(0,dim = c(dim(vs)[1],(np/2),n))#array to hold the projections
#Project by taking the dot product between the matrix of unit sagittal
#vectors and the paired vectors, one array slice (configuration) at a time
for(i in 1:n){
P[,,i]<-(vsn[,,i]%*%t(vp[,,i]))
}
P<-abs(P)
p.sum<-apply(P,c(1,3), sum)#sum of projections: one column per specimen; each row is the sum over the paired landmarks
p.median<-apply(P,c(1,3),median)#median of projections
# index of the sagittal vector with minimal sum or median of projections, per configuration
switch(proj.met,
msum={i.min<-apply(p.sum,c(2),which.min)},
mmedian={i.min<-apply(p.median,c(2),which.min)}
)
vr<-NULL #matrix of unit sagittal vectors used for the reflection (filled below)
#Pick, for each specimen, the unit vector that had the minimal projection
for(j in 1:n){
vaux<-vsn[i.min[j],,j]
vr<-rbind(vr,vaux)
}
#--------------------------reflection----------------
#turn each vector into one orthogonal to the reflection axis (i.e. to the vector that joined the two points)
e<-matrix(0,n,2)
e[,2]<-vr[,1]
e[,1]<- -(vr[,2])
#build and apply the Householder reflection matrix
Ur<-array(0,dim(A))
for(i in 1:n){
R<-diag(2) - 2*e[i,]%*%t(e[i,])
Ur[,,i] <- Ac[,,i]%*%R
}
#relabelling
Ure<-Ur #copy of the reflected array; the loop below swaps left and right landmarks
for (j in 1:(np/2)) {
Ure[pares[j, 1],,] <- Ur[pares[j,2],,]
Ure[pares[j, 2],,] <- Ur[pares[j,1],,]
}
T<-(Ac-Ure)/2 #half the difference between original and reflected/relabelled: the asymmetry residual T
#percentage contribution of each landmark
distances<-dist.contrib(mc=Ac,mre=Ure)
#plots
plot.result(mc=Ac,mre=Ure,mt=T,nconf=n,object=TRUE,legloc=legend.loc)
#return the residuals
return(list(distances,T))
}
| /object_symm.R | no_license | linxs/Robust | R | false | false | 3,929 | r | #Esta funcion acepta un array A de dimensiones pxkxn (n de landmarks, dimensiones, por ahora 2, y n de especimenes)
#Los landmarks de cada configuración deben estar en el siguiente orden: sagitales, izquierdos y derechos (o der e izq, eso es indiferente)
#Lee del dir de trabajo un archivo (por defecto llamado "pairs.txt") que es una lista de los pares de landmarks
#ctr puede tomar los valores "gmedian"(mediana espacial", "median" (mediana cac) y "mean" (media))
#opciones referencias: "bottomright", "bottom", "bottomleft", "left", "topleft", "top", "topright", "right" and "center"
# Object-symmetry analysis for 2D landmark configurations.
#
# A:          p x 2 x n array of landmarks (p landmarks, 2 coordinates,
#             n specimens). Each configuration must list the midline
#             (sagittal) landmarks first, then the paired landmarks of one
#             side, then those of the other side.
# ctr:        centering method passed to center(): "gmedian" (spatial
#             median), "median" (coordinate-wise median) or "mean".
# prs.file:   text file in the working directory listing the paired-landmark
#             index pairs, one pair per row, space separated.
# proj.met:   "msum" or "mmedian" -- choose the sagittal axis that minimises
#             the sum or the median of the absolute projections of the
#             paired-landmark vectors.
# legend.loc: legend position forwarded to plot.result(), e.g. "topleft".
#
# Returns list(distances, T): the per-landmark contribution computed by
# dist.contrib(), and the array T of residuals between each centred
# configuration and its reflected/relabelled counterpart (halved), i.e. the
# asymmetric component. Also produces plots via plot.result() as a side
# effect. Relies on center(), dist.contrib() and plot.result() defined
# elsewhere in this project.
object.symm<-function(A,ctr="gmedian",prs.file="pairs.txt",proj.met="msum",legend.loc="topleft"){
library(Gmedian)
pares<-read.table(prs.file,sep=" ",header=FALSE)
ns<-length(A[,1,1])-(length(pares[,1])*2)#number of sagittal (midline) points
np<-length(A[,1,1])-ns #number of paired points
n<-dim(A)[3]
#--------------------------centering----------------
Ac<-center(A,cent=ctr)
#split the centred data into sagittal points and the two sides, left and
#right (or right and left -- whichever order the input file uses)
m0<-Ac[1:ns,,,drop=FALSE]
m1<-Ac[(ns+1):((ns+1)+(np/2)-1),,,drop=FALSE]
m2<-Ac[((ns+1)+(np/2)):((ns)+(np)),,,drop=FALSE]
#sagittal points
pares.sag<-t(combn(1:ns,2))# all combinations of sagittal landmark indices taken 2 at a time
vs<-m0[pares.sag[,1],,,drop=FALSE]-m0[pares.sag[,2],,,drop=FALSE]#candidate sagittal direction vectors, one per pair in pares.sag
mod<-as.matrix(sqrt((vs[,1,]^2)+(vs[,2,]^2)))#vector norms
N<-aperm(array(mod,c(dim(pares.sag)[1],n,2)),c(1,3,2))#norms replicated into an array shaped like vs, to divide elementwise (below)
vsn<-vs/N#unit sagittal vectors
#paired points
vp<-m1[pares[,1]-ns,,,drop=FALSE]-m2[pares[,2]-ns-(np/2),,,drop=FALSE]
# project the paired vectors onto the candidate sagittal directions
P<-array(0,dim = c(dim(vs)[1],(np/2),n))#array to hold the projections
#Project by taking the dot product between the matrix of unit sagittal
#vectors and the paired vectors, one array slice (configuration) at a time
for(i in 1:n){
P[,,i]<-(vsn[,,i]%*%t(vp[,,i]))
}
P<-abs(P)
p.sum<-apply(P,c(1,3), sum)#sum of projections: one column per specimen; each row is the sum over the paired landmarks
p.median<-apply(P,c(1,3),median)#median of projections
# index of the sagittal vector with minimal sum or median of projections, per configuration
switch(proj.met,
msum={i.min<-apply(p.sum,c(2),which.min)},
mmedian={i.min<-apply(p.median,c(2),which.min)}
)
vr<-NULL #matrix of unit sagittal vectors used for the reflection (filled below)
#Pick, for each specimen, the unit vector that had the minimal projection
for(j in 1:n){
vaux<-vsn[i.min[j],,j]
vr<-rbind(vr,vaux)
}
#--------------------------reflection----------------
#turn each vector into one orthogonal to the reflection axis (i.e. to the vector that joined the two points)
e<-matrix(0,n,2)
e[,2]<-vr[,1]
e[,1]<- -(vr[,2])
#build and apply the Householder reflection matrix
Ur<-array(0,dim(A))
for(i in 1:n){
R<-diag(2) - 2*e[i,]%*%t(e[i,])
Ur[,,i] <- Ac[,,i]%*%R
}
#relabelling
Ure<-Ur #copy of the reflected array; the loop below swaps left and right landmarks
for (j in 1:(np/2)) {
Ure[pares[j, 1],,] <- Ur[pares[j,2],,]
Ure[pares[j, 2],,] <- Ur[pares[j,1],,]
}
T<-(Ac-Ure)/2 #half the difference between original and reflected/relabelled: the asymmetry residual T
#percentage contribution of each landmark
distances<-dist.contrib(mc=Ac,mre=Ure)
#plots
plot.result(mc=Ac,mre=Ure,mt=T,nconf=n,object=TRUE,legloc=legend.loc)
#return the residuals
return(list(distances,T))
}
|
# Benchmark driver for experiment EQ1.
# Creates (or resumes) a batchtools experiment registry under eq1/batchtools
# and submits benchmark jobs. The branches guarded by `if (FALSE)` are manual
# snippets meant to be run interactively, never on source().

BM_DIR = paste0(here::here(), "/eq1/")
BATCHTOOLS_DIR = paste0(BM_DIR, "batchtools")

# First-time setup only: when no registry exists yet, attach the modelling
# stack and source the project helpers needed to define the experiments.
if (! dir.exists(BATCHTOOLS_DIR)) {
  suppressMessages(library(data.table))
  suppressMessages(library(R6))
  suppressMessages(library(mlr3))
  suppressMessages(library(mlr3tuning))
  suppressMessages(library(mlrintermbo))
  suppressMessages(library(mlr3learners))
  suppressMessages(library(mlr3extralearners))
  suppressMessages(library(mlr3pipelines))
  suppressMessages(library(paradox))
  source(paste0(BM_DIR, "classifCompboost.R"))
  source(paste0(BM_DIR, "helper.R"))
  source(paste0(BM_DIR, "setup.R"))
}

## Batchtools
## ===========================================================

library(batchtools)

# Manual reset switch: run the unlink() interactively to wipe the registry.
if (FALSE) unlink(BATCHTOOLS_DIR, recursive = TRUE)

if (dir.exists(BATCHTOOLS_DIR)) {
  # Registry already exists: load it and (re)submit jobs.
  loadRegistry(BATCHTOOLS_DIR, writeable = TRUE, work.dir = BM_DIR)
  #loadRegistry(BATCHTOOLS_DIR, work.dir = BM_DIR)

  jt = getJobTable()
  # Flag jobs of algorithm "bin_cwb_b" on the spam problem (plus job 61) for
  # resubmission.
  # NOTE(review): unlist(jt$algo.pars) assumes exactly one scalar algorithm
  # parameter per job -- confirm against add-experiments.R.
  ids_resubmit = jt$job.id[unlist(jt$algo.pars) == "bin_cwb_b"]
  ids_resubmit = intersect(ids_resubmit, jt$job.id[grepl("spam", jt$problem)])
  ids_resubmit = c(ids_resubmit, 61)

  #hcwb_resubmit = jt$job.id[unlist(jt$algo.pars) == "acc_hcwb"]
  #hcwb_resubmit = intersect(seq_len(75L), hcwb_resubmit)

  # Jobs among the first 75 that are not finished, plus the resubmission set.
  not_done = setdiff(seq_len(75L), findDone()$job.id)
  not_done = unique(c(not_done, ids_resubmit))

  submitJobs(116:150)
} else {
  # No registry yet: create it, configure default resources, and add the
  # experiments.
  reg = makeExperimentRegistry(
    file.dir = BATCHTOOLS_DIR,
    packages = c("data.table", "R6", "mlr3", "mlr3learners", "mlr3extralearners",
      "mlr3pipelines", "mlr3tuning", "compboost", "paradox"),
    source = c("helper.R", "classifCompboost.R", "setup.R"),
    seed = 31415)
  #reg = getDefaultRegistry()
  # reg$cluster.functions = makeClusterFunctionsSSH(workers = list(
  #   Worker$new("localhost", ncpus = 1L), # 192.168.9.131
  #   #Worker$new("192.168.9.132", ncpus = 1L),
  #   Worker$new("192.168.9.133", ncpus = 1L)))

  reg$default.resources = list(
    #walltime = 3600L * 2,
    #memory = 1024L * 16L,
    max.concurrent.jobs = 1L,
    ntasks = 1L,
    ncpus = 1L,
    nodes = 1L
  )
  saveRegistry(reg)

  source(paste0(BM_DIR, "add-experiments.R"))
}

# Manual submission, split across two machines:
if (FALSE) {
  # cpuserver3:
  submitJobs(findNotDone()[1:75, ])
  # cpuserver5:
  # BUG FIX: was findNotDone()[76,150], i.e. data.table row 76 / column 150;
  # the intended selection is the unfinished job rows 76 to 150.
  submitJobs(findNotDone()[76:150, ])
}

### Code for testing:
# Interactive scratch code for inspecting single learners; never run on
# source().
if (FALSE) {
  BM_DIR = paste0(here::here(), "/eq1/")
  BATCHTOOLS_DIR = paste0(BM_DIR, "batchtools")
  suppressMessages(library(data.table))
  suppressMessages(library(R6))
  suppressMessages(library(mlr3))
  suppressMessages(library(mlr3tuning))
  suppressMessages(library(mlrintermbo))
  suppressMessages(library(mlr3learners))
  suppressMessages(library(mlr3extralearners))
  suppressMessages(library(mlr3pipelines))
  suppressMessages(library(paradox))
  source(paste0(BM_DIR, "classifCompboost.R"))
  source(paste0(BM_DIR, "helper.R"))
  source(paste0(BM_DIR, "setup.R"))

  tl = constructLearner("bin_cwb_nb", ncores = 30L, test_mode = TRUE, raw_learner = FALSE)
  tl$train(TASKS[[2]])

  tasks = list(train = TASKS[[2]], test = TASKS[[2]])
  cbt = getCboostMsrsTrace(tl, tasks, SCORE_MEASURES, iters = c(10, 20, 90, 100, 200))

  p1 = tl$predict(TASKS[[5]])

  library(compboost)
  cboost = boostSplines(data = TASKS[[5]]$data(), target = TASKS[[5]]$target_names,
    loss = LossBinomial$new(), iterations = 100L)
  lcboost = lrn("classif.compboost", mstop = 50L)
  lcboost$train(TASKS[[5]])

  tl$setToIteration(1)
  # NOTE(review): `design` is not defined anywhere in this file -- this line
  # only works in an interactive session where `design` already exists.
  p2 = tl$predict(design$task[[1]])
  p1$score(msr("classif.auc"))
  p2$score(msr("classif.auc"))

  clog = tl$model$cboost$getLoggerData()
  tl$model$cboost_restart$getLoggerData()

  library(compboost)
  library(mlr3)
  library(mlr3oml)
  ts = tsk("oml", task_id = 359994)
  cboost = boostSplines(data = ts$data(), target = ts$target_names, loss = LossBinomial$new(), iterations = 125L)
  l = lrn("classif.compboost")
  l$train(ts)
}
| /eq1/benchmark.R | no_license | schalkdaniel/cacb-batchtools | R | false | false | 3,808 | r | BM_DIR = paste0(here::here(), "/eq1/")
# Benchmark driver for experiment EQ1 (continued from BM_DIR above).
# Creates (or resumes) a batchtools experiment registry under eq1/batchtools
# and submits benchmark jobs. The branches guarded by `if (FALSE)` are manual
# snippets meant to be run interactively, never on source().
BATCHTOOLS_DIR = paste0(BM_DIR, "batchtools")

# First-time setup only: when no registry exists yet, attach the modelling
# stack and source the project helpers needed to define the experiments.
if (! dir.exists(BATCHTOOLS_DIR)) {
  suppressMessages(library(data.table))
  suppressMessages(library(R6))
  suppressMessages(library(mlr3))
  suppressMessages(library(mlr3tuning))
  suppressMessages(library(mlrintermbo))
  suppressMessages(library(mlr3learners))
  suppressMessages(library(mlr3extralearners))
  suppressMessages(library(mlr3pipelines))
  suppressMessages(library(paradox))
  source(paste0(BM_DIR, "classifCompboost.R"))
  source(paste0(BM_DIR, "helper.R"))
  source(paste0(BM_DIR, "setup.R"))
}

## Batchtools
## ===========================================================

library(batchtools)

# Manual reset switch: run the unlink() interactively to wipe the registry.
if (FALSE) unlink(BATCHTOOLS_DIR, recursive = TRUE)

if (dir.exists(BATCHTOOLS_DIR)) {
  # Registry already exists: load it and (re)submit jobs.
  loadRegistry(BATCHTOOLS_DIR, writeable = TRUE, work.dir = BM_DIR)
  #loadRegistry(BATCHTOOLS_DIR, work.dir = BM_DIR)

  jt = getJobTable()
  # Flag jobs of algorithm "bin_cwb_b" on the spam problem (plus job 61) for
  # resubmission.
  # NOTE(review): unlist(jt$algo.pars) assumes exactly one scalar algorithm
  # parameter per job -- confirm against add-experiments.R.
  ids_resubmit = jt$job.id[unlist(jt$algo.pars) == "bin_cwb_b"]
  ids_resubmit = intersect(ids_resubmit, jt$job.id[grepl("spam", jt$problem)])
  ids_resubmit = c(ids_resubmit, 61)

  #hcwb_resubmit = jt$job.id[unlist(jt$algo.pars) == "acc_hcwb"]
  #hcwb_resubmit = intersect(seq_len(75L), hcwb_resubmit)

  # Jobs among the first 75 that are not finished, plus the resubmission set.
  not_done = setdiff(seq_len(75L), findDone()$job.id)
  not_done = unique(c(not_done, ids_resubmit))

  submitJobs(116:150)
} else {
  # No registry yet: create it, configure default resources, and add the
  # experiments.
  reg = makeExperimentRegistry(
    file.dir = BATCHTOOLS_DIR,
    packages = c("data.table", "R6", "mlr3", "mlr3learners", "mlr3extralearners",
      "mlr3pipelines", "mlr3tuning", "compboost", "paradox"),
    source = c("helper.R", "classifCompboost.R", "setup.R"),
    seed = 31415)
  #reg = getDefaultRegistry()
  # reg$cluster.functions = makeClusterFunctionsSSH(workers = list(
  #   Worker$new("localhost", ncpus = 1L), # 192.168.9.131
  #   #Worker$new("192.168.9.132", ncpus = 1L),
  #   Worker$new("192.168.9.133", ncpus = 1L)))

  reg$default.resources = list(
    #walltime = 3600L * 2,
    #memory = 1024L * 16L,
    max.concurrent.jobs = 1L,
    ntasks = 1L,
    ncpus = 1L,
    nodes = 1L
  )
  saveRegistry(reg)

  source(paste0(BM_DIR, "add-experiments.R"))
}

# Manual submission, split across two machines:
if (FALSE) {
  # cpuserver3:
  submitJobs(findNotDone()[1:75, ])
  # cpuserver5:
  # BUG FIX: was findNotDone()[76,150], i.e. data.table row 76 / column 150;
  # the intended selection is the unfinished job rows 76 to 150.
  submitJobs(findNotDone()[76:150, ])
}

### Code for testing:
# Interactive scratch code for inspecting single learners; never run on
# source().
if (FALSE) {
  BM_DIR = paste0(here::here(), "/eq1/")
  BATCHTOOLS_DIR = paste0(BM_DIR, "batchtools")
  suppressMessages(library(data.table))
  suppressMessages(library(R6))
  suppressMessages(library(mlr3))
  suppressMessages(library(mlr3tuning))
  suppressMessages(library(mlrintermbo))
  suppressMessages(library(mlr3learners))
  suppressMessages(library(mlr3extralearners))
  suppressMessages(library(mlr3pipelines))
  suppressMessages(library(paradox))
  source(paste0(BM_DIR, "classifCompboost.R"))
  source(paste0(BM_DIR, "helper.R"))
  source(paste0(BM_DIR, "setup.R"))

  tl = constructLearner("bin_cwb_nb", ncores = 30L, test_mode = TRUE, raw_learner = FALSE)
  tl$train(TASKS[[2]])

  tasks = list(train = TASKS[[2]], test = TASKS[[2]])
  cbt = getCboostMsrsTrace(tl, tasks, SCORE_MEASURES, iters = c(10, 20, 90, 100, 200))

  p1 = tl$predict(TASKS[[5]])

  library(compboost)
  cboost = boostSplines(data = TASKS[[5]]$data(), target = TASKS[[5]]$target_names,
    loss = LossBinomial$new(), iterations = 100L)
  lcboost = lrn("classif.compboost", mstop = 50L)
  lcboost$train(TASKS[[5]])

  tl$setToIteration(1)
  # NOTE(review): `design` is not defined anywhere in this file -- this line
  # only works in an interactive session where `design` already exists.
  p2 = tl$predict(design$task[[1]])
  p1$score(msr("classif.auc"))
  p2$score(msr("classif.auc"))

  clog = tl$model$cboost$getLoggerData()
  tl$model$cboost_restart$getLoggerData()

  library(compboost)
  library(mlr3)
  library(mlr3oml)
  ts = tsk("oml", task_id = 359994)
  cboost = boostSplines(data = ts$data(), target = ts$target_names, loss = LossBinomial$new(), iterations = 125L)
  l = lrn("classif.compboost")
  l$train(ts)
}
|
\name{mri}
\alias{mri}
\docType{data}
\title{Intensities of MRI images}
\description{
Felipe et al. (2005) obtained intensities of MRI images of 9 different parts of the human
body (plus a group consisting of all remaining body regions, which was of course very
heterogeneous). They then transformed their data to univariate curves. }
\usage{data("plane")}
\format{
A list of arrays corresponding to each bodypart. For each bodypart, a three-dimensional \eqn{t = 99} by \eqn{n} by \eqn{p = 1} array is available. The index \eqn{t} corresponds to the different points of measurement, the index \eqn{n} to the different observations.
}
\details{
When using this data set please cite both Felipe et al. (2005) and Hubert et al. (2017).
}
\source{
Felipe J.C., Traina A.J.M., Traina C. (2005). Global warp metric distance: boosting
content-based image retrieval through histograms. Proceedings of the Seventh IEEE
International Symposium on Multimedia (ISM05), p.8.
Chen, Y., Keogh, E., Hu, B., Begum, N., Bagnall, A., Mueen, A., Batista, G.J. (2015). The
UCR Time Series Classification Archive. [http://www.cs.ucr.edu/~eamonn/time_series_data]
}
\references{
Hubert M., Rousseeuw P.J., Segaert P. (2017). Multivariate and functional classification using depth and distance. \emph{Advances in Data Analysis and Classification}, 11(3), 445-466.
}
\examples{
data(mri)
par(mfrow = c(2,1))
matplot(y = mri$bodypart1[,,1],
type ="l",col = "black", lty = 1, xlab = "", ylab="x-coordinate", main = "plane 1")
matplot(y = mri$bodypart2[,,1],
type ="l",col = "black", lty = 1, xlab = "", ylab="x-coordinate", main = "plane 2")
par(mfrow = c(1,1))
}
\keyword{datasets}
| /man/mri.Rd | no_license | PSegaert/mrfDepth | R | false | false | 1,675 | rd | \name{mri}
\alias{mri}
\docType{data}
\title{Intensities of MRI images}
\description{
Felipe et al. (2005) obtained intensities of MRI images of 9 different parts of the human
body (plus a group consisting of all remaining body regions, which was of course very
heterogeneous). They then transformed their data to univariate curves. }
\usage{data("plane")}
\format{
A list of arrays corresponding to each bodypart. For each bodypart, a three-dimensional \eqn{t = 99} by \eqn{n} by \eqn{p = 1} array is available. The index \eqn{t} corresponds to the different points of measurement, the index \eqn{n} to the different observations.
}
\details{
When using this data set please cite both Felipe et al. (2005) and Hubert et al. (2017).
}
\source{
Felipe J.C., Traina A.J.M., Traina C. (2005). Global warp metric distance: boosting
content-based image retrieval through histograms. Proceedings of the Seventh IEEE
International Symposium on Multimedia (ISM05), p.8.
Chen, Y., Keogh, E., Hu, B., Begum, N., Bagnall, A., Mueen, A., Batista, G.J. (2015). The
UCR Time Series Classification Archive. [http://www.cs.ucr.edu/~eamonn/time_series_data]
}
\references{
Hubert M., Rousseeuw P.J., Segaert P. (2017). Multivariate and functional classification using depth and distance. \emph{Advances in Data Analysis and Classification}, 11(3), 445-466.
}
\examples{
data(mri)
par(mfrow = c(2,1))
matplot(y = mri$bodypart1[,,1],
type ="l",col = "black", lty = 1, xlab = "", ylab="x-coordinate", main = "plane 1")
matplot(y = mri$bodypart2[,,1],
type ="l",col = "black", lty = 1, xlab = "", ylab="x-coordinate", main = "plane 2")
par(mfrow = c(1,1))
}
\keyword{datasets}
|
#' A helper function to create workflow passed to the ml_tune() function
#'
#' wflw_creator() is a function that creates a workflow
#'
#' @param model_spec A model specification created with the Parsnip or Modeltime package
#' @param ml_recipe Recipe for the models
#' @param resamples_kfold Resamples used for tuning parameters
#' @param grid_size The size of the grid of parameters
#' @param parallel_type Parallelisation strategy passed to
#'   \code{tune::control_grid(parallel_over = ...)}
#' @param learn_rate Optional length-2 numeric giving the log10 search range
#'   for the learning rate; \code{NULL} uses the default range
#' @param min_n Optional length-2 integer search range for \code{min_n};
#'   \code{NULL} uses the default range
#' @param tree_depth Optional length-2 integer search range for
#'   \code{tree_depth}; \code{NULL} uses the default range
#' @param loss_reduction Optional length-2 numeric log10 search range for
#'   \code{loss_reduction}; \code{NULL} uses the default range
#'
wflw_creator <- function(model_spec, ml_recipe, resamples_kfold, grid_size = grid_size, parallel_type, learn_rate, min_n, tree_depth, loss_reduction) {
  # Build a workflow from the model spec and recipe, tune it on the
  # resamples, and return the workflow finalized (and fitted) with the best
  # RMSE parameters. Which hyperparameter ranges are customised depends on
  # the model engine.
  #
  # NOTE(review): `grid_size = grid_size` is a self-referential default and
  # errors if the argument is not supplied -- callers must always pass it.
  # NOTE(review): the final fit uses a global object `splits` (see below).
  return_list <- list()
  engine <- model_spec$engine

  wflw <- workflow() %>%
    add_model(model_spec) %>%
    add_recipe(ml_recipe)

  # Engine-specific set of hyperparameters whose search ranges are updated.
  # Previously each engine had its own near-identical tune_grid() branch;
  # this table keeps the same ranges per engine without the duplication.
  tuned_params <- switch(
    engine,
    lightgbm = ,
    prophet_xgboost = ,
    xgboost = c("sample_size", "learn_rate", "min_n", "tree_depth", "loss_reduction"),
    catboost = c("sample_size", "learn_rate", "min_n", "tree_depth"),
    ranger = "min_n",
    character(0)
  )

  # Each range honours a user-supplied c(lower, upper) bound pair when given
  # (NULL means "use the default range"). learn_rate and loss_reduction are
  # searched on a log10 scale.
  updates <- list()
  if ("sample_size" %in% tuned_params) {
    updates$sample_size <- sample_prop(range = c(0, 1))
  }
  if ("learn_rate" %in% tuned_params) {
    updates$learn_rate <- if (is.null(learn_rate)) {
      learn_rate(range = c(-10, -1), trans = log10_trans())
    } else {
      learn_rate(range = c(learn_rate[1], learn_rate[2]), trans = log10_trans())
    }
  }
  if ("min_n" %in% tuned_params) {
    updates$min_n <- if (is.null(min_n)) {
      min_n(range = c(2L, 40L), trans = NULL)
    } else {
      min_n(range = c(min_n[1], min_n[2]), trans = NULL)
    }
  }
  if ("tree_depth" %in% tuned_params) {
    updates$tree_depth <- if (is.null(tree_depth)) {
      tree_depth(range = c(1L, 15L), trans = NULL)
    } else {
      tree_depth(range = c(tree_depth[1], tree_depth[2]), trans = NULL)
    }
  }
  if ("loss_reduction" %in% tuned_params) {
    updates$loss_reduction <- if (is.null(loss_reduction)) {
      loss_reduction(range = c(-10, 1.5), trans = log10_trans())
    } else {
      loss_reduction(range = c(loss_reduction[1], loss_reduction[2]), trans = log10_trans())
    }
  }

  param_info <- parameters(wflw)
  if (length(updates) > 0) {
    param_info <- do.call(update, c(list(object = param_info), updates))
  }

  tune_results <- tune_grid(
    object = wflw,
    resamples = resamples_kfold,
    param_info = param_info,
    grid = grid_size,
    control = control_grid(verbose = TRUE, allow_par = TRUE, parallel_over = parallel_type)
  )

  # Best parameter combination by RMSE.
  best_results <- tune_results %>%
    show_best(metric = "rmse", n = 1)

  # A tuning plot only makes sense when more than one candidate was tried.
  if (grid_size > 1) {
    tune_plot <- tune_results %>%
      autoplot()
    return_list$tune_plot <- tune_plot
  }

  fin_wflw <- wflw %>%
    finalize_workflow(parameters = best_results %>% dplyr::slice(1))

  # NOTE(review): `splits` is resolved from the enclosing/global environment,
  # not passed as an argument -- TODO confirm and consider parameterising it.
  wflw_fit <- fin_wflw %>%
    fit(training(splits))

  return_list$fitted_workflow <- wflw_fit
  return_list$finalized_workflow <- fin_wflw
  return(return_list)
}
| /R/wflw_creator.R | no_license | ssh352/sumots | R | false | false | 5,465 | r | #' A helper function to create workflow passed to the ml_tune() function
#'
#' wflw_creator() is a function that creates a workflow
#'
#' @param model_spec A model specification created with the Parsnip or Modeltime package
#' @param ml_recipe Recipe for the models
#' @param resamples_kfold Resamples used for tuning parameters
#' @param grid_size The size of the grid of parameters
#' @param parallel_type Parallelisation strategy passed to
#'   \code{tune::control_grid(parallel_over = ...)}
#' @param learn_rate Optional length-2 numeric giving the log10 search range
#'   for the learning rate; \code{NULL} uses the default range
#' @param min_n Optional length-2 integer search range for \code{min_n};
#'   \code{NULL} uses the default range
#' @param tree_depth Optional length-2 integer search range for
#'   \code{tree_depth}; \code{NULL} uses the default range
#' @param loss_reduction Optional length-2 numeric log10 search range for
#'   \code{loss_reduction}; \code{NULL} uses the default range
#'
wflw_creator <- function(model_spec, ml_recipe, resamples_kfold, grid_size = grid_size, parallel_type, learn_rate, min_n, tree_depth, loss_reduction) {
  # Build a workflow from the model spec and recipe, tune it on the
  # resamples, and return the workflow finalized (and fitted) with the best
  # RMSE parameters. Which hyperparameter ranges are customised depends on
  # the model engine.
  #
  # NOTE(review): `grid_size = grid_size` is a self-referential default and
  # errors if the argument is not supplied -- callers must always pass it.
  # NOTE(review): the final fit uses a global object `splits` (see below).
  return_list <- list()
  engine <- model_spec$engine

  wflw <- workflow() %>%
    add_model(model_spec) %>%
    add_recipe(ml_recipe)

  # Engine-specific set of hyperparameters whose search ranges are updated.
  # Previously each engine had its own near-identical tune_grid() branch;
  # this table keeps the same ranges per engine without the duplication.
  tuned_params <- switch(
    engine,
    lightgbm = ,
    prophet_xgboost = ,
    xgboost = c("sample_size", "learn_rate", "min_n", "tree_depth", "loss_reduction"),
    catboost = c("sample_size", "learn_rate", "min_n", "tree_depth"),
    ranger = "min_n",
    character(0)
  )

  # Each range honours a user-supplied c(lower, upper) bound pair when given
  # (NULL means "use the default range"). learn_rate and loss_reduction are
  # searched on a log10 scale.
  updates <- list()
  if ("sample_size" %in% tuned_params) {
    updates$sample_size <- sample_prop(range = c(0, 1))
  }
  if ("learn_rate" %in% tuned_params) {
    updates$learn_rate <- if (is.null(learn_rate)) {
      learn_rate(range = c(-10, -1), trans = log10_trans())
    } else {
      learn_rate(range = c(learn_rate[1], learn_rate[2]), trans = log10_trans())
    }
  }
  if ("min_n" %in% tuned_params) {
    updates$min_n <- if (is.null(min_n)) {
      min_n(range = c(2L, 40L), trans = NULL)
    } else {
      min_n(range = c(min_n[1], min_n[2]), trans = NULL)
    }
  }
  if ("tree_depth" %in% tuned_params) {
    updates$tree_depth <- if (is.null(tree_depth)) {
      tree_depth(range = c(1L, 15L), trans = NULL)
    } else {
      tree_depth(range = c(tree_depth[1], tree_depth[2]), trans = NULL)
    }
  }
  if ("loss_reduction" %in% tuned_params) {
    updates$loss_reduction <- if (is.null(loss_reduction)) {
      loss_reduction(range = c(-10, 1.5), trans = log10_trans())
    } else {
      loss_reduction(range = c(loss_reduction[1], loss_reduction[2]), trans = log10_trans())
    }
  }

  param_info <- parameters(wflw)
  if (length(updates) > 0) {
    param_info <- do.call(update, c(list(object = param_info), updates))
  }

  tune_results <- tune_grid(
    object = wflw,
    resamples = resamples_kfold,
    param_info = param_info,
    grid = grid_size,
    control = control_grid(verbose = TRUE, allow_par = TRUE, parallel_over = parallel_type)
  )

  # Best parameter combination by RMSE.
  best_results <- tune_results %>%
    show_best(metric = "rmse", n = 1)

  # A tuning plot only makes sense when more than one candidate was tried.
  if (grid_size > 1) {
    tune_plot <- tune_results %>%
      autoplot()
    return_list$tune_plot <- tune_plot
  }

  fin_wflw <- wflw %>%
    finalize_workflow(parameters = best_results %>% dplyr::slice(1))

  # NOTE(review): `splits` is resolved from the enclosing/global environment,
  # not passed as an argument -- TODO confirm and consider parameterising it.
  wflw_fit <- fin_wflw %>%
    fit(training(splits))

  return_list$fitted_workflow <- wflw_fit
  return_list$finalized_workflow <- fin_wflw
  return(return_list)
}
|
#' read a text file(s)
#'
#' Read texts and (if any) associated document-level meta-data from one or more source files.
#' The text source files
#' come from the textual component of the files, and the document-level
#' metadata ("docvars") come from either the file contents or filenames.
#' @param file the complete filename(s) to be read. This is designed to
#' automagically handle a number of common scenarios, so the value can be a
#' single filename, a vector of file names, a remote URL, or a file "mask" using a
#' "glob"-type wildcard value. Currently available filetypes are:
#'
#' \strong{Single file formats:}
#'
#' \describe{
#' \item{\code{txt}}{plain text files:
#' So-called structured text files, which describe both texts and metadata:
#' For all structured text filetypes, the column, field, or node
#' which contains the the text must be specified with the \code{text_field}
#' parameter, and all other fields are treated as docvars.}
#' \item{\code{json}}{data in some form of JavaScript
#' Object Notation, consisting of the texts and optionally additional docvars.
#' The supported formats are:
#' \itemize{
#' \item a single JSON object per file
#' \item line-delimited JSON, with one object per line
#' \item line-delimited JSON, of the format produced from a Twitter stream.
#' This type of file has special handling which simplifies the Twitter format
#' into docvars. The correct format for each JSON file is automatically detected.}}
#' \item{\code{csv,tab,tsv}}{comma- or tab-separated values}
#' \item{\code{html}}{HTML documents, including specialized formats from known
#' sources, such as Nexis-formatted HTML. See the \code{source} parameter
#' below.}
#' \item{\code{xml}}{Basic flat XML documents are supported -- those of the
#' kind supported by \code{\link[XML]{xmlToDataFrame}}. For xml files, an additional
#' argument \code{collapse} may be passed through \code{...} that names the character(s) to use in
#' appending different text elements together.}
#' \item{\code{pdf}}{pdf formatted files, converted through \pkg{pdftools}.}
#' \item{\code{doc, docx}}{Microsoft Word formatted files.}
#'
#' \strong{Reading multiple files and file types:}
#'
#'   In addition, \code{file} can be not only a path
#' to a single local file, but also combinations of any of the above types, such as:
#' \item{a wildcard value}{any valid
#' pathname with a wildcard ("glob") expression that can be expanded by the
#' operating system. This may consist of multiple file types.}
#' \item{a URL to a remote}{which is downloaded then loaded}
#' \item{\code{zip,tar,tar.gz,tar.bz}}{archive file, which is unzipped. The
#' contained files must be either at the top level or in a single directory.
#' Archives, remote URLs and glob patterns can resolve to any of the other
#' filetypes, so you could have, for example, a remote URL to a zip file which
#' contained Twitter JSON files.}
#' }
#' @param text_field a variable (column) name or column number indicating where
#' to find the texts that form the documents for the corpus. This must be
#' specified for file types \code{.csv}, \code{.json}, and \code{.xls}/\code{.xlsx}
#' files. For XML files, an XPath expression can be specified.
#' @param docvarsfrom used to specify that docvars should be taken from the
#' filenames, when the \code{readtext} inputs are filenames and the elements
#' of the filenames are document variables, separated by a delimiter
#' (\code{dvsep}). This allows easy assignment of docvars from filenames such
#' as \code{1789-Washington.txt}, \code{1793-Washington}, etc. by \code{dvsep}
#' or from meta-data embedded in the text file header (\code{headers}).
#' If \code{docvarsfrom} is set to \code{"filepaths"}, consider the full path to the
#' file, not just the filename.
#' @param dvsep separator (a regular expression character string) used in
#' filenames to delimit docvar elements if \code{docvarsfrom="filenames"}
#' or \code{docvarsfrom="filepaths"} is used
#' @param docvarnames character vector of variable names for \code{docvars}, if
#' \code{docvarsfrom} is specified. If this argument is not used, default
#' docvar names will be used (\code{docvar1}, \code{docvar2}, ...).
#' @param encoding vector: either the encoding of all files, or one encoding
#' for each files
#' @param ignore_missing_files if \code{FALSE}, then if the file
#' argument doesn't resolve to an existing file, then an error will be thrown.
#' Note that this can happen in a number of ways, including passing a path
#' to a file that does not exist, to an empty archive file, or to a glob
#' pattern that matches no files.
#' @param source used to specify specific formats of some input file types, such
#' as JSON or HTML. Currently supported types are \code{"twitter"} for JSON and
#' \code{"nexis"} for HTML.
#' @param cache if \code{TRUE}, save remote file to a temporary folder. Only used
#' when \code{file} is a URL.
#' @param verbosity \itemize{
#' \item 0: output errors only
#' \item 1: output errors and warnings (default)
#' \item 2: output a brief summary message
#' \item 3: output detailed file-related messages
#' }
#' @param ... additional arguments passed through to low-level file reading
#' function, such as \code{\link{file}}, \code{\link{fread}}, etc. Useful
#' for specifying an input encoding option, which is specified in the same way
#' as it would be given to \code{\link{iconv}}. See the Encoding section of
#' \link{file} for details.
#' @return a data.frame consisting of a columns \code{doc_id} and \code{text}
#' that contain a document identifier and the texts respectively, with any
#' additional columns consisting of document-level variables either found
#' in the file containing the texts, or created through the
#' \code{readtext} call.
#' @export
#' @importFrom utils unzip type.convert
#' @importFrom httr GET write_disk
#' @examples
#' \donttest{
#' ## get the data directory
#' if (!interactive()) pkgload::load_all()
#' DATA_DIR <- system.file("extdata/", package = "readtext")
#'
#' ## read in some text data
#' # all UDHR files
#' (rt1 <- readtext(paste0(DATA_DIR, "/txt/UDHR/*")))
#'
#' # manifestos with docvars from filenames
#' (rt2 <- readtext(paste0(DATA_DIR, "/txt/EU_manifestos/*.txt"),
#' docvarsfrom = "filenames",
#' docvarnames = c("unit", "context", "year", "language", "party"),
#' encoding = "LATIN1"))
#'
#' # recurse through subdirectories
#' (rt3 <- readtext(paste0(DATA_DIR, "/txt/movie_reviews/*"),
#' docvarsfrom = "filepaths", docvarnames = "sentiment"))
#'
#' ## read in csv data
#' (rt4 <- readtext(paste0(DATA_DIR, "/csv/inaugCorpus.csv")))
#'
#' ## read in tab-separated data
#' (rt5 <- readtext(paste0(DATA_DIR, "/tsv/dailsample.tsv"), text_field = "speech"))
#'
#' ## read in JSON data
#' (rt6 <- readtext(paste0(DATA_DIR, "/json/inaugural_sample.json"), text_field = "texts"))
#'
#' ## read in pdf data
#' # UNHDR
#' (rt7 <- readtext(paste0(DATA_DIR, "/pdf/UDHR/*.pdf"),
#' docvarsfrom = "filenames",
#' docvarnames = c("document", "language")))
#' Encoding(rt7$text)
#'
#' ## read in Word data (.doc)
#' (rt8 <- readtext(paste0(DATA_DIR, "/word/*.doc")))
#' Encoding(rt8$text)
#'
#' ## read in Word data (.docx)
#' (rt9 <- readtext(paste0(DATA_DIR, "/word/*.docx")))
#' Encoding(rt9$text)
#'
#' ## use elements of path and filename as docvars
#' (rt10 <- readtext(paste0(DATA_DIR, "/pdf/UDHR/*.pdf"),
#' docvarsfrom = "filepaths", dvsep = "[/_.]"))
#' }
readtext <- function(file, ignore_missing_files = FALSE, text_field = NULL,
                     docvarsfrom = c("metadata", "filenames", "filepaths"), dvsep = "_",
                     docvarnames = NULL, encoding = NULL, source = NULL, cache = TRUE,
                     verbosity = readtext_options("verbosity"),
                     ...) {

    args <- list(...)
    # Honour the deprecated 'textfield' argument for backward compatibility.
    if ("textfield" %in% names(args)) {
        warning("textfield is deprecated; use text_field instead.")
        text_field <- args[["textfield"]]
    }

    # If the function is called without attaching the package, the
    # "verbosity" option is never set and readtext_options() returns NULL;
    # the %in% check below would then fail with "argument is of length
    # zero", so fall back to the default level (errors and warnings).
    if (is.null(verbosity))
        verbosity <- 1
    if (!verbosity %in% 0:3)
        stop("verbosity must be one of 0, 1, 2, 3.")

    # --- argument validation -------------------------------------------------
    if (!all(is.character(file)))
        stop("file must be a character (specifying file location(s)).")
    if (!is.null(source) && !is.character(source))
        stop("source must be a character.")
    docvarsfrom <- match.arg(docvarsfrom)

    # --- defaults ------------------------------------------------------------
    # text_field = 1 means "first column" for tabular inputs.
    if (is.null(text_field))
        text_field <- 1
    if (is.null(encoding))
        encoding <- getOption("encoding")
    if (is.null(source))
        source <- "auto"

    if (verbosity >= 2)
        message("Reading texts from ", file)

    # TODO: files need to be imported as they are discovered. Currently
    # list_files() uses a lot of storage space for temporary files when there
    # are a lot of archives.
    files <- list_files(file, ignore_missing_files, FALSE, cache, verbosity)

    # Recycle a single encoding across all files; otherwise require exactly
    # one encoding per resolved file.
    if (length(encoding) == 1) {
        encoding <- rep(encoding, length(files))
    } else {
        if (length(encoding) != length(files))
            stop("Encoding parameter must be length 1, or as long as the number of files")
    }

    # Read every file, pairing it with its encoding; each element is a
    # data.frame with a "text" column plus any in-file docvars.
    sources <- mapply(function(x, e) {
        get_source(x, text_field = text_field, encoding = e, source = source, verbosity = verbosity, ...)
    }, files, encoding, SIMPLIFY = FALSE)

    # Combine all of the data.frames returned; doc_id is filled in below.
    result <- data.frame(doc_id = "",
                         data.table::rbindlist(sources, use.names = TRUE, fill = TRUE),
                         stringsAsFactors = FALSE)

    # Disambiguate duplicated document names (e.g. a glob matching identical
    # filenames in different directories) by prefixing the unique part of
    # each file's path.
    ids <- lapply(sources, row.names)
    id <- unlist(ids, use.names = FALSE)
    if (any(duplicated(id))) {
        prefix <- rep(basename_unique(files, path_only = TRUE), lengths(ids))
        id <- paste(prefix, id, sep = "/")
    }

    # Optionally derive docvars from the file names / paths and append them.
    if (docvarsfrom %in% c("filepaths", "filenames")) {
        docvar <- get_docvars_filenames(files, dvsep, docvarnames, docvarsfrom == "filepaths", verbosity)
        result <- cbind(result, impute_types(docvar))
    }

    # Store document identifiers in the doc_id column, not in row names.
    result$doc_id <- id
    rownames(result) <- NULL

    if (verbosity >= 2)
        message(" ... read ", nrow(result), " document", if (nrow(result) == 1) "" else "s.")

    class(result) <- c("readtext", "data.frame")
    result
}
## Read each file as appropriate, calling the get_* functions for recognized
## file types.  Returns a data.frame with a "text" column (plus any docvars
## found inside the file); row names are derived from the file's basename,
## with a ".1", ".2", ... suffix for multi-document files.
get_source <- function(path, text_field, replace_specialchar = FALSE, verbosity = 1, ...,
                       # deprecated arguments
                       textfield) {
    # Dispatch on the lower-cased file extension.
    ext <- tolower(file_ext(path))
    if (ext %in% extensions()) {
        # A directory may share a name with a recognized extension (e.g. a
        # folder literally named "data.csv"); raise an actionable error that
        # shows the caller the glob expression to use instead.
        if (dir.exists(path)) {
            call <- deparse(sys.call(1))
            call <- sub(path, paste0(sub("/$", "", path), "/*"), call, fixed = TRUE)
            stop("File '", path, "' does not exist, but a directory of this name does exist. ",
                 "To read all files in a directory, you must pass a glob expression like ", call, ".")
        }
    } else {
        # Unknown extensions fall back to plain-text handling.
        if (verbosity >= 1)
            warning("Unsupported extension ", sQuote(ext), " of file ", path, " treating as plain text.")
        ext <- "txt"
    }
    if (verbosity >= 3)
        message(" ... reading (", ext, ") file: ", path)
    # Delegate to the format-specific reader; extra arguments in ... are
    # forwarded to the low-level reading functions.
    result <- switch(ext,
           txt = get_txt(path, ...),
           csv = get_csv(path, text_field, sep = ",", ...),
           tsv = get_csv(path, text_field, sep = "\t", ...),
           tab = get_csv(path, text_field, sep = "\t", ...),
           json = get_json(path, text_field, verbosity = verbosity, ...),
           xml = get_xml(path, text_field, verbosity = verbosity, ...),
           html = get_html(path, verbosity = verbosity, ...),
           pdf = get_pdf(path, ...),
           docx = get_docx(path, ...),
           doc = get_doc(path, ...),
           xls = get_excel(path, text_field, ...),
           xlsx = get_excel(path, text_field, ...),
           ods = get_ods(path, text_field, ...)
    )
    # assign filename (variants) unique text names
    len <- nrow(result)
    # TODO: stop using row.names as it errors when duplicated
    if (len > 1) {
        row.names(result) <- paste(basename(path), seq_len(len), sep = ".")
    } else {
        row.names(result) <- basename(path)
    }
    # Optionally normalize special Unicode characters to ASCII equivalents.
    if (replace_specialchar)
        result$text <- replace_charclass(result$text)
    return(result)
}
## Normalize selected Unicode character classes to plain ASCII equivalents:
## all dash punctuation becomes "-", space separators become " ", "smart"
## opening/closing quotes become "'", and private-use or unassigned code
## points are removed.
replace_charclass <- function (text) {
    # Names are the regex patterns, values are the replacements.
    mapping <- c(
        "\\p{Dash_Punctuation}" = "-",
        "\\p{Space_Separator}" = " ",
        "\\p{Initial_Punctuation}" = "'",
        "\\p{Final_Punctuation}" = "'",
        "\\p{Private_Use}" = "",
        "\\p{Unassigned}" = ""
    )
    for (i in seq_along(mapping)) {
        # BUGFIX: stri_replace_all(str, replacement, ..., regex) takes the
        # replacement as its second argument.  The original call passed the
        # \p{...} pattern as the replacement and the replacement character as
        # the regex, i.e. the two were swapped, so it replaced literal "-",
        # " ", etc. with the pattern strings instead of normalizing the text.
        text <- stri_replace_all(text, mapping[[i]], regex = names(mapping)[i])
    }
    return(text)
}
| /R/readtext.R | no_license | jirkalewandowski/readtext | R | false | false | 13,633 | r | #' read a text file(s)
#'
#' Read texts and (if any) associated document-level meta-data from one or more source files.
#' The text source files
#' come from the textual component of the files, and the document-level
#' metadata ("docvars") come from either the file contents or filenames.
#' @param file the complete filename(s) to be read. This is designed to
#' automagically handle a number of common scenarios, so the value can be a
#' single filename, a vector of file names, a remote URL, or a file "mask" using a
#' "glob"-type wildcard value. Currently available filetypes are:
#'
#' \strong{Single file formats:}
#'
#' \describe{
#' \item{\code{txt}}{plain text files:
#' So-called structured text files, which describe both texts and metadata:
#' For all structured text filetypes, the column, field, or node
#' which contains the the text must be specified with the \code{text_field}
#' parameter, and all other fields are treated as docvars.}
#' \item{\code{json}}{data in some form of JavaScript
#' Object Notation, consisting of the texts and optionally additional docvars.
#' The supported formats are:
#' \itemize{
#' \item a single JSON object per file
#' \item line-delimited JSON, with one object per line
#' \item line-delimited JSON, of the format produced from a Twitter stream.
#' This type of file has special handling which simplifies the Twitter format
#' into docvars. The correct format for each JSON file is automatically detected.}}
#' \item{\code{csv,tab,tsv}}{comma- or tab-separated values}
#' \item{\code{html}}{HTML documents, including specialized formats from known
#' sources, such as Nexis-formatted HTML. See the \code{source} parameter
#' below.}
#' \item{\code{xml}}{Basic flat XML documents are supported -- those of the
#' kind supported by \code{\link[XML]{xmlToDataFrame}}. For xml files, an additional
#' argument \code{collapse} may be passed through \code{...} that names the character(s) to use in
#' appending different text elements together.}
#' \item{\code{pdf}}{pdf formatted files, converted through \pkg{pdftools}.}
#' \item{\code{doc, docx}}{Microsoft Word formatted files.}
#'
#' \strong{Reading multiple files and file types:}
#'
#' In addition, \code{file} can be not only a path
#' to a single local file, but also combinations of any of the above types, such as:
#' \item{a wildcard value}{any valid
#' pathname with a wildcard ("glob") expression that can be expanded by the
#' operating system. This may consist of multiple file types.}
#' \item{a URL to a remote}{which is downloaded then loaded}
#' \item{\code{zip,tar,tar.gz,tar.bz}}{archive file, which is unzipped. The
#' contained files must be either at the top level or in a single directory.
#' Archives, remote URLs and glob patterns can resolve to any of the other
#' filetypes, so you could have, for example, a remote URL to a zip file which
#' contained Twitter JSON files.}
#' }
#' @param text_field a variable (column) name or column number indicating where
#' to find the texts that form the documents for the corpus. This must be
#' specified for file types \code{.csv}, \code{.json}, and \code{.xls}/\code{.xlsx}
#' files. For XML files, an XPath expression can be specified.
#' @param docvarsfrom used to specify that docvars should be taken from the
#' filenames, when the \code{readtext} inputs are filenames and the elements
#' of the filenames are document variables, separated by a delimiter
#' (\code{dvsep}). This allows easy assignment of docvars from filenames such
#' as \code{1789-Washington.txt}, \code{1793-Washington}, etc. by \code{dvsep}
#' or from meta-data embedded in the text file header (\code{headers}).
#' If \code{docvarsfrom} is set to \code{"filepaths"}, consider the full path to the
#' file, not just the filename.
#' @param dvsep separator (a regular expression character string) used in
#' filenames to delimit docvar elements if \code{docvarsfrom="filenames"}
#' or \code{docvarsfrom="filepaths"} is used
#' @param docvarnames character vector of variable names for \code{docvars}, if
#' \code{docvarsfrom} is specified. If this argument is not used, default
#' docvar names will be used (\code{docvar1}, \code{docvar2}, ...).
#' @param encoding vector: either the encoding of all files, or one encoding
#' for each file
#' @param ignore_missing_files if \code{FALSE}, then if the file
#' argument doesn't resolve to an existing file, then an error will be thrown.
#' Note that this can happen in a number of ways, including passing a path
#' to a file that does not exist, to an empty archive file, or to a glob
#' pattern that matches no files.
#' @param source used to specify specific formats of some input file types, such
#' as JSON or HTML. Currently supported types are \code{"twitter"} for JSON and
#' \code{"nexis"} for HTML.
#' @param cache if \code{TRUE}, save remote file to a temporary folder. Only used
#' when \code{file} is a URL.
#' @param verbosity \itemize{
#' \item 0: output errors only
#' \item 1: output errors and warnings (default)
#' \item 2: output a brief summary message
#' \item 3: output detailed file-related messages
#' }
#' @param ... additional arguments passed through to low-level file reading
#' function, such as \code{\link{file}}, \code{\link{fread}}, etc. Useful
#' for specifying an input encoding option, which is specified in the same way
#' as it would be given to \code{\link{iconv}}. See the Encoding section of
#' \link{file} for details.
#' @return a data.frame consisting of a columns \code{doc_id} and \code{text}
#' that contain a document identifier and the texts respectively, with any
#' additional columns consisting of document-level variables either found
#' in the file containing the texts, or created through the
#' \code{readtext} call.
#' @export
#' @importFrom utils unzip type.convert
#' @importFrom httr GET write_disk
#' @examples
#' \donttest{
#' ## get the data directory
#' if (!interactive()) pkgload::load_all()
#' DATA_DIR <- system.file("extdata/", package = "readtext")
#'
#' ## read in some text data
#' # all UDHR files
#' (rt1 <- readtext(paste0(DATA_DIR, "/txt/UDHR/*")))
#'
#' # manifestos with docvars from filenames
#' (rt2 <- readtext(paste0(DATA_DIR, "/txt/EU_manifestos/*.txt"),
#' docvarsfrom = "filenames",
#' docvarnames = c("unit", "context", "year", "language", "party"),
#' encoding = "LATIN1"))
#'
#' # recurse through subdirectories
#' (rt3 <- readtext(paste0(DATA_DIR, "/txt/movie_reviews/*"),
#' docvarsfrom = "filepaths", docvarnames = "sentiment"))
#'
#' ## read in csv data
#' (rt4 <- readtext(paste0(DATA_DIR, "/csv/inaugCorpus.csv")))
#'
#' ## read in tab-separated data
#' (rt5 <- readtext(paste0(DATA_DIR, "/tsv/dailsample.tsv"), text_field = "speech"))
#'
#' ## read in JSON data
#' (rt6 <- readtext(paste0(DATA_DIR, "/json/inaugural_sample.json"), text_field = "texts"))
#'
#' ## read in pdf data
#' # UNHDR
#' (rt7 <- readtext(paste0(DATA_DIR, "/pdf/UDHR/*.pdf"),
#' docvarsfrom = "filenames",
#' docvarnames = c("document", "language")))
#' Encoding(rt7$text)
#'
#' ## read in Word data (.doc)
#' (rt8 <- readtext(paste0(DATA_DIR, "/word/*.doc")))
#' Encoding(rt8$text)
#'
#' ## read in Word data (.docx)
#' (rt9 <- readtext(paste0(DATA_DIR, "/word/*.docx")))
#' Encoding(rt9$text)
#'
#' ## use elements of path and filename as docvars
#' (rt10 <- readtext(paste0(DATA_DIR, "/pdf/UDHR/*.pdf"),
#' docvarsfrom = "filepaths", dvsep = "[/_.]"))
#' }
readtext <- function(file, ignore_missing_files = FALSE, text_field = NULL,
                     docvarsfrom = c("metadata", "filenames", "filepaths"), dvsep = "_",
                     docvarnames = NULL, encoding = NULL, source = NULL, cache = TRUE,
                     verbosity = readtext_options("verbosity"),
                     ...) {

    args <- list(...)
    # Honour the deprecated 'textfield' argument for backward compatibility.
    if ("textfield" %in% names(args)) {
        warning("textfield is deprecated; use text_field instead.")
        text_field <- args[["textfield"]]
    }

    # If the function is called without attaching the package, the
    # "verbosity" option is never set and readtext_options() returns NULL;
    # the %in% check below would then fail with "argument is of length
    # zero", so fall back to the default level (errors and warnings).
    if (is.null(verbosity))
        verbosity <- 1
    if (!verbosity %in% 0:3)
        stop("verbosity must be one of 0, 1, 2, 3.")

    # --- argument validation -------------------------------------------------
    if (!all(is.character(file)))
        stop("file must be a character (specifying file location(s)).")
    if (!is.null(source) && !is.character(source))
        stop("source must be a character.")
    docvarsfrom <- match.arg(docvarsfrom)

    # --- defaults ------------------------------------------------------------
    # text_field = 1 means "first column" for tabular inputs.
    if (is.null(text_field))
        text_field <- 1
    if (is.null(encoding))
        encoding <- getOption("encoding")
    if (is.null(source))
        source <- "auto"

    if (verbosity >= 2)
        message("Reading texts from ", file)

    # TODO: files need to be imported as they are discovered. Currently
    # list_files() uses a lot of storage space for temporary files when there
    # are a lot of archives.
    files <- list_files(file, ignore_missing_files, FALSE, cache, verbosity)

    # Recycle a single encoding across all files; otherwise require exactly
    # one encoding per resolved file.
    if (length(encoding) == 1) {
        encoding <- rep(encoding, length(files))
    } else {
        if (length(encoding) != length(files))
            stop("Encoding parameter must be length 1, or as long as the number of files")
    }

    # Read every file, pairing it with its encoding; each element is a
    # data.frame with a "text" column plus any in-file docvars.
    sources <- mapply(function(x, e) {
        get_source(x, text_field = text_field, encoding = e, source = source, verbosity = verbosity, ...)
    }, files, encoding, SIMPLIFY = FALSE)

    # Combine all of the data.frames returned; doc_id is filled in below.
    result <- data.frame(doc_id = "",
                         data.table::rbindlist(sources, use.names = TRUE, fill = TRUE),
                         stringsAsFactors = FALSE)

    # Disambiguate duplicated document names (e.g. a glob matching identical
    # filenames in different directories) by prefixing the unique part of
    # each file's path.
    ids <- lapply(sources, row.names)
    id <- unlist(ids, use.names = FALSE)
    if (any(duplicated(id))) {
        prefix <- rep(basename_unique(files, path_only = TRUE), lengths(ids))
        id <- paste(prefix, id, sep = "/")
    }

    # Optionally derive docvars from the file names / paths and append them.
    if (docvarsfrom %in% c("filepaths", "filenames")) {
        docvar <- get_docvars_filenames(files, dvsep, docvarnames, docvarsfrom == "filepaths", verbosity)
        result <- cbind(result, impute_types(docvar))
    }

    # Store document identifiers in the doc_id column, not in row names.
    result$doc_id <- id
    rownames(result) <- NULL

    if (verbosity >= 2)
        message(" ... read ", nrow(result), " document", if (nrow(result) == 1) "" else "s.")

    class(result) <- c("readtext", "data.frame")
    result
}
## Read each file as appropriate, calling the get_* functions for recognized
## file types.  Returns a data.frame with a "text" column (plus any docvars
## found inside the file); row names are derived from the file's basename,
## with a ".1", ".2", ... suffix for multi-document files.
get_source <- function(path, text_field, replace_specialchar = FALSE, verbosity = 1, ...,
                       # deprecated arguments
                       textfield) {
    # Dispatch on the lower-cased file extension.
    ext <- tolower(file_ext(path))
    if (ext %in% extensions()) {
        # A directory may share a name with a recognized extension (e.g. a
        # folder literally named "data.csv"); raise an actionable error that
        # shows the caller the glob expression to use instead.
        if (dir.exists(path)) {
            call <- deparse(sys.call(1))
            call <- sub(path, paste0(sub("/$", "", path), "/*"), call, fixed = TRUE)
            stop("File '", path, "' does not exist, but a directory of this name does exist. ",
                 "To read all files in a directory, you must pass a glob expression like ", call, ".")
        }
    } else {
        # Unknown extensions fall back to plain-text handling.
        if (verbosity >= 1)
            warning("Unsupported extension ", sQuote(ext), " of file ", path, " treating as plain text.")
        ext <- "txt"
    }
    if (verbosity >= 3)
        message(" ... reading (", ext, ") file: ", path)
    # Delegate to the format-specific reader; extra arguments in ... are
    # forwarded to the low-level reading functions.
    result <- switch(ext,
           txt = get_txt(path, ...),
           csv = get_csv(path, text_field, sep = ",", ...),
           tsv = get_csv(path, text_field, sep = "\t", ...),
           tab = get_csv(path, text_field, sep = "\t", ...),
           json = get_json(path, text_field, verbosity = verbosity, ...),
           xml = get_xml(path, text_field, verbosity = verbosity, ...),
           html = get_html(path, verbosity = verbosity, ...),
           pdf = get_pdf(path, ...),
           docx = get_docx(path, ...),
           doc = get_doc(path, ...),
           xls = get_excel(path, text_field, ...),
           xlsx = get_excel(path, text_field, ...),
           ods = get_ods(path, text_field, ...)
    )
    # assign filename (variants) unique text names
    len <- nrow(result)
    # TODO: stop using row.names as it errors when duplicated
    if (len > 1) {
        row.names(result) <- paste(basename(path), seq_len(len), sep = ".")
    } else {
        row.names(result) <- basename(path)
    }
    # Optionally normalize special Unicode characters to ASCII equivalents.
    if (replace_specialchar)
        result$text <- replace_charclass(result$text)
    return(result)
}
## Normalize selected Unicode character classes to plain ASCII equivalents:
## all dash punctuation becomes "-", space separators become " ", "smart"
## opening/closing quotes become "'", and private-use or unassigned code
## points are removed.
replace_charclass <- function (text) {
    # Names are the regex patterns, values are the replacements.
    mapping <- c(
        "\\p{Dash_Punctuation}" = "-",
        "\\p{Space_Separator}" = " ",
        "\\p{Initial_Punctuation}" = "'",
        "\\p{Final_Punctuation}" = "'",
        "\\p{Private_Use}" = "",
        "\\p{Unassigned}" = ""
    )
    for (i in seq_along(mapping)) {
        # BUGFIX: stri_replace_all(str, replacement, ..., regex) takes the
        # replacement as its second argument.  The original call passed the
        # \p{...} pattern as the replacement and the replacement character as
        # the regex, i.e. the two were swapped, so it replaced literal "-",
        # " ", etc. with the pattern strings instead of normalizing the text.
        text <- stri_replace_all(text, mapping[[i]], regex = names(mapping)[i])
    }
    return(text)
}
|
# Plot 1: histogram of Global Active Power, 1-2 February 2007.

# Download and unzip the data file.  The download (~20 MB) is skipped when a
# local copy of the extracted file already exists, so re-running the script
# does not re-fetch the archive.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("household_power_consumption.txt")) {
    download.file(fileURL, "./household_power_consumption.zip", method = "curl")
    unzip("household_power_consumption.zip")
}

# Read the data file into a data frame; "?" marks missing values
elecData <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")

# Convert the Date column from character to Date class
elecData$Date <- as.Date(elecData$Date, format = "%d/%m/%Y")

# Subset data for the two dates of interest
elecDataSubset <- subset(elecData, elecData$Date == as.Date("2007-02-01")
                         | elecData$Date == as.Date("2007-02-02"))

# Open the PNG graphics device
png(filename = "plot1.png", width = 480, height = 480, units = "px")

# Plot the histogram of global active power
hist(elecDataSubset$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")

# Turn off the graphics device
dev.off()
| /plot1.R | no_license | az129/ExData_Plotting1 | R | false | false | 915 | r | # Download and unzip the data file
# Plot 1: histogram of Global Active Power, 1-2 February 2007.
# The download (~20 MB) is skipped when a local copy of the extracted file
# already exists, so re-running the script does not re-fetch the archive.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("household_power_consumption.txt")) {
    download.file(fileURL, "./household_power_consumption.zip", method = "curl")
    unzip("household_power_consumption.zip")
}

# Read the data file into a data frame; "?" marks missing values
elecData <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")

# Convert the Date column from character to Date class
elecData$Date <- as.Date(elecData$Date, format = "%d/%m/%Y")

# Subset data for the two dates of interest
elecDataSubset <- subset(elecData, elecData$Date == as.Date("2007-02-01")
                         | elecData$Date == as.Date("2007-02-02"))

# Open the PNG graphics device
png(filename = "plot1.png", width = 480, height = 480, units = "px")

# Plot the histogram of global active power
hist(elecDataSubset$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")

# Turn off the graphics device
dev.off()
|
# Cartogram of childhood overweight prevalence in Africa.

# Install any missing packages, then load them all.
# NOTE: installing from inside a script is a side effect; only packages not
# already present are installed.
list.of.packages <- c("maptools", "cartogram", "tidyverse", "dplyr",
                      "broom", "raster", "tmap", "plyr", "rgeos", "viridis")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
if (length(new.packages) > 0) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)

# Set working directory
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# consider project-relative paths (e.g. the here package) instead.
setwd("C:/Git/Cartogram")

# Read in the JME overweight estimates
overweight_data <- read.csv("JME Overweight_AH.csv")

# Keep only the rows flagged as the latest estimate, excluding CFSVA sources
# (the flag values carry surrounding spaces in the raw file).
overweight_df <- subset(overweight_data, Latest.Estimate == " Latest " & Short.Source != " CFSVA ")

# Narrow down to the two variables of interest
overweight_df <- overweight_df[, c("ISO", "National")]

# Drop the first row (header artifact in the raw file)
overweight_df <- overweight_df[-c(1), ]

# Rename the column headings
names(overweight_df)[1] <- "ISO3"
names(overweight_df)[2] <- "overweight"

# Ensure the overweight values are numeric BEFORE deriving the sizing
# variable (the original script converted only after computing it).
overweight_df$overweight <- as.numeric(as.character(overweight_df$overweight))

# Variable used to size the cartogram polygons; the ^1 exponent is a no-op
# kept so the scaling power can be tuned in one place (e.g. ^2 to exaggerate).
overweight_df$overweight_sqr <- overweight_df$overweight^1

# Shades of red used by the tmap style below
reds <- c("#FBD7CB", "#F6B2A7", "#F28E83", "#ED695E", "#E8443A")

# World polygons shipped with maptools
data("wrld_simpl")

# Merge the overweight estimates onto the world polygons by ISO3 code
wrld_simpl <- merge(wrld_simpl, overweight_df, by = "ISO3")

# Keep only the African polygons (UN region code 2)
afr <- wrld_simpl[wrld_simpl$REGION == 2, ]

# Project to World Mercator (EPSG:3395) before building the cartogram
afr <- spTransform(afr, CRS("+init=epsg:3395"))

# Construct a continuous area cartogram by a rubber sheet distortion algorithm
afr_cont <- cartogram_cont(afr, "overweight_sqr", itermax = 50)

# Custom tmap style using the red palette defined above
DI_style <- structure(
  list(
    bg.color = c(fill = "white", borders = "black",
                 symbols = "grey80", dots = "grey80",
                 lines = "black", text = "black",
                 na = "grey30", null = "grey15"),
    aes.palette = list(seq = reds, div = "PiYG", cat = "Dark2"),
    attr.color = "Black",
    panel.label.color = "Black",
    panel.label.bg.color = "Black",
    main.title.color = "Black"
  ),
  style = "DI"
)

# Load the style and draw the cartogram
tmap_options(DI_style)
tm_shape(afr_cont) +
  tm_polygons("overweight_sqr", style = "jenks") +
  tm_layout(frame = FALSE)
#Adam Hughes dataviz# | /cartogram.R | no_license | adam-hughes-dataviz/ah_repo | R | false | false | 2,775 | r | #Install packages - Ensure No. TRUE = No. packages
# Cartogram of childhood overweight prevalence in Africa.
# Install any missing packages, then load them all.
# NOTE: installing from inside a script is a side effect; only packages not
# already present are installed.
list.of.packages <- c("maptools", "cartogram", "tidyverse", "dplyr",
                      "broom", "raster", "tmap", "plyr", "rgeos", "viridis")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
if (length(new.packages) > 0) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)

# Set working directory
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# consider project-relative paths (e.g. the here package) instead.
setwd("C:/Git/Cartogram")

# Read in the JME overweight estimates
overweight_data <- read.csv("JME Overweight_AH.csv")

# Keep only the rows flagged as the latest estimate, excluding CFSVA sources
# (the flag values carry surrounding spaces in the raw file).
overweight_df <- subset(overweight_data, Latest.Estimate == " Latest " & Short.Source != " CFSVA ")

# Narrow down to the two variables of interest
overweight_df <- overweight_df[, c("ISO", "National")]

# Drop the first row (header artifact in the raw file)
overweight_df <- overweight_df[-c(1), ]

# Rename the column headings
names(overweight_df)[1] <- "ISO3"
names(overweight_df)[2] <- "overweight"

# Ensure the overweight values are numeric BEFORE deriving the sizing
# variable (the original script converted only after computing it).
overweight_df$overweight <- as.numeric(as.character(overweight_df$overweight))

# Variable used to size the cartogram polygons; the ^1 exponent is a no-op
# kept so the scaling power can be tuned in one place (e.g. ^2 to exaggerate).
overweight_df$overweight_sqr <- overweight_df$overweight^1

# Shades of red used by the tmap style below
reds <- c("#FBD7CB", "#F6B2A7", "#F28E83", "#ED695E", "#E8443A")

# World polygons shipped with maptools
data("wrld_simpl")

# Merge the overweight estimates onto the world polygons by ISO3 code
wrld_simpl <- merge(wrld_simpl, overweight_df, by = "ISO3")

# Keep only the African polygons (UN region code 2)
afr <- wrld_simpl[wrld_simpl$REGION == 2, ]

# Project to World Mercator (EPSG:3395) before building the cartogram
afr <- spTransform(afr, CRS("+init=epsg:3395"))

# Construct a continuous area cartogram by a rubber sheet distortion algorithm
afr_cont <- cartogram_cont(afr, "overweight_sqr", itermax = 50)

# Custom tmap style using the red palette defined above
DI_style <- structure(
  list(
    bg.color = c(fill = "white", borders = "black",
                 symbols = "grey80", dots = "grey80",
                 lines = "black", text = "black",
                 na = "grey30", null = "grey15"),
    aes.palette = list(seq = reds, div = "PiYG", cat = "Dark2"),
    attr.color = "Black",
    panel.label.color = "Black",
    panel.label.bg.color = "Black",
    main.title.color = "Black"
  ),
  style = "DI"
)

# Load the style and draw the cartogram
tmap_options(DI_style)
tm_shape(afr_cont) +
  tm_polygons("overweight_sqr", style = "jenks") +
  tm_layout(frame = FALSE)
#Adam Hughes dataviz# |
# Final Project - Tina Vo

# Install only the packages that are missing, then load everything.
# (The original script ran install.packages() unconditionally and loaded
# janitor BEFORE installing it, which fails on a fresh machine.)
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse")
if (!requireNamespace("janitor", quietly = TRUE)) install.packages("janitor")

library(tidyverse)  # attaches dplyr, ggplot2, readr, tidyr, ...
library(vroom)
library(visdat)
library(here)
library(janitor)
library(readr)

# NOTE(review): rm(list = ls()) clears the global environment; kept from the
# original script, but it is unsafe if this file is ever source()d from
# another session.
rm(list = ls()) # Clean out workspace

# Read the SATSA data
SATSA_datascience <- read_csv("~/GitHub/Rrepos/SATSA_datascience.csv")
View(SATSA_datascience)

# Use the visdat package to explore the data (column types / missingness)
vis_dat(SATSA_datascience)

# Keep only the sleep variables of interest in a new dataset:
# wave A bed/wake times and items, wave Z bed/wake times and items.
SATSA <- SATSA_datascience %>% select(ID, AGE, ASLEEPN_Src, ASLEEPM_Src, ASLEEP1:ASLEEP12,
                                      ZSLEEPNrc, ZSLEEPMrc, ZSLEEP1:ZSLEEP12)
# Calculate hours slept (duration) from bedtime and waketime, both given as
# hours on a 24-hour clock (e.g. bedtime a = 22 means 10pm, waketime b = 6
# means 6am).
#
# Uses modular arithmetic, mirroring the original SAS formula
# mod(24 + (waketime - bedtime), 24), so it is correct whether the bedtime
# falls before or after midnight.  The previous R version, (b - a) + 24, was
# wrong for bedtimes after midnight: bed 1am / wake 9am gave
# (9 - 1) + 24 = 32 hours instead of 8.
HRS_SLEPT <- function(a, b) {
  duration <- (b - a) %% 24
  return(duration)
}
# Derive sleep duration (hours) for wave A and wave Z from each wave's
# bedtime (ASLEEPN_Src / ZSLEEPNrc) and waketime (ASLEEPM_Src / ZSLEEPMrc).
SATSA$SleepDurationA<-HRS_SLEPT(SATSA$ASLEEPN_Src, SATSA$ASLEEPM_Src)
SATSA$SleepDurationZ<-HRS_SLEPT(SATSA$ZSLEEPNrc, SATSA$ZSLEEPMrc)
View(SATSA)
# PREVIOUS CODE: the original SAS implementation this replaces, kept for
# reference.  SAS times were read with the time10. informat (seconds), hence
# the /3600 conversion; mod(..., 24) wraps bedtimes after midnight.
#data SATSA_num; set SATSAs;
#Abedtime= input (ASLEEPN_Src, time10.);
#Awaketime= input (ASLEEPM_Src, time10.);
#Zbedtime= input (ZSLEEPNrc, time10.);
#Zwaketime= input (ZSLEEPMrc, time10.);
#run;
#title 'Calculating sleep duration based on bedtime and waketime';
#/*calculate sleep duration based on waketime and bedtime*/
# data SATSA_num2; set SATSA_num1;
#A_hrs_slept = mod(24+(Awaketime-Abedtime)/3600,24);
#Z_hrs_slept = mod(24+(Zwaketime-Zbedtime)/3600,24);
#run;
# VISUALIZE DATA / formal check of the derived durations against age.
# (Columns are referenced bare inside aes(); the original used SATSA$...,
# which bypasses the piped data and is fragile.)
SATSA %>% ggplot(aes(x = AGE, y = SleepDurationA)) + geom_point()
SATSA %>% ggplot(aes(x = AGE, y = SleepDurationZ)) + geom_point()

# Check the observed ranges
range(SATSA$SleepDurationA, na.rm = TRUE)
range(SATSA$SleepDurationZ, na.rm = TRUE)

# Some outliers: set durations below 2 hours or at/above 15 hours to NA,
# following National Sleep Foundation recommendations for plausible
# sleep duration.
SATSA_cleaned <- SATSA %>% mutate(
  SleepDurationA = ifelse(SleepDurationA >= 2 & SleepDurationA < 15, SleepDurationA, NA),
  SleepDurationZ = ifelse(SleepDurationZ >= 2 & SleepDurationZ < 15, SleepDurationZ, NA)
)

range(SATSA_cleaned$SleepDurationA, na.rm = TRUE)
range(SATSA_cleaned$SleepDurationZ, na.rm = TRUE)
# now the range for SleepDurationA is 5.3-12 and SleepDurationZ is 4.3-11.3

SATSA_cleaned %>% ggplot(aes(x = AGE, y = SleepDurationA)) + geom_point()
SATSA_cleaned %>% ggplot(aes(x = AGE, y = SleepDurationZ)) + geom_point()
# it appears people sleep more hours in the second wave (Z) than in wave A

# Another way to visualize the distributions
SATSA_cleaned %>% ggplot(aes(x = SleepDurationA)) + geom_histogram()
SATSA_cleaned %>% ggplot(aes(x = SleepDurationZ)) + geom_histogram()
# Histogram of wave-A sleep duration for one age group, titled by age.
graphA <- function(i) {
  p <- ggplot(i, aes(x = SleepDurationA)) + geom_histogram() +
    ggtitle(unique(i$AGE))  # every row in a split shares a single AGE value
  print(p)
}

# Histogram of wave-Z sleep duration for one age group, titled by age.
graphZ <- function(q) {
  z <- ggplot(q, aes(x = SleepDurationZ)) + geom_histogram() +
    ggtitle(unique(q$AGE))
  print(z)
}

# Split by age group once (the original recomputed the identical split for
# each wave), then graph both waves per group.
SATSA_agegroup <- split(SATSA_cleaned, as.factor(SATSA_cleaned$AGE))
for (x in SATSA_agegroup) {
  graphA(x)
}
for (h in SATSA_agegroup) {
  graphZ(h)
}
library(dplyr)

# Sanity-check the raw item range before recoding
range(SATSA$ASLEEP1, na.rm = TRUE)

# Dichotomize the sleep items: 1 -> 0, 2-5 -> 1.  Columns 5:16 are the
# wave-A items (ASLEEP1:ASLEEP12) and 19:30 the wave-Z items
# (ZSLEEP1:ZSLEEP12), per the select() order above.  This automates the
# long chain of per-item if/then statements from the original SAS code.
# (The original also built an unused index vector a1 <- c(5:16, 19:30) and
# discarded as.character(a1); that dead code has been removed.)
SATSA_RC <- SATSA %>%
  mutate_at(5:16, recode, '2'='1', '3'='1', '4'='1','5'='1', '1'='0') %>%
  mutate_at(19:30, recode, '2'='1', '3'='1', '4'='1','5'='1', '1'='0')

range(SATSA_RC$ASLEEP1, na.rm = TRUE)

# Previous code example (SAS), replaced by the recoding above:
#if ASLEEP3=1 then ASLEEP3_h=0;
#if ASLEEP3 ge 2 then ASLEEP3_h=1;
#if ASLEEP1=1 then ASLEEP1_h=0;
#if ASLEEP1 ge 2 then ASLEEP1_h=1;
#if ASLEEP4=1 then ASLEEP4_h=0;
#if ASLEEP4 ge 2 then ASLEEP4_h=1;
#if ASLEEP8=1 then ASLEEP8_h=0;
#if ASLEEP8 ge 2 then ASLEEP8_h=1;
#if ASLEEP10=1 then ASLEEP10_h=0;
#if ASLEEP10 ge 2 then ASLEEP10_h=1;
#if ZSLEEP1=1 then ZSLEEP1_h=0;
#if ZSLEEP1 ge 2 then ZSLEEP1_h=1;
#if ZSLEEP3=1 then ZSLEEP3_h=0;
#if ZSLEEP3 ge 2 then ZSLEEP3_h=1;
#if ZSLEEP4=1 then ZSLEEP4_h=0;
#if ZSLEEP4 ge 2 then ZSLEEP4_h=1;
#if ZSLEEP8=1 then ZSLEEP8_h=0;
#if ZSLEEP8 ge 2 then ZSLEEP8_h=1;
#if ZSLEEP10=1 then ZSLEEP10_h=0;
#if ZSLEEP10 ge 2 then ZSLEEP10_h=1;
#if BSLEEP1=1 then BSLEEP1_h=0;
#if BSLEEP1 ge 2 then BSLEEP1_h=1;
#if BSLEEP3=1 then BSLEEP3_h=0;
#if BSLEEP3 ge 2 then BSLEEP3_h=1;
#if BSLEEP4=1 then BSLEEP4_h=0;
#if BSLEEP4 ge 2 then BSLEEP4_h=1;
#if BSLEEP8=1 then BSLEEP8_h=0;
#if BSLEEP8 ge 2 then BSLEEP8_h=1;
#if BSLEEP10=1 then BSLEEP10_h=0;
#if BSLEEP10 ge 2 then BSLEEP10_h=1;

# Standardize column names (lower snake_case) so the same naming convention
# applies when merging with other datasets.
SATSA_clean_names <- SATSA_RC %>%
  janitor::clean_names()
| /R for SATSA_improved.R | no_license | tvo015/Rrepos | R | false | false | 5,227 | r | #Final Project- Tina Vo
# Install only the packages that are missing, then load everything.
# (The original script ran install.packages() unconditionally and loaded
# janitor BEFORE installing it, which fails on a fresh machine.)
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse")
if (!requireNamespace("janitor", quietly = TRUE)) install.packages("janitor")

library(tidyverse)  # attaches dplyr, ggplot2, readr, tidyr, ...
library(vroom)
library(visdat)
library(here)
library(janitor)
library(readr)

# NOTE(review): rm(list = ls()) clears the global environment; kept from the
# original script, but it is unsafe if this file is ever source()d from
# another session.
rm(list = ls()) # Clean out workspace

# Read the SATSA data
SATSA_datascience <- read_csv("~/GitHub/Rrepos/SATSA_datascience.csv")
View(SATSA_datascience)

# Use the visdat package to explore the data (column types / missingness)
vis_dat(SATSA_datascience)

# Keep only the sleep variables of interest in a new dataset:
# wave A bed/wake times and items, wave Z bed/wake times and items.
SATSA <- SATSA_datascience %>% select(ID, AGE, ASLEEPN_Src, ASLEEPM_Src, ASLEEP1:ASLEEP12,
                                      ZSLEEPNrc, ZSLEEPMrc, ZSLEEP1:ZSLEEP12)
#create a function to calculate hours slept (duration) from
#bedtime and waketime, both on a 24-hour clock (e.g. 23.5 = 11:30pm).
#Mirrors the original SAS code mod(24 + (waketime - bedtime), 24): the
#modulo keeps the result in [0, 24) even when bedtime falls after
#midnight -- the previous `(b - a) + 24` returned e.g. 32 hours for
#bedtime = 1 (1am) and waketime = 9 (9am).
HRS_SLEPT <- function(a, b) {
  duration <- ((b - a) + 24) %% 24
  return(duration)
}
SATSA$SleepDurationA<-HRS_SLEPT(SATSA$ASLEEPN_Src, SATSA$ASLEEPM_Src)
SATSA$SleepDurationZ<-HRS_SLEPT(SATSA$ZSLEEPNrc, SATSA$ZSLEEPMrc)
View(SATSA)
#PREVIOUS CODE
#data SATSA_num; set SATSAs;
#Abedtime= input (ASLEEPN_Src, time10.);
#Awaketime= input (ASLEEPM_Src, time10.);
#Zbedtime= input (ZSLEEPNrc, time10.);
#Zwaketime= input (ZSLEEPMrc, time10.);
#run;
#title 'Calculating sleep duration based on bedtime and waketime';
#/*calculate sleep duration based on waketime and bedtime*/
# data SATSA_num2; set SATSA_num1;
#A_hrs_slept = mod(24+(Awaketime-Abedtime)/3600,24);
#Z_hrs_slept = mod(24+(Zwaketime-Zbedtime)/3600,24);
#run;
#VISUALIZE DATA/ formal check of data
SATSA %>% ggplot(aes(x = SATSA$AGE, y = SATSA$SleepDurationA)) + geom_point()
SATSA %>% ggplot(aes(x = SATSA$AGE, y = SATSA$SleepDurationZ)) + geom_point()
#checking data
range(SATSA$SleepDurationA, na.rm=T)
range(SATSA$SleepDurationZ, na.rm=T)
#See some outliers, will create new dataset setting individuals below
#two hours of sleep and above 15 hours of sleep to NA (according to national
#sleep foundation recommendations for what counts as healthy sleep duration)
SATSA_cleaned <- SATSA %>% mutate(
SleepDurationA = ifelse(SleepDurationA >= 2 & SleepDurationA < 15, SleepDurationA, NA),
SleepDurationZ = ifelse(SleepDurationZ >= 2 & SleepDurationZ < 15, SleepDurationZ, NA)
)
range(SATSA_cleaned$SleepDurationA, na.rm = T)
range(SATSA_cleaned$SleepDurationZ, na.rm = T)
#now the range for SleepDurationA is 5.3 and 12 and SleepDurationZ is 4.3 and 11.3
SATSA_cleaned %>% ggplot(aes(x = AGE, y = SleepDurationA)) + geom_point()
SATSA_cleaned %>% ggplot(aes(x = AGE, y = SleepDurationZ)) + geom_point()
#appears that people are sleeping more hours in the second wave (Z) compared to first wave (A)
#another way to visualize data
SATSA_cleaned %>% ggplot(aes(x = SleepDurationA)) + geom_histogram()
SATSA_cleaned %>% ggplot(aes(x = SleepDurationZ)) + geom_histogram()
# Histograms of wave-1 sleep duration, one plot per age group.
# Plot a histogram of SleepDurationA for one age-group subset, titled
# with that subset's AGE values.
graphA <- function(age_df) {
  hist_plot <- ggplot(age_df, aes(x = SleepDurationA)) +
    geom_histogram() +
    ggtitle(age_df$AGE)
  print(hist_plot)
}
# Split the cleaned data by age and draw each group's histogram.
SATSA_agegroup <- split(SATSA_cleaned, as.factor(SATSA_cleaned$AGE))
invisible(lapply(SATSA_agegroup, graphA))
# Histograms of wave-2 (Z) sleep duration, one plot per age group.
# Plot a histogram of SleepDurationZ for one age-group subset, titled
# with that subset's AGE values.
graphZ <- function(age_df) {
  hist_plot <- ggplot(age_df, aes(x = SleepDurationZ)) +
    geom_histogram() +
    ggtitle(age_df$AGE)
  print(hist_plot)
}
# Re-split by age (same grouping as above) and draw each histogram.
SATSA_agegroup <- split(SATSA_cleaned, as.factor(SATSA_cleaned$AGE))
invisible(lapply(SATSA_agegroup, graphZ))
library(dplyr)
range(SATSA$ASLEEP1, na.rm=T)
a1<-c(5:16, 19:30)
as.character(a1)
#automation of recoding rather than if then statements
SATSA_RC<-SATSA %>%
mutate_at(5:16, recode, '2'='1', '3'='1', '4'='1','5'='1', '1'='0') %>%
mutate_at(19:30, recode, '2'='1', '3'='1', '4'='1','5'='1', '1'='0')
range(SATSA_RC$ASLEEP1, na.rm=T)
#Previous code example
#if ASLEEP3=1 then ASLEEP3_h=0;
#if ASLEEP3 ge 2 then ASLEEP3_h=1;
#if ASLEEP1=1 then ASLEEP1_h=0;
#if ASLEEP1 ge 2 then ASLEEP1_h=1;
#if ASLEEP4=1 then ASLEEP4_h=0;
#if ASLEEP4 ge 2 then ASLEEP4_h=1;
#if ASLEEP8=1 then ASLEEP8_h=0;
#if ASLEEP8 ge 2 then ASLEEP8_h=1;
#if ASLEEP10=1 then ASLEEP10_h=0;
#if ASLEEP10 ge 2 then ASLEEP10_h=1;
#if ZSLEEP1=1 then ZSLEEP1_h=0;
#if ZSLEEP1 ge 2 then ZSLEEP1_h=1;
#if ZSLEEP3=1 then ZSLEEP3_h=0;
#if ZSLEEP3 ge 2 then ZSLEEP3_h=1;
#if ZSLEEP4=1 then ZSLEEP4_h=0;
#if ZSLEEP4 ge 2 then ZSLEEP4_h=1;
#if ZSLEEP8=1 then ZSLEEP8_h=0;
#if ZSLEEP8 ge 2 then ZSLEEP8_h=1;
#if ZSLEEP10=1 then ZSLEEP10_h=0;
#if ZSLEEP10 ge 2 then ZSLEEP10_h=1;
#if BSLEEP1=1 then BSLEEP1_h=0;
#if BSLEEP1 ge 2 then BSLEEP1_h=1;
#if BSLEEP3=1 then BSLEEP3_h=0;
#if BSLEEP3 ge 2 then BSLEEP3_h=1;
#if BSLEEP4=1 then BSLEEP4_h=0;
#if BSLEEP4 ge 2 then BSLEEP4_h=1;
#if BSLEEP8=1 then BSLEEP8_h=0;
#if BSLEEP8 ge 2 then BSLEEP8_h=1;
#if BSLEEP10=1 then BSLEEP10_h=0;
#if BSLEEP10 ge 2 then BSLEEP10_h=1;
#having clean names and same naming conventions (lettercase etc.) will
#allow for efficient merging with other datasets
SATSA_clean_names<-SATSA_RC%>%
janitor::clean_names()
|
#-- script to do some QC on Tim's line by line data
rm(list = objects())
options(stringsAsFactors = FALSE,
scipen = 200)
library(wrangleR)
read_in_xlsx <- function(f) {
  # Read an Excel workbook into a data frame, echoing "path - RxC" so the
  # caller can eyeball the dimensions of what was loaded.
  require(openxlsx)
  out <- read.xlsx(f)
  cat(sprintf("%s - %dRx%dC\n", f, nrow(out), ncol(out)))
  out
}
lbl <- read_in_xlsx("/Users/simonthompson/scratch/210429 AF Database v1.xlsx")
# check that we don't have different status for same participant
d <- unique(lbl[, c("Participant", "Can.be.included.in.Afs")])
table(duplicated(d$Participant))
| /accessory_scripts/check_tim_line_by_line.r | no_license | genomicsengland/af_letter_distribution_dataset | R | false | false | 578 | r | #-- script to do some QC on Tim's line by line data
rm(list = objects())
options(stringsAsFactors = FALSE,
scipen = 200)
library(wrangleR)
read_in_xlsx <- function(f) {
  # Read an Excel workbook into a data frame and report its dimensions
  # as "path - RxC" on stdout.
  #
  # Args:
  #   f: path to an .xlsx file.
  # Returns: a data.frame with the first sheet's contents.
  #
  # requireNamespace() errors loudly when the dependency is absent,
  # instead of the original unchecked require(), which only warned and
  # then failed later with a confusing "could not find function" error.
  if (!requireNamespace("openxlsx", quietly = TRUE)) {
    stop("package 'openxlsx' is required by read_in_xlsx()", call. = FALSE)
  }
  d <- openxlsx::read.xlsx(f)
  cat(paste0(f, " - ", nrow(d), "Rx", ncol(d), "C\n"))
  d
}
lbl <- read_in_xlsx("/Users/simonthompson/scratch/210429 AF Database v1.xlsx")
# check that we don't have different status for same participant
d <- unique(lbl[, c("Participant", "Can.be.included.in.Afs")])
table(duplicated(d$Participant))
|
RMauthClient<-setClass(
"RMauthClient",
slots=c(
app_uuid="character",
mauth_base_url="character",
mauth_api_version="character",
private_key="character"
)
)
# Initializer for RMauthClient: every configuration argument must be
# supplied (only the API version carries a usable default); the object's
# slots are filled only after validation succeeds.
setMethod("initialize",
          "RMauthClient",
          function(.Object,
                   app_uuid=NULL,
                   mauth_base_url=NULL,
                   mauth_api_version="v1",
                   private_key=NULL
          )
          {
            # A plain loop over argument names replaces the original
            # lapply(seq(1, length(...))) + eval(as.symbol(...)) indirection;
            # evaluation order -- and thus which missing config is reported
            # first -- is unchanged.
            requiredConfigs <- c("app_uuid", "mauth_base_url", "mauth_api_version", "private_key")
            for (cfg in requiredConfigs) {
              if (is.null(get(cfg, envir = environment()))) {
                stop(paste("missing config element", cfg))
              }
            }
            .Object@app_uuid <- app_uuid
            .Object@mauth_base_url <- mauth_base_url
            .Object@mauth_api_version <- mauth_api_version
            .Object@private_key <- private_key
            .Object
          })
# Build the MAuth (MWS) signature headers for one request.
#
# The canonical string (verb, route, body, app UUID, epoch seconds) is
# SHA-512 hashed, padded, and signed with the client's RSA private key;
# the base64 signature is carried in X-MWS-Authentication and the
# timestamp in X-MWS-Time.  Returns a named list of the three headers.
composeMAuthHeader<-function(RMauthClientObject, method, base_url, route, body="")
{
  # Load the client's PEM-encoded private key.  NOTE(review): `private = T`
  # uses the reassignable T; TRUE would be safer.
  load_pk<-function()
  {
    PKI.load.key(what=RMauthClientObject@private_key, format = "PEM", private = T)
  }
  # Assemble the three MAuth headers from the signature and timestamp.
  make_headers<-function(app_uuid, signature, time)
  {
    list(
      'X-MWS-Authentication' = sprintf('MWS %s:%s',app_uuid,signature),
      'X-MWS-Time' = time,
      'Content-Type' = 'application/json;charset=utf-8')
  }
  # Canonical newline-joined string-to-sign, returned as its SHA-512 digest.
  make_request_string<-function(app_uuid, route, http_req_method, message_body, time)
  {
    s<-sprintf('%s\n%s\n%s\n%s\n%s', http_req_method, route, message_body, app_uuid, time)
    sha512(s)
  }
  # PKCS#1 v1.5-style padding: 0x00 0x01 FF..FF 0x00 || digest, sized to
  # `keylength` bytes (256 below, i.e. a 2048-bit key is assumed --
  # TODO confirm the key size matches).
  generate_padding<-function(hashed_bin, keylength)
  {
    padding_length=keylength-length(hashed_bin)-3
    as.raw(c(0x00,0x01, rep(0xff, padding_length),0x00,hashed_bin))
  }
  # Raw RSA private-key operation over the padded digest, i.e. a signature.
  sign_request<-function(request_string, pk)
  {
    PKI.pencrypt(generate_padding(charToRaw(request_string),256), key=pk)
  }
  request_time<-as.character(as.integer(Sys.time()))
  private_key<-load_pk()
  signed_string<-sign_request(make_request_string(RMauthClientObject@app_uuid, route, method, body, request_time),
                              private_key)
  make_headers(RMauthClientObject@app_uuid, base64_encode(signed_string), request_time)
}
# Execute a GET or POST against base_url + route with MAuth signature
# headers attached.  `header_overrides` may add extra headers; supplying
# a Content-Type override replaces the signed default one.
makeMAuthCall<-function(RMauthClientObject, method, base_url, route, body="", header_overrides=NULL)
{
  hdr <- composeMAuthHeader(RMauthClientObject, method, base_url, route, body)
  if (!is.null(header_overrides)) {
    # An explicit Content-Type override wins over the default header.
    if (!is.null(header_overrides$`Content-Type`)) {
      hdr$`Content-Type` <- NULL
    }
    hdr <- append(hdr, header_overrides)
  }
  # httr expects a named character vector, not a list.
  hdr <- setNames(as.character(hdr), names(hdr))
  if (method == "GET") {
    GET(paste(base_url, route, sep = ""),
        add_headers(.headers = hdr))
  } else if (method == "POST") {
    POST(paste(base_url, route, sep = ""),
         add_headers(.headers = hdr),
         body = body)
  } else {
    stop("Not Supported HTTP Verb. Please use only GET or POST.")
  }
}
} | /R/RMauthClient.R | permissive | jthomson/RMauthClient | R | false | false | 3,140 | r | RMauthClient<-setClass(
"RMauthClient",
slots=c(
app_uuid="character",
mauth_base_url="character",
mauth_api_version="character",
private_key="character"
)
)
# Initializer for RMauthClient: every configuration slot must be supplied
# explicitly (only the API version has a usable default).
setMethod("initialize",
          "RMauthClient",
          function(.Object,
                   app_uuid=NULL,
                   mauth_base_url=NULL,
                   mauth_api_version="v1",
                   private_key=NULL
          )
          {
            # Stop at the first argument that is still NULL;
            # eval(as.symbol(.)) looks each name up lexically in this
            # method's frame.
            requiredConfigs<- c("app_uuid", "mauth_base_url", "mauth_api_version", "private_key")
            lapply(seq(1,length(requiredConfigs)), function(c){
              if(is.null(eval(as.symbol(requiredConfigs[c])))){
                stop(paste("missing config element", requiredConfigs[c]))
              }
            })
            # Copy the validated values into the object's slots.
            .Object@app_uuid <- app_uuid
            .Object@mauth_base_url <- mauth_base_url
            .Object@mauth_api_version <- mauth_api_version
            .Object@private_key <- private_key
            .Object
          })
composeMAuthHeader<-function(RMauthClientObject, method, base_url, route, body="")
{
load_pk<-function()
{
PKI.load.key(what=RMauthClientObject@private_key, format = "PEM", private = T)
}
make_headers<-function(app_uuid, signature, time)
{
list(
'X-MWS-Authentication' = sprintf('MWS %s:%s',app_uuid,signature),
'X-MWS-Time' = time,
'Content-Type' = 'application/json;charset=utf-8')
}
make_request_string<-function(app_uuid, route, http_req_method, message_body, time)
{
s<-sprintf('%s\n%s\n%s\n%s\n%s', http_req_method, route, message_body, app_uuid, time)
sha512(s)
}
generate_padding<-function(hashed_bin, keylength)
{
padding_length=keylength-length(hashed_bin)-3
as.raw(c(0x00,0x01, rep(0xff, padding_length),0x00,hashed_bin))
}
sign_request<-function(request_string, pk)
{
PKI.pencrypt(generate_padding(charToRaw(request_string),256), key=pk)
}
request_time<-as.character(as.integer(Sys.time()))
private_key<-load_pk()
signed_string<-sign_request(make_request_string(RMauthClientObject@app_uuid, route, method, body, request_time),
private_key)
make_headers(RMauthClientObject@app_uuid, base64_encode(signed_string), request_time)
}
makeMAuthCall<-function(RMauthClientObject, method, base_url, route, body="", header_overrides=NULL)
{
mAuthHeader<-composeMAuthHeader(RMauthClientObject, method, base_url, route, body)
if(!is.null(header_overrides) && !is.null(header_overrides$`Content-Type`)){
mAuthHeader$`Content-Type`<-NULL
mAuthHeader<-append(mAuthHeader, header_overrides)
} else if (!is.null(header_overrides)){
mAuthHeader<-append(mAuthHeader, header_overrides)
}
mAuthHeader<-setNames(as.character(mAuthHeader), names(mAuthHeader))
if(method=="GET")
{
GET(paste(base_url,route,sep = ""),
add_headers(.headers = mAuthHeader))
} else if (method=="POST"){
POST(paste(base_url,route,sep = ""),
add_headers(.headers = mAuthHeader),
body=body)
} else {
stop("Not Supported HTTP Verb. Please use only GET or POST.")
}
} |
targets::tar_test("tar_knitr_deps()", {
  skip_pandoc()
  # Compose a minimal R Markdown report whose single chunk loads one
  # target and reads another.
  make_report <- function(load_target, read_target) {
    c(
      "---",
      "title: report",
      "output_format: html_document",
      "---",
      "",
      "```{r}",
      paste0("tar_load(", load_target, ")"),
      paste0("tar_read(", read_target, ")"),
      "```"
    )
  }
  path_a <- tempfile()
  path_b <- tempfile()
  writeLines(make_report("data1", "data2"), path_a)
  writeLines(make_report("data2", "data3"), path_b)
  # Dependencies are pooled and deduplicated across both reports.
  deps <- tar_knitr_deps(c(path_a, path_b))
  expect_equal(sort(deps), sort(c("data1", "data2", "data3")))
})
| /tests/testthat/test-tar_knitr_deps.R | permissive | fkohrt/tarchetypes | R | false | false | 639 | r | targets::tar_test("tar_knitr_deps()", {
skip_pandoc()
lines1 <- c(
"---",
"title: report",
"output_format: html_document",
"---",
"",
"```{r}",
"tar_load(data1)",
"tar_read(data2)",
"```"
)
lines2 <- c(
"---",
"title: report",
"output_format: html_document",
"---",
"",
"```{r}",
"tar_load(data2)",
"tar_read(data3)",
"```"
)
report1 <- tempfile()
report2 <- tempfile()
writeLines(lines1, report1)
writeLines(lines2, report2)
out <- tar_knitr_deps(c(report1, report2))
exp <- c("data1", "data2", "data3")
expect_equal(sort(out), sort(exp))
})
|
################################################
# classes defined in the cplm package
################################################
# virtual classes used in other class definitions
setClassUnion("NullNum", c("NULL","numeric"))
setClassUnion("NullList", c("NULL","list"))
setClassUnion("NullFunc", c("NULL","function"))
setClassUnion("ListFrame", c("list","data.frame"))
# import from package coda
setOldClass(c("mcmc", "mcmc.list", "summary.mcmc"))
## -------------------- lmer-related Classes --------------------------------
setOldClass("data.frame")
setOldClass("family")
setOldClass("logLik")
setClass("mer",
representation(## original data
env = "environment",# evaluation env for nonlinear model
nlmodel = "call",# nonlinear model call
frame = "data.frame",# model frame (or empty frame)
call = "call", # matched call
flist = "data.frame", # list of grouping factors
X = "matrix", # fixed effects model matrix
Xst = "dgCMatrix", # sparse fixed effects model matrix
Zt = "dgCMatrix",# sparse form of Z'
pWt = "numeric",# prior weights,
offset = "numeric", # length 0 -> no offset
y = "numeric", # response vector
###FIXME: Eliminate the cnames slot. Put the names on the elements of the ST slot.
# cnames = "list", # row/column names of els of ST
Gp = "integer", # pointers to row groups of Zt
dims = "integer",# dimensions and indicators
## slots that vary during optimization
ST = "list", #
V = "matrix", # gradient matrix
A = "dgCMatrix", # (ZTS)'
Cm = "dgCMatrix", # AH'G^{-1}W^{1/2} when s > 0
Cx = "numeric", # x slot of Cm when s == 1 (full Cm not stored)
L = "CHMfactor", # Cholesky factor of weighted P(AA' + I)P'
deviance = "numeric", # ML and REML deviance and components
fixef = "numeric",# fixed effects (length p)
ranef = "numeric",# random effects (length q)
u = "numeric", # orthogonal random effects (q)
eta = "numeric", # unbounded predictor
mu = "numeric", # fitted values at current beta and b
muEta = "numeric",# d mu/d eta evaluated at current eta
var = "numeric", # conditional variances of Y
resid = "numeric",# raw residuals at current beta and b
sqrtXWt = "matrix",# sqrt of model matrix row weights
sqrtrWt = "numeric",# sqrt of weights used with residuals
RZX = "matrix", # dense sol. to L RZX = ST'ZtX = AX
RX = "matrix", # Cholesky factor of downdated X'X
ghx = "numeric", # zeros of Hermite polynomial
ghw = "numeric"))
## -------------------- End lmer-related Classes --------------------------------
# class defining slots common to all derived classes
setClass("cplm",
representation(
call = "call",
formula = "formula",
contrasts = "NullList",
link.power = "numeric",
model.frame = "ListFrame",
inits = "NullList")
)
# class of "cpglm", returned by a call to "cpglm"
setClass("cpglm",
representation(
coefficients = "numeric",
residuals = "numeric",
fitted.values = "numeric",
linear.predictors = "numeric",
y = "numeric",
offset = "NullNum",
prior.weights = "NullNum",
weights = "numeric",
df.residual = "integer",
deviance = "numeric",
aic = "numeric",
control = "list",
p = "numeric",
phi = "numeric",
iter = "integer",
converged = "logical",
na.action = "NullFunc",
vcov = "matrix"),
contains = "cplm"
)
# class of "cpglm", returned by a call to "cpglm"
setClass("zcpglm",
representation(
coefficients = "list",
residuals = "numeric",
fitted.values = "numeric",
y = "numeric",
offset = "list",
prior.weights = "numeric",
df.residual = "integer",
llik = "numeric",
control = "list",
p = "numeric",
phi ="numeric",
converged = "logical",
na.action = "NullFunc",
vcov = "matrix"),
contains = "cplm"
)
# class "cpglmm" returned from a call of cpglmm
setClass("cpglmm",
representation(
p = "numeric",
phi = "numeric",
bound.p = "numeric",
vcov = "matrix",
smooths = "list"),
contains = c("cplm", "mer")
)
# class "summary.cpglmm"
setClass("summary.cpglmm",
representation(
methTitle = "character",
logLik= "logLik",
ngrps = "integer",
sigma = "numeric", # scale, non-negative number
coefs = "matrix",
REmat = "matrix",
AICtab= "data.frame"),
contains = "cpglmm"
)
# class "bcplm_input"
setClass("bcplm_input",
representation(
X = "matrix",
y = "numeric",
Zt = "dgCMatrix",
ygt0 = "integer",
offset = "numeric",
pWt = "numeric",
mu = "numeric",
eta = "numeric",
inits = "list",
fixef = "numeric",
u = "numeric",
phi = "numeric",
p = "numeric",
link.power = "numeric",
pbeta.mean = "numeric",
pbeta.var = "numeric",
bound.phi = "numeric",
bound.p = "numeric",
mh.sd = "numeric",
dims = "integer",
k = "integer",
Sigma = "list",
cllik = "numeric",
Xb = "numeric",
Zu = "numeric",
Gp = "integer",
ncol = "integer",
nlev = "integer",
accept = "numeric")
)
# class of "bcplm"
setClass("bcplm",
representation(
dims = "integer",
sims.list = "mcmc.list",
summary = "summary.mcmc",
prop.sd = "list",
Zt = "dgCMatrix",
flist = "list",
Sigma = "list"),
contains="cplm"
)
################################################
# methods defined for cplm
################################################
# extraction of slots using $
setMethod("$",
signature(x = "cplm"),
function(x, name) slot(x,name)
)
# names to get slot names
setMethod("names",
signature(x = "cplm"),
function(x) slotNames(x)
)
# extraction of slots using "[["
setMethod("[[",
signature(x = "cplm", i = "numeric", j = "missing"),
function (x, i, j, ...) slot(x,names(x)[i])
)
setMethod("[[",
signature(x = "cplm", i = "character", j = "missing"),
function (x, i, j, ...) slot(x, i)
)
setMethod("[",
signature(x = "cplm", i = "numeric",
j = "missing", drop = "missing"),
function (x, i, j, ..., drop) {
output <- lapply(i, function(y) slot(x, names(x)[y]))
names(output) <- names(x)[i]
return(output)
}
)
setMethod("[",
signature(x = "cplm",i = "character",
j = "missing", drop = "missing"),
function (x, i, j, ..., drop) {
output <- lapply(1:length(i), function(y) slot(x, i[y]))
names(output) <- i
return(output)
}
)
setMethod("terms",
signature(x = "cplm"),
function (x, ...) attr(x@model.frame, "terms")
)
setMethod("model.matrix",
signature(object = "cplm"),
function (object,...)
model.matrix(attr(object@model.frame, "terms"),
object@model.frame, object@contrasts)
)
setMethod("formula",
signature(x = "cplm"),
function (x, ...) x@formula
)
setMethod("show",
signature(object = "cplm"),
function(object) summary(object)
)
setMethod("vcov",
signature(object = "cplm"),
function(object, ...) object@vcov
)
# S3 method: extract the stored model frame from a "cplm" object.  (The
# first argument is named `formula` only to match model.frame's generic.)
model.frame.cplm <- function (formula, ...)
{
  slot(formula, "model.frame")
}
################################################
# methods defined for cpglm
################################################
setMethod("coef",
signature(object = "cpglm"),
function (object, ...) object@coefficients
)
setMethod("residuals",
signature(object = "cpglm"),
function (object, type = c("deviance", "pearson", "working",
"response", "partial"), ...) {
type <- match.arg(type)
y <- object@y
r <- object@residuals
mu <- object@fitted.values
wts <- object@prior.weights
family <- tweedie(var.power = object@p,link.power = object@link.power)
switch(type, deviance = , pearson = , response = if (is.null(y)) {
eta <- object@linear.predictors
y <- mu + r * family$mu.eta(eta)
})
res <- switch(type,
deviance = if (object@df.residual > 0) {
d.res <- sqrt(pmax((family$dev.resids)(y, mu,
wts), 0))
ifelse(y > mu, d.res, -d.res)
} else rep.int(0, length(mu)),
pearson = (y - mu) * sqrt(wts)/sqrt(family$variance(mu)),
working = r,
response = y - mu,
partial = r)
na.action <- attr(object@model.frame,"na.action")
if (!is.null(na.action))
res <- naresid(na.action, res)
#if (type == "partial")
# res <- res + predict(object, type = "terms")
res
}
)
setMethod("resid",
signature(object = "cpglm"),
function (object, type = c("deviance", "pearson", "working",
"response", "partial"), ...)
return(residuals(object, type = type))
)
# generate fitted values on the original scale
setMethod("fitted",
signature(object = "cpglm"),
function (object, ...) object@fitted.values
)
setMethod("AIC",
signature(object = "cpglm",k = "missing" ),
function (object, ..., k) object@aic
)
setMethod("deviance",
signature(object = "cpglm"),
function (object, ...) object@deviance
)
setMethod("summary", signature(object = "cpglm"),
function(object,...){
coef.beta <- coef(object)
vc <- vcov(object)
s.err <- sqrt(diag(vc))
err.beta <- s.err
test.value <- coef.beta / err.beta
dn <- c("Estimate", "Std. Error")
pvalue <- 2 * pt(-abs(test.value), object@df.residual)
coef.table <- cbind(coef.beta, err.beta, test.value, pvalue)
dn2 <- c("t value", "Pr(>|t|)")
dimnames(coef.table) <- list(names(coef.beta), c(dn, dn2))
keep <- match(c("call", "deviance", "aic", "contrasts", "df.residual",
"iter","na.action"), names(object), 0L)
ans <- c(object[keep], list(deviance.resid = residuals(object,
type = "deviance"), coefficients = coef.table,
dispersion = object@phi, vcov = vc, p = object@p))
.print.cpglm.summary(ans)
}
)
.print.cpglm.summary<-function(x,digits = max(3, getOption("digits") - 3),
signif.stars = getOption("show.signif.stars"), ...){
cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Deviance Residuals: \n")
if (x$df.residual > 5) {
x$deviance.resid <- quantile(x$deviance.resid, na.rm = TRUE)
names(x$deviance.resid) <- c("Min", "1Q", "Median", "3Q",
"Max")
}
xx <- zapsmall(x$deviance.resid, digits + 1)
print.default(xx, digits = digits, na.print = "", print.gap = 2)
printCoefmat(x$coefficients, digits = digits, signif.stars = signif.stars,
na.print = "NA",...)
cat("\nEstimated dispersion parameter:",
format(x$dispersion, digits = max(5, digits + 1)))
cat("\nEstimated index parameter:",
format(x$p, digits = max(5, digits + 1)),"\n\n")
cat("Residual deviance:", format(x$deviance, digits = max(5, digits + 1)),
" on", format(x$df.residual), " degrees of freedom\n")
if (nzchar(mess <- naprint(x$na.action)))
cat(" (", mess, ")\n", sep = "")
cat("AIC: ", format(x$aic, digits = max(4, digits + 1)), "\n\n")
cat("Number of Fisher Scoring iterations: ", x$iter, "\n")
cat("\n")
invisible(x)
}
# simple prediction method for cpglm
setMethod("predict", signature(object = "cpglm"),
function (object, newdata, type = c("response", "link"),
na.action = na.pass, ...) {
tt <- attr(object@model.frame, "terms")
if (missing(newdata) || is.null(newdata)) {
X <- model.matrix(object)
offset <- object$offset
}
else {
Terms <- delete.response(tt)
xlevels <- .getXlevels(Terms, object@model.frame)
m <- model.frame(Terms, newdata, na.action = na.action, xlev = xlevels)
X <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
offset <- rep(0, nrow(X))
if (!is.null(off.num <- attr(tt, "offset")))
for (i in off.num) offset <- offset + eval(attr(tt,
"variables")[[i + 1]], newdata)
if (!is.null(object$call$offset))
offset <- offset + eval(object$call$offset, newdata)
}
beta <- object$coefficients
na.ps <- which(is.na(beta))
if (length(na.ps)) {
predictor <- X[, -na.ps, drop = FALSE] %*% beta[-na.ps]
warning("prediction from a rank-deficient fit may be misleading")
} else {
predictor <- X%*% beta
}
if (!is.null(offset))
predictor <- predictor + offset
mu <- tweedie(link.power = object@link.power)$linkinv(predictor)
type <- match.arg(type)
switch(type,link = predictor, response = mu)
})
################################################
# methods defined for cpglmm
################################################
# Variance-covariance matrix of the fixed effects for a cpglmm fit:
# phi * (X'X)^{-1}, recovered from the Cholesky factor RX.
setMethod("vcov", signature(object = "cpglmm"),
    function(object, ...){
      # Invert the downdated X'X via its Cholesky factor and scale by
      # the dispersion estimate phi.
      rr <- object$phi * chol2inv(object@RX, size = object@dims['p'])
      nms <- colnames(object@X)
      dimnames(rr) <- list(nms, nms)
      # Deliberately disabled dead code: a numeric vcov for (phi, p)
      # from a Hessian of the deviance.  NOTE(review): it references an
      # undefined `hess()` helper -- confirm before ever enabling.
      if (FALSE){
        # compute vcov for phi and p numerically
        cpglmm_dev <- function(x, ...){
          parm <- c(.Call("cpglmm_ST_getPars", object),
                    object$fixef, log(x[1]), x[2])
          .Call("cpglmm_update_dev", object, parm)
        }
        x <- c(object$phi, object$p)
        hs <- hess(x, cpglmm_dev)
        dimnames(hs) <- list(c("phi", "p"), c("phi", "p"))
        attr(rr,"phi_p") <- solve(hs)
      }
      rr
    })
setGeneric("VarCorr", function(x, ...) standardGeneric("VarCorr"))
setMethod("VarCorr", signature(x = "cpglmm"),
function(x, ...){
sc <- sqrt(x@phi)
ans <- lapply(cc <- .Call("cpglmm_ST_chol", x),
function(ch) {
val <- crossprod(sc * ch) # variance-covariance
stddev <- sqrt(diag(val))
correl <- t(val / stddev)/stddev
diag(correl) <- 1
attr(val, "stddev") <- stddev
attr(val, "correlation") <- correl
val
})
fl <- x@flist
names(ans) <- names(fl)[attr(fl, "assign")]
attr(ans, "sc") <- sc
ans
})
setMethod("logLik", signature(object="cpglmm"),
function(object, REML = NULL, ...)
### Extract the log-likelihood or restricted log-likelihood
{
dims <- object@dims
if (is.null(REML) || is.na(REML[1]))
REML <- dims[["REML"]]
val <- -object@deviance["ML"]/2
attr(val, "nall") <- attr(val, "nobs") <- dims[["n"]]
attr(val, "df") <-
dims[["p"]] + dims[["np"]] + as.logical(dims[["useSc"]])
attr(val, "REML") <- as.logical(REML)
class(val) <- "logLik"
val
})
setMethod("summary", signature(object = "cpglmm"),
function(object, ...){
fcoef <- fixef(object)
vcov <- object@vcov
dims <- object@dims
coefs <- cbind("Estimate" = fcoef, "Std. Error" = sqrt(diag(vcov)) )
llik <- logLik(object)
dev <- object@deviance
mType <- "LMM"
mName <- "Compound Poisson linear"
method <- paste("the", if(dims[["nAGQ"]] == 1) "Laplace" else
"adaptive Gaussian Hermite","approximation")
AICframe <- data.frame(AIC = AIC(llik), BIC = BIC(llik),
logLik = as.vector(llik),
deviance = dev[["ML"]],
row.names = "")
varcor <- VarCorr(object)
REmat <- formatVC(varcor)
if (is.na(attr(varcor, "sc")))
REmat <- REmat[-nrow(REmat), , drop = FALSE]
if (nrow(coefs) > 0) {
if (!dims[["useSc"]]) {
coefs <- coefs[, 1:2, drop = FALSE]
stat <- coefs[,1]/coefs[,2]
pval <- 2*pnorm(abs(stat), lower.tail = FALSE)
coefs <- cbind(coefs, "z value" = stat, "Pr(>|z|)" = pval)
} else {
stat <- coefs[,1]/coefs[,2]
##pval <- 2*pt(abs(stat), coefs[,3], lower = FALSE)
coefs <- cbind(coefs, "t value" = stat) #, "Pr(>|t|)" = pval)
}
}
new("summary.cpglmm", object,
methTitle = paste(mName, "mixed model fit by", method),
logLik = llik,
ngrps = sapply(object@flist, function(x) length(levels(x))),
sigma = sqrt(object@phi),
coefs = coefs,
REmat = REmat,
AICtab = AICframe)
}
)
## This is modeled a bit after print.summary.lm :
print.cpglmm <- function(x, digits = max(3, getOption("digits") - 3),
correlation = FALSE, symbolic.cor = FALSE,
signif.stars = getOption("show.signif.stars"), ...){
so <- summary(x)
llik <- so@logLik
dev <- so@deviance
dims <- x@dims
cat(so@methTitle, "\n")
if (!is.null(x@call$formula))
cat("Formula:", deparse(x@call$formula),"\n")
if (!is.null(x@call$data))
cat(" Data:", deparse(x@call$data), "\n")
if (!is.null(x@call$subset))
cat(" Subset:", deparse(x@call$subset),"\n")
print(so@AICtab, digits = digits)
cat("Random effects:\n")
print(so@REmat, quote = FALSE, digits = digits, ...)
ngrps <- so@ngrps
cat(sprintf("Number of obs: %d, groups: ", dims[["n"]]))
cat(paste(paste(names(ngrps), ngrps, sep = ", "), collapse = "; "))
cat("\n")
if (nrow(so@coefs) > 0) {
cat("\nFixed effects:\n")
printCoefmat(so@coefs, zap.ind = 3, #, tst.ind = 4
digits = digits, signif.stars = signif.stars)
cat("\nEstimated dispersion parameter:", round(so@phi, digits=digits))
cat("\n")
cat("Estimated index parameter:", round(so@p, digits=digits))
cat("\n")
if(correlation) {
corF <- so@vcov@factors$correlation
if (!is.null(corF)) {
p <- ncol(corF)
if (p > 1) {
rn <- rownames(so@coefs)
rns <- abbreviate(rn, minlength=11)
cat("\nCorrelation of Fixed Effects:\n")
if (is.logical(symbolic.cor) && symbolic.cor) {
corf <- as(corF, "matrix")
dimnames(corf) <- list(rns,
abbreviate(rn, minlength=1, strict=TRUE))
print(symnum(corf))
} else {
corf <- matrix(format(round(corF@x, 3), nsmall = 3),
ncol = p,dimnames = list(rns, abbreviate(rn, minlength=6)))
corf[!lower.tri(corf)] <- ""
print(corf[-1, -p, drop=FALSE], quote = FALSE)
}
}
}
}
}
invisible(x)
}
setMethod("print", "cpglmm", print.cpglmm)
setMethod("show", "cpglmm",
function(object) print.cpglmm(object)
)
# predict method for cpglmm
# Build the transposed random-effects design matrix Z' for new data,
# using the grouping-factor levels observed in the original model frame
# so predictions line up with the fitted random effects.
#
# Args:
#   formula: the original mixed-model formula; bar terms (x | g) are
#            extracted from its right-hand side.
#   oldmf:   model frame used at fit time (defines the factor levels).
#   newmf:   model frame built from the new data.
# Returns: a sparse matrix -- the vertical concatenation of the per-term
#   Zt blocks (rows = random-effect coefficients, cols = observations).
getZt <- function(formula, oldmf, newmf){
  bars <- expandSlash(findbars(formula[[3]]))
  names(bars) <- unlist(lapply(bars, function(x) deparse(x[[3]])))
  fl <- lapply(bars, function(x) {
    # Levels seen at fit time, and the new data coerced onto them.
    oldlvl <- eval(substitute(levels(as.factor(fac)[, drop = TRUE]),
                              list(fac = x[[3]])), oldmf)
    ff <- eval(substitute(factor(fac, levels = oldlvl)[, drop = TRUE],
                          list(fac = x[[3]])), newmf)
    # Indicator matrix for the new data; im2 pads all-zero rows for any
    # fit-time level absent from the new data.
    im <- as(ff, "sparseMatrix")
    im2 <- Matrix(0, nrow = length(oldlvl), ncol = length(ff), sparse = TRUE)
    for (i in seq_len(nrow(im))){
      ind <- match(rownames(im)[i], oldlvl)
      im2[as.numeric(ind), ] <- im[as.numeric(i), ]
    }
    if (!isTRUE(validObject(im, test = TRUE)))
      stop("invalid conditioning factor in random effect: ",
           format(x[[3]]))
    # Model matrix for the LHS of the bar term; one Zt block per column.
    mm <- model.matrix(eval(substitute(~expr, list(expr = x[[2]]))), newmf)
    mm <- mm[!is.na(ff), , drop = FALSE]
    # rbind replaces the defunct Matrix::rBind (same sparse row-binding).
    Zt <- do.call(rbind, lapply(seq_len(ncol(mm)),
                                function(j) {
                                  im2@x <- mm[, j]
                                  im2
                                }))
    ans <- list(f = oldlvl, Zt = Zt)
    ans
  })
  # Reorder terms by decreasing number of levels (as lme4 does).  The
  # original tested `any(diff(nlev)) > 0`, which fired whenever any two
  # adjacent counts differed; the comparison belongs inside any().
  # NOTE(review): el$f is a character vector of levels, so levels(el$f)
  # is NULL and nlev is always 0 here -- confirm whether f should store
  # the factor itself rather than its levels.
  nlev <- vapply(fl, function(el) length(levels(el$f)), numeric(1))
  if (any(diff(nlev) > 0))
    fl <- fl[rev(order(nlev))]
  Zt <- do.call(rbind, lapply(fl, "[[", "Zt"))
  Zt
}
setMethod("predict", signature(object = "cpglmm"),
function(object, newdata, type = c("response", "link"),
na.action = na.pass, ...) {
tt <- attr(object@model.frame,"terms")
if (missing(newdata) || is.null(newdata)) {
mm <- X <- model.matrix(object)
Zt <- object@Zt
offset <- object$offset
}
else {
#FIXME: should I use xlev ???
Terms <- delete.response(tt)
# design matrix for fixed effects
X <- model.matrix(Terms, newdata, contrasts.arg = object@contrasts)
# design matrix for random effects
formula <- object@formula
oldmf <- object@model.frame
Zt <- getZt(formula, oldmf, newdata)
# get offset
offset <- rep(0, nrow(X))
if (!is.null(off.num <- attr(tt, "offset")))
for (i in off.num) offset <- offset + eval(attr(tt,
"variables")[[i + 1]], newdata)
if (!is.null(object$call$offset))
offset <- offset + eval(object$call$offset, newdata)
}
beta <- object@fixef
u <- object@ranef
predictor <- as.numeric(X %*% beta + t(Zt)%*% u)
if (!is.null(offset))
predictor <- predictor + offset
mu <- tweedie(link.power = object@link.power)$linkinv(predictor)
type <- match.arg(type)
switch(type,link = predictor, response = mu)
})
################################################
# methods defined for bcplm
################################################
# fixed effects
# Extract fixed-effect estimates from a fitted "bcplm" object.
#
# Returns the posterior medians (default) or posterior means of the
# regression coefficients.  Posterior standard deviations and arbitrary
# posterior quantiles can be attached as attributes "sd" and "quantiles".
setMethod("fixef", signature = "bcplm",
  function(object, type = c("median", "mean"), sd = FALSE,
           quantiles = NULL, ...){
    type <- match.arg(type)
    smry <- object@summary
    idx <- 1:unname(object@dims["n.beta"])
    if (type == "median") {
      # column 3 of the quantile table holds the posterior median
      est <- as.numeric(smry[[2]][idx, 3])
    } else {
      # column 1 of the statistics table holds the posterior mean
      est <- as.numeric(smry[[1]][idx, 1])
    }
    names(est) <- rownames(smry[[1]])[idx]
    if (sd) {
      # posterior standard deviations (column 2 of the statistics table)
      attr(est, "sd") <- as.numeric(smry[[1]][idx, 2])
    }
    if (!is.null(quantiles)) {
      # recompute the requested quantiles from the stored MCMC samples
      qtl <- as.matrix(summary(object$sims.list, quantiles = quantiles)[[2]])
      attr(est, "quantiles") <- qtl[idx, , drop = FALSE]
    }
    est
  }
)
# variance components
setMethod("VarCorr", signature(x = "bcplm"),
function(x, ...){
dm <- x@dims
if (dm["n.u"] == 0)
stop("No random effects in 'VarCorr'!")
ans <- lapply(x@Sigma, function(xx) {
stddev <- sqrt(diag(xx))
correl <- t(xx / stddev)/stddev
diag(correl) <- 1
attr(xx, "stddev") <- stddev
attr(xx, "correlation") <- correl
xx
})
fl <- x@flist
names(ans) <- names(fl)[attr(fl, "assign")]
attr(ans, "sc") <- sqrt(x@summary[[2]][dm["n.beta"] + 1, 3])
ans
}
)
# show(): printing a "bcplm" object delegates to print.bcplm.
setMethod("show", signature = "bcplm",
  function(object)
    print.bcplm(object)
)
# summary(): the fitted object already stores its MCMC summary, so
# summarizing simply returns the object itself (displayed via show()).
setMethod("summary", signature = "bcplm",
  function(object)
    object
)
# plot(): trace/density plots of the stored MCMC samples
# (dispatches to the mcmc.list plot method).
setMethod("plot", signature(x = "bcplm", y = "missing"),
  function(x, y, ...) plot(x@sims.list)
)
# print out (summarize) model results
print.bcplm <- function(x, digits = max(3, getOption("digits") - 3)){
dims <- x@dims
# fixed effects
fcoef <- fixef(x, sd = TRUE, quantiles = c(0.025, 0.975))
coefs <- cbind("Estimate" = fcoef, "Std. Error" = attr(fcoef, "sd"),
"Lower (2.5%)" = attr(fcoef, "quantiles")[, 1],
"Upper (97.5%)" = attr(fcoef, "quantiles")[, 2])
# start printing
cat("Compound Poisson linear models via MCMC\n")
cat(dims["n.chains"], " chains, each with ", dims["n.iter"], " iterations (first ",
dims["n.burnin"], " discarded)", sep = "")
if (dims["n.thin"] > 1)
cat(", n.thin =", dims["n.thin"])
cat("\nn.sims =", dims["n.sims"], "iterations saved\n")
cat("\n")
if (!is.null(x@call$formula))
cat("Formula:", deparse(x@call$formula),"\n")
if (!is.null(x@call$data))
cat(" Data:", deparse(x@call$data), "\n")
if (!is.null(x@call$subset))
cat(" Subset:", deparse(x@call$subset),"\n")
if (dims["n.u"] > 0){
cat("\nRandom and dynamic variance components:\n")
varcor <- VarCorr(x)
REmat <- formatVC(varcor)
if (is.na(attr(varcor, "sc")))
REmat <- REmat[-nrow(REmat), , drop = FALSE]
print(REmat, quote = FALSE, digits = digits)
}
cat(sprintf("Number of obs: %d ", x@dims["n.obs"]))
if (dims["n.u"] > 0){
ngrps <- sapply(x@flist, nlevels)
cat(", groups: ")
cat(paste(paste(names(ngrps), ngrps, sep = ", "), collapse = "; "))
}
cat("\n")
if (nrow(coefs) > 0) {
cat("\nFixed effects:\n")
printCoefmat(coefs, zap.ind = 3, digits = digits)
cat("---")
}
s <- x@summary
phi.ps <- grep("^phi$", rownames(s[[1]]))
p.ps <- grep("^p$", rownames(s[[1]]))
cat("\nEstimated dispersion parameter:",
format(s[[2]][phi.ps, 3], digits = max(5, digits + 1)))
cat("\nEstimated index parameter:",
format(s[[2]][p.ps, 3], digits = max(5, digits + 1)),"\n\n")
out <- list(fixef = coefs,
VarCorr = if (dims["n.u"]) REmat else list())
invisible(out)
}
################################################
# methods defined for zcpglm
################################################
# Accessor methods for "zcpglm" objects: each simply exposes the
# corresponding slot of the fitted model.
setMethod("coef",
  signature(object = "zcpglm"),
  # a list with components "zero" and "tweedie"
  function (object, ...) object@coefficients
)
setMethod("residuals",
  signature(object = "zcpglm"),
  function(object, ...) object@residuals
)
setMethod("resid",
  signature(object = "zcpglm"),
  # alias for residuals()
  function(object, ...) residuals(object)
)
# generate fitted values on the original scale
setMethod("fitted",
  signature(object = "zcpglm"),
  function (object, ...) object@fitted.values
)
# Summary method for "zcpglm" objects.
#
# Builds Wald-type coefficient tables (estimate, standard error, z value
# and two-sided p-value) separately for the zero-inflation and the
# compound Poisson components, then pretty-prints them via
# .print.zcpglm.summary().  The assembled summary list is returned
# invisibly by the print helper.
#
# Changes from the original: removed the dead local `keep` (computed via
# match() but never used) and renamed the local `coef` to `est` so it no
# longer shadows the generic coef().
setMethod("summary", signature(object = "zcpglm"),
  function(object, ...){
    nbz <- length(coef(object)$zero)      # no. of zero-model coefficients
    nbt <- length(coef(object)$tweedie)   # no. of tweedie-model coefficients
    se <- sqrt(diag(vcov(object)))
    est <- unlist(coef(object))
    zstat <- est / se
    pval <- 2 * pnorm(-abs(zstat))        # two-sided normal p-values
    ctab <- cbind(est, se, zstat, pval)
    colnames(ctab) <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
    rownames(ctab) <- c(names(coef(object)$zero), names(coef(object)$tweedie))
    # split the joint table back into the two model components
    coef.table <- list(zero = ctab[1:nbz, , drop = FALSE],
                       tweedie = ctab[(nbz + 1):(nbz + nbt), , drop = FALSE])
    out <- list(llik = object@llik, contrasts = object@contrasts,
                df.residual = object@df.residual, vcov = object@vcov,
                na.action = object@na.action, coefficients = coef.table,
                call = object@call, phi = object@phi, p = object@p)
    .print.zcpglm.summary(out)
  }
)
# Internal pretty-printer for the list assembled by summary(zcpglm).
#
# Args:
#   x: a list with components call, coefficients (a list holding the
#      "zero" and "tweedie" coefficient matrices), phi, p and na.action.
#   digits: number of significant digits to print.
#   signif.stars: whether printCoefmat() appends significance stars.
# Returns: x, invisibly, so callers can capture the summary object.
#
# Change from the original: removed the redundant paste() wrappers
# around single constant strings (paste() of one string is the string).
.print.zcpglm.summary <- function(x, digits = max(3, getOption("digits") - 3),
                                  signif.stars = getOption("show.signif.stars"), ...){
  cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
      "\n\n", sep = "")
  # zero-inflation component (legend suppressed; printed once after the
  # second table instead)
  cat("Zero-inflation model coefficients:\n")
  printCoefmat(x$coefficients$zero, digits = digits, signif.stars = signif.stars,
               na.print = "NA", signif.legend = FALSE)
  # compound Poisson component
  cat("\nCompound Poisson model coefficients:\n")
  printCoefmat(x$coefficients$tweedie, digits = digits, signif.stars = signif.stars,
               na.print = "NA")
  cat("\nEstimated dispersion parameter:",
      format(x$phi, digits = max(5, digits + 1)))
  cat("\nEstimated index parameter:",
      format(x$p, digits = max(5, digits + 1)), "\n")
  # report observations deleted by the NA action, if any
  if (nzchar(mess <- naprint(x$na.action)))
    cat(" (", mess, ")\n", sep = "")
  invisible(x)
}
# Predict method for "zcpglm" (zero-inflated compound Poisson) objects.
#
# Rebuilds the design matrices of both model components from newdata
# (using the factor levels stored at fit time), accumulates the
# per-component offsets, and returns one of:
#   "zero"     - probability of the zero state (inverse logit link),
#   "tweedie"  - mean of the compound Poisson component,
#   "response" - overall mean (1 - p_zero) * mu_tweedie  (default).
#
# Change from the original: removed the dead local `call <- object$call`
# (the call is always read directly from object$call below).
setMethod("predict", signature(object = "zcpglm"),
  function (object, newdata, type = c("response", "zero", "tweedie"),
            na.action = na.pass, ...) {
    ttz <- attr(object@model.frame$zero, "terms")
    ttt <- attr(object@model.frame$tweedie, "terms")
    Termsz <- delete.response(ttz)
    Termst <- delete.response(ttt)
    # factor levels from the fit guarantee consistent dummy coding
    xlevz <- .getXlevels(Termsz, object@model.frame$zero)
    xlevt <- .getXlevels(Termst, object@model.frame$tweedie)
    mz <- model.frame(Termsz, newdata, na.action = na.action, xlev = xlevz)
    mt <- model.frame(Termst, newdata, na.action = na.action, xlev = xlevt)
    Xz <- model.matrix(Termsz, mz, contrasts.arg = object$contrasts)
    Xt <- model.matrix(Termst, mt, contrasts.arg = object$contrasts)
    # offsets: terms recorded in each component's formula ...
    offt <- offz <- rep(0, nrow(Xz))
    if (!is.null(off.num <- attr(ttz, "offset")))
      for (i in off.num)
        offz <- offz + eval(attr(ttz, "variables")[[i + 1]], newdata)
    if (!is.null(off.num <- attr(ttt, "offset")))
      for (i in off.num)
        offt <- offt + eval(attr(ttt, "variables")[[i + 1]], newdata)
    # ... plus any offset= argument of the original call, which applies
    # to both components
    if (!is.null(object$call$offset)) {
      off <- eval(object$call$offset, newdata)
      offz <- offz + off
      offt <- offt + off
    }
    link.power <- make.link.power(object$link.power)
    tw <- tweedie(link.power = link.power)
    logit <- binomial()
    betaz <- object$coefficients$zero
    betat <- object$coefficients$tweedie
    muz <- logit$linkinv(Xz %*% betaz + offz)   # P(zero state)
    mut <- tw$linkinv(Xt %*% betat + offt)      # tweedie mean
    mu <- as.numeric((1 - muz) * mut)           # overall mean
    type <- match.arg(type)
    switch(type, response = mu, zero = muz, tweedie = mut)
  })
################################################
# classes defined in the cplm package
################################################
# virtual classes used in other class definitions
setClassUnion("NullNum", c("NULL","numeric"))
setClassUnion("NullList", c("NULL","list"))
setClassUnion("NullFunc", c("NULL","function"))
setClassUnion("ListFrame", c("list","data.frame"))
# import from package coda
setOldClass(c("mcmc", "mcmc.list", "summary.mcmc"))
## -------------------- lmer-related Classes --------------------------------
setOldClass("data.frame")
setOldClass("family")
setOldClass("logLik")
setClass("mer",
representation(## original data
env = "environment",# evaluation env for nonlinear model
nlmodel = "call",# nonlinear model call
frame = "data.frame",# model frame (or empty frame)
call = "call", # matched call
flist = "data.frame", # list of grouping factors
X = "matrix", # fixed effects model matrix
Xst = "dgCMatrix", # sparse fixed effects model matrix
Zt = "dgCMatrix",# sparse form of Z'
pWt = "numeric",# prior weights,
offset = "numeric", # length 0 -> no offset
y = "numeric", # response vector
###FIXME: Eliminate the cnames slot. Put the names on the elements of the ST slot.
# cnames = "list", # row/column names of els of ST
Gp = "integer", # pointers to row groups of Zt
dims = "integer",# dimensions and indicators
## slots that vary during optimization
ST = "list", #
V = "matrix", # gradient matrix
A = "dgCMatrix", # (ZTS)'
Cm = "dgCMatrix", # AH'G^{-1}W^{1/2} when s > 0
Cx = "numeric", # x slot of Cm when s == 1 (full Cm not stored)
L = "CHMfactor", # Cholesky factor of weighted P(AA' + I)P'
deviance = "numeric", # ML and REML deviance and components
fixef = "numeric",# fixed effects (length p)
ranef = "numeric",# random effects (length q)
u = "numeric", # orthogonal random effects (q)
eta = "numeric", # unbounded predictor
mu = "numeric", # fitted values at current beta and b
muEta = "numeric",# d mu/d eta evaluated at current eta
var = "numeric", # conditional variances of Y
resid = "numeric",# raw residuals at current beta and b
sqrtXWt = "matrix",# sqrt of model matrix row weights
sqrtrWt = "numeric",# sqrt of weights used with residuals
RZX = "matrix", # dense sol. to L RZX = ST'ZtX = AX
RX = "matrix", # Cholesky factor of downdated X'X
ghx = "numeric", # zeros of Hermite polynomial
ghw = "numeric"))
## -------------------- End lmer-related Classes --------------------------------
# class defining slots common to all derived classes
setClass("cplm",
representation(
call = "call",
formula = "formula",
contrasts = "NullList",
link.power = "numeric",
model.frame = "ListFrame",
inits = "NullList")
)
# class of "cpglm", returned by a call to "cpglm"
setClass("cpglm",
representation(
coefficients = "numeric",
residuals = "numeric",
fitted.values = "numeric",
linear.predictors = "numeric",
y = "numeric",
offset = "NullNum",
prior.weights = "NullNum",
weights = "numeric",
df.residual = "integer",
deviance = "numeric",
aic = "numeric",
control = "list",
p = "numeric",
phi = "numeric",
iter = "integer",
converged = "logical",
na.action = "NullFunc",
vcov = "matrix"),
contains = "cplm"
)
# class of "cpglm", returned by a call to "cpglm"
setClass("zcpglm",
representation(
coefficients = "list",
residuals = "numeric",
fitted.values = "numeric",
y = "numeric",
offset = "list",
prior.weights = "numeric",
df.residual = "integer",
llik = "numeric",
control = "list",
p = "numeric",
phi ="numeric",
converged = "logical",
na.action = "NullFunc",
vcov = "matrix"),
contains = "cplm"
)
# class "cpglmm" returned from a call of cpglmm
setClass("cpglmm",
representation(
p = "numeric",
phi = "numeric",
bound.p = "numeric",
vcov = "matrix",
smooths = "list"),
contains = c("cplm", "mer")
)
# class "summary.cpglmm"
setClass("summary.cpglmm",
representation(
methTitle = "character",
logLik= "logLik",
ngrps = "integer",
sigma = "numeric", # scale, non-negative number
coefs = "matrix",
REmat = "matrix",
AICtab= "data.frame"),
contains = "cpglmm"
)
# class "bcplm_input"
setClass("bcplm_input",
representation(
X = "matrix",
y = "numeric",
Zt = "dgCMatrix",
ygt0 = "integer",
offset = "numeric",
pWt = "numeric",
mu = "numeric",
eta = "numeric",
inits = "list",
fixef = "numeric",
u = "numeric",
phi = "numeric",
p = "numeric",
link.power = "numeric",
pbeta.mean = "numeric",
pbeta.var = "numeric",
bound.phi = "numeric",
bound.p = "numeric",
mh.sd = "numeric",
dims = "integer",
k = "integer",
Sigma = "list",
cllik = "numeric",
Xb = "numeric",
Zu = "numeric",
Gp = "integer",
ncol = "integer",
nlev = "integer",
accept = "numeric")
)
# class of "bcplm"
setClass("bcplm",
representation(
dims = "integer",
sims.list = "mcmc.list",
summary = "summary.mcmc",
prop.sd = "list",
Zt = "dgCMatrix",
flist = "list",
Sigma = "list"),
contains="cplm"
)
################################################
# methods defined for cplm
################################################
# extraction of slots using $
setMethod("$",
signature(x = "cplm"),
function(x, name) slot(x,name)
)
# names to get slot names
setMethod("names",
signature(x = "cplm"),
function(x) slotNames(x)
)
# extraction of slots using "[["
setMethod("[[",
signature(x = "cplm", i = "numeric", j = "missing"),
function (x, i, j, ...) slot(x,names(x)[i])
)
setMethod("[[",
signature(x = "cplm", i = "character", j = "missing"),
function (x, i, j, ...) slot(x, i)
)
setMethod("[",
signature(x = "cplm", i = "numeric",
j = "missing", drop = "missing"),
function (x, i, j, ..., drop) {
output <- lapply(i, function(y) slot(x, names(x)[y]))
names(output) <- names(x)[i]
return(output)
}
)
setMethod("[",
signature(x = "cplm",i = "character",
j = "missing", drop = "missing"),
function (x, i, j, ..., drop) {
output <- lapply(1:length(i), function(y) slot(x, i[y]))
names(output) <- i
return(output)
}
)
setMethod("terms",
signature(x = "cplm"),
function (x, ...) attr(x@model.frame, "terms")
)
setMethod("model.matrix",
signature(object = "cplm"),
function (object,...)
model.matrix(attr(object@model.frame, "terms"),
object@model.frame, object@contrasts)
)
setMethod("formula",
signature(x = "cplm"),
function (x, ...) x@formula
)
setMethod("show",
signature(object = "cplm"),
function(object) summary(object)
)
setMethod("vcov",
signature(object = "cplm"),
function(object, ...) object@vcov
)
model.frame.cplm <- function (formula, ...)
{
formula@model.frame
}
################################################
# methods defined for cpglm
################################################
setMethod("coef",
signature(object = "cpglm"),
function (object, ...) object@coefficients
)
setMethod("residuals",
signature(object = "cpglm"),
function (object, type = c("deviance", "pearson", "working",
"response", "partial"), ...) {
type <- match.arg(type)
y <- object@y
r <- object@residuals
mu <- object@fitted.values
wts <- object@prior.weights
family <- tweedie(var.power = object@p,link.power = object@link.power)
switch(type, deviance = , pearson = , response = if (is.null(y)) {
eta <- object@linear.predictors
y <- mu + r * family$mu.eta(eta)
})
res <- switch(type,
deviance = if (object@df.residual > 0) {
d.res <- sqrt(pmax((family$dev.resids)(y, mu,
wts), 0))
ifelse(y > mu, d.res, -d.res)
} else rep.int(0, length(mu)),
pearson = (y - mu) * sqrt(wts)/sqrt(family$variance(mu)),
working = r,
response = y - mu,
partial = r)
na.action <- attr(object@model.frame,"na.action")
if (!is.null(na.action))
res <- naresid(na.action, res)
#if (type == "partial")
# res <- res + predict(object, type = "terms")
res
}
)
setMethod("resid",
signature(object = "cpglm"),
function (object, type = c("deviance", "pearson", "working",
"response", "partial"), ...)
return(residuals(object, type = type))
)
# generate fitted values on the original scale
setMethod("fitted",
signature(object = "cpglm"),
function (object, ...) object@fitted.values
)
setMethod("AIC",
signature(object = "cpglm",k = "missing" ),
function (object, ..., k) object@aic
)
setMethod("deviance",
signature(object = "cpglm"),
function (object, ...) object@deviance
)
setMethod("summary", signature(object = "cpglm"),
function(object,...){
coef.beta <- coef(object)
vc <- vcov(object)
s.err <- sqrt(diag(vc))
err.beta <- s.err
test.value <- coef.beta / err.beta
dn <- c("Estimate", "Std. Error")
pvalue <- 2 * pt(-abs(test.value), object@df.residual)
coef.table <- cbind(coef.beta, err.beta, test.value, pvalue)
dn2 <- c("t value", "Pr(>|t|)")
dimnames(coef.table) <- list(names(coef.beta), c(dn, dn2))
keep <- match(c("call", "deviance", "aic", "contrasts", "df.residual",
"iter","na.action"), names(object), 0L)
ans <- c(object[keep], list(deviance.resid = residuals(object,
type = "deviance"), coefficients = coef.table,
dispersion = object@phi, vcov = vc, p = object@p))
.print.cpglm.summary(ans)
}
)
.print.cpglm.summary<-function(x,digits = max(3, getOption("digits") - 3),
signif.stars = getOption("show.signif.stars"), ...){
cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
"\n\n", sep = "")
cat("Deviance Residuals: \n")
if (x$df.residual > 5) {
x$deviance.resid <- quantile(x$deviance.resid, na.rm = TRUE)
names(x$deviance.resid) <- c("Min", "1Q", "Median", "3Q",
"Max")
}
xx <- zapsmall(x$deviance.resid, digits + 1)
print.default(xx, digits = digits, na.print = "", print.gap = 2)
printCoefmat(x$coefficients, digits = digits, signif.stars = signif.stars,
na.print = "NA",...)
cat("\nEstimated dispersion parameter:",
format(x$dispersion, digits = max(5, digits + 1)))
cat("\nEstimated index parameter:",
format(x$p, digits = max(5, digits + 1)),"\n\n")
cat("Residual deviance:", format(x$deviance, digits = max(5, digits + 1)),
" on", format(x$df.residual), " degrees of freedom\n")
if (nzchar(mess <- naprint(x$na.action)))
cat(" (", mess, ")\n", sep = "")
cat("AIC: ", format(x$aic, digits = max(4, digits + 1)), "\n\n")
cat("Number of Fisher Scoring iterations: ", x$iter, "\n")
cat("\n")
invisible(x)
}
# simple prediction method for cpglm
setMethod("predict", signature(object = "cpglm"),
function (object, newdata, type = c("response", "link"),
na.action = na.pass, ...) {
tt <- attr(object@model.frame, "terms")
if (missing(newdata) || is.null(newdata)) {
X <- model.matrix(object)
offset <- object$offset
}
else {
Terms <- delete.response(tt)
xlevels <- .getXlevels(Terms, object@model.frame)
m <- model.frame(Terms, newdata, na.action = na.action, xlev = xlevels)
X <- model.matrix(Terms, m, contrasts.arg = object$contrasts)
offset <- rep(0, nrow(X))
if (!is.null(off.num <- attr(tt, "offset")))
for (i in off.num) offset <- offset + eval(attr(tt,
"variables")[[i + 1]], newdata)
if (!is.null(object$call$offset))
offset <- offset + eval(object$call$offset, newdata)
}
beta <- object$coefficients
na.ps <- which(is.na(beta))
if (length(na.ps)) {
predictor <- X[, -na.ps, drop = FALSE] %*% beta[-na.ps]
warning("prediction from a rank-deficient fit may be misleading")
} else {
predictor <- X%*% beta
}
if (!is.null(offset))
predictor <- predictor + offset
mu <- tweedie(link.power = object@link.power)$linkinv(predictor)
type <- match.arg(type)
switch(type,link = predictor, response = mu)
})
################################################
# methods defined for cpglmm
################################################
setMethod("vcov", signature(object = "cpglmm"),
function(object, ...){
rr <- object$phi * chol2inv(object@RX, size = object@dims['p'])
nms <- colnames(object@X)
dimnames(rr) <- list(nms, nms)
if (FALSE){
# compute vcov for phi and p numerically
cpglmm_dev <- function(x, ...){
parm <- c(.Call("cpglmm_ST_getPars", object),
object$fixef, log(x[1]), x[2])
.Call("cpglmm_update_dev", object, parm)
}
x <- c(object$phi, object$p)
hs <- hess(x, cpglmm_dev)
dimnames(hs) <- list(c("phi", "p"), c("phi", "p"))
attr(rr,"phi_p") <- solve(hs)
}
rr
})
setGeneric("VarCorr", function(x, ...) standardGeneric("VarCorr"))
setMethod("VarCorr", signature(x = "cpglmm"),
function(x, ...){
sc <- sqrt(x@phi)
ans <- lapply(cc <- .Call("cpglmm_ST_chol", x),
function(ch) {
val <- crossprod(sc * ch) # variance-covariance
stddev <- sqrt(diag(val))
correl <- t(val / stddev)/stddev
diag(correl) <- 1
attr(val, "stddev") <- stddev
attr(val, "correlation") <- correl
val
})
fl <- x@flist
names(ans) <- names(fl)[attr(fl, "assign")]
attr(ans, "sc") <- sc
ans
})
setMethod("logLik", signature(object="cpglmm"),
function(object, REML = NULL, ...)
### Extract the log-likelihood or restricted log-likelihood
{
dims <- object@dims
if (is.null(REML) || is.na(REML[1]))
REML <- dims[["REML"]]
val <- -object@deviance["ML"]/2
attr(val, "nall") <- attr(val, "nobs") <- dims[["n"]]
attr(val, "df") <-
dims[["p"]] + dims[["np"]] + as.logical(dims[["useSc"]])
attr(val, "REML") <- as.logical(REML)
class(val) <- "logLik"
val
})
setMethod("summary", signature(object = "cpglmm"),
function(object, ...){
fcoef <- fixef(object)
vcov <- object@vcov
dims <- object@dims
coefs <- cbind("Estimate" = fcoef, "Std. Error" = sqrt(diag(vcov)) )
llik <- logLik(object)
dev <- object@deviance
mType <- "LMM"
mName <- "Compound Poisson linear"
method <- paste("the", if(dims[["nAGQ"]] == 1) "Laplace" else
"adaptive Gaussian Hermite","approximation")
AICframe <- data.frame(AIC = AIC(llik), BIC = BIC(llik),
logLik = as.vector(llik),
deviance = dev[["ML"]],
row.names = "")
varcor <- VarCorr(object)
REmat <- formatVC(varcor)
if (is.na(attr(varcor, "sc")))
REmat <- REmat[-nrow(REmat), , drop = FALSE]
if (nrow(coefs) > 0) {
if (!dims[["useSc"]]) {
coefs <- coefs[, 1:2, drop = FALSE]
stat <- coefs[,1]/coefs[,2]
pval <- 2*pnorm(abs(stat), lower.tail = FALSE)
coefs <- cbind(coefs, "z value" = stat, "Pr(>|z|)" = pval)
} else {
stat <- coefs[,1]/coefs[,2]
##pval <- 2*pt(abs(stat), coefs[,3], lower = FALSE)
coefs <- cbind(coefs, "t value" = stat) #, "Pr(>|t|)" = pval)
}
}
new("summary.cpglmm", object,
methTitle = paste(mName, "mixed model fit by", method),
logLik = llik,
ngrps = sapply(object@flist, function(x) length(levels(x))),
sigma = sqrt(object@phi),
coefs = coefs,
REmat = REmat,
AICtab = AICframe)
}
)
## This is modeled a bit after print.summary.lm :
print.cpglmm <- function(x, digits = max(3, getOption("digits") - 3),
correlation = FALSE, symbolic.cor = FALSE,
signif.stars = getOption("show.signif.stars"), ...){
so <- summary(x)
llik <- so@logLik
dev <- so@deviance
dims <- x@dims
cat(so@methTitle, "\n")
if (!is.null(x@call$formula))
cat("Formula:", deparse(x@call$formula),"\n")
if (!is.null(x@call$data))
cat(" Data:", deparse(x@call$data), "\n")
if (!is.null(x@call$subset))
cat(" Subset:", deparse(x@call$subset),"\n")
print(so@AICtab, digits = digits)
cat("Random effects:\n")
print(so@REmat, quote = FALSE, digits = digits, ...)
ngrps <- so@ngrps
cat(sprintf("Number of obs: %d, groups: ", dims[["n"]]))
cat(paste(paste(names(ngrps), ngrps, sep = ", "), collapse = "; "))
cat("\n")
if (nrow(so@coefs) > 0) {
cat("\nFixed effects:\n")
printCoefmat(so@coefs, zap.ind = 3, #, tst.ind = 4
digits = digits, signif.stars = signif.stars)
cat("\nEstimated dispersion parameter:", round(so@phi, digits=digits))
cat("\n")
cat("Estimated index parameter:", round(so@p, digits=digits))
cat("\n")
if(correlation) {
corF <- so@vcov@factors$correlation
if (!is.null(corF)) {
p <- ncol(corF)
if (p > 1) {
rn <- rownames(so@coefs)
rns <- abbreviate(rn, minlength=11)
cat("\nCorrelation of Fixed Effects:\n")
if (is.logical(symbolic.cor) && symbolic.cor) {
corf <- as(corF, "matrix")
dimnames(corf) <- list(rns,
abbreviate(rn, minlength=1, strict=TRUE))
print(symnum(corf))
} else {
corf <- matrix(format(round(corF@x, 3), nsmall = 3),
ncol = p,dimnames = list(rns, abbreviate(rn, minlength=6)))
corf[!lower.tri(corf)] <- ""
print(corf[-1, -p, drop=FALSE], quote = FALSE)
}
}
}
}
}
invisible(x)
}
setMethod("print", "cpglmm", print.cpglmm)
setMethod("show", "cpglmm",
function(object) print.cpglmm(object)
)
# predict method for cpglmm
# Rebuild the sparse random-effects design matrix transpose Z' for new data.
#
# For each random-effect term in `formula`, the grouping factor in
# `newmf` is coerced to the factor levels observed in the original model
# frame `oldmf`, so that the rows of Z' line up with the fitted random
# effects even when some levels are absent from the new data (such rows
# are filled with zeros).
#
# Args:
#   formula: the mixed-model formula of the fitted object.
#   oldmf: the model frame used in fitting (supplies the factor levels).
#   newmf: the new data (data frame) to build the design matrix for.
# Returns: the transpose of the random-effects design matrix (sparse).
getZt <- function(formula, oldmf, newmf){
  bars <- expandSlash(findbars(formula[[3]]))
  names(bars) <- unlist(lapply(bars, function(x) deparse(x[[3]])))
  fl <- lapply(bars, function(x) {
    # levels of the grouping factor in the original fit
    oldlvl <- eval(substitute(levels(as.factor(fac)[, drop = TRUE]),
                              list(fac = x[[3]])), oldmf)
    # grouping factor in the new data, coerced to the old level set
    ff <- eval(substitute(factor(fac, levels = oldlvl)[, drop = TRUE],
                          list(fac = x[[3]])), newmf)
    # indicator matrix with one row per *old* level; rows for levels
    # missing from the new data remain zero
    im <- as(ff, "sparseMatrix")
    im2 <- Matrix(0, nrow = length(oldlvl), ncol = length(ff), sparse = TRUE)
    # this is awkward as the Matrix package seems to fail
    for (i in 1:nrow(im)){
      ind <- match(rownames(im)[i], oldlvl)
      im2[as.numeric(ind), ] <- im[as.numeric(i), ]
    }
    if (!isTRUE(validObject(im, test = TRUE)))
      stop("invalid conditioning factor in random effect: ",
           format(x[[3]]))
    # model matrix of the left-hand side of the bar expression
    mm <- model.matrix(eval(substitute(~expr, list(expr = x[[2]]))), newmf)
    mm <- mm[!is.na(ff), , drop = FALSE]
    # stack one copy of the indicator block per model-matrix column
    Zt <- do.call(rBind, lapply(seq_len(ncol(mm)),
                                function(j) {
                                  im2@x <- mm[, j]
                                  im2
                                }))
    list(f = oldlvl, Zt = Zt)
  })
  nlev <- sapply(fl, function(el) length(levels(el$f)))
  # BUG FIX: the parenthesis was misplaced in the original
  # ("any(diff(nlev)) > 0"), which triggered a (tie-unstable) reordering
  # whenever the level counts merely differed.  Reorder by decreasing
  # number of levels only when they are not already non-increasing,
  # matching the lme4 idiom this code was derived from.
  if (any(diff(nlev) > 0))
    fl <- fl[rev(order(nlev))]
  Zt <- do.call(rBind, lapply(fl, "[[", "Zt"))
  Zt
}
# Predict method for "cpglmm" objects.
#
# Computes the combined fixed + random effects linear predictor
#   eta = X %*% beta + t(Zt) %*% u + offset
# and, for type = "response", maps it to the mean scale with the
# tweedie inverse link implied by the fitted link.power.
#
# Args:
#   object: a fitted "cpglmm" model.
#   newdata: optional data frame of new observations; if missing or NULL,
#     the design matrices stored from the original fit are reused.
#   type: "response" (mean scale, default) or "link" (linear predictor).
#   na.action: how to handle NAs in newdata (default na.pass).
# Returns: a numeric vector of predictions.
setMethod("predict", signature(object = "cpglmm"),
          function(object, newdata, type = c("response", "link"),
                   na.action = na.pass, ...) {
  tt <- attr(object@model.frame,"terms")
  if (missing(newdata) || is.null(newdata)) {
    # in-sample prediction: reuse the stored design matrices
    mm <- X <- model.matrix(object)
    Zt <- object@Zt
    offset <- object$offset
  }
  else {
    #FIXME: should I use xlev ???
    Terms <- delete.response(tt)
    # design matrix for fixed effects
    X <- model.matrix(Terms, newdata, contrasts.arg = object@contrasts)
    # design matrix for random effects, rebuilt so its rows align with
    # the grouping-factor levels seen at fit time (see getZt)
    formula <- object@formula
    oldmf <- object@model.frame
    Zt <- getZt(formula, oldmf, newdata)
    # get offset: terms recorded in the formula plus any offset= argument
    # given in the original call
    offset <- rep(0, nrow(X))
    if (!is.null(off.num <- attr(tt, "offset")))
      for (i in off.num) offset <- offset + eval(attr(tt,
            "variables")[[i + 1]], newdata)
    if (!is.null(object$call$offset))
      offset <- offset + eval(object$call$offset, newdata)
  }
  beta <- object@fixef
  u <- object@ranef
  # linear predictor: fixed part plus random part (Zt is the transpose
  # of the random-effects design matrix)
  predictor <- as.numeric(X %*% beta + t(Zt)%*% u)
  if (!is.null(offset))
    predictor <- predictor + offset
  # inverse link transform to the mean scale
  mu <- tweedie(link.power = object@link.power)$linkinv(predictor)
  type <- match.arg(type)
  switch(type,link = predictor, response = mu)
})
################################################
# methods defined for bcplm
################################################
# fixed effects
# Extract fixed-effect estimates from a fitted "bcplm" object.
#
# Returns the posterior medians (default) or posterior means of the
# regression coefficients.  Posterior standard deviations and arbitrary
# posterior quantiles can be attached as attributes "sd" and "quantiles".
setMethod("fixef", signature = "bcplm",
  function(object, type = c("median", "mean"), sd = FALSE,
           quantiles = NULL, ...){
    type <- match.arg(type)
    smry <- object@summary
    idx <- 1:unname(object@dims["n.beta"])
    if (type == "median") {
      # column 3 of the quantile table holds the posterior median
      est <- as.numeric(smry[[2]][idx, 3])
    } else {
      # column 1 of the statistics table holds the posterior mean
      est <- as.numeric(smry[[1]][idx, 1])
    }
    names(est) <- rownames(smry[[1]])[idx]
    if (sd) {
      # posterior standard deviations (column 2 of the statistics table)
      attr(est, "sd") <- as.numeric(smry[[1]][idx, 2])
    }
    if (!is.null(quantiles)) {
      # recompute the requested quantiles from the stored MCMC samples
      qtl <- as.matrix(summary(object$sims.list, quantiles = quantiles)[[2]])
      attr(est, "quantiles") <- qtl[idx, , drop = FALSE]
    }
    est
  }
)
# variance components
# Extract the posterior variance components of a fitted "bcplm" object.
#
# Each element of the returned list is a variance-covariance matrix with
# "stddev" and "correlation" attributes attached; the list-level "sc"
# attribute carries the residual scale (square root of the posterior
# median of the dispersion).  Errors out when the model has no random
# effects.
setMethod("VarCorr", signature(x = "bcplm"),
  function(x, ...){
    dm <- x@dims
    if (dm["n.u"] == 0)
      stop("No random effects in 'VarCorr'!")
    decorate <- function(vc) {
      # attach standard deviations and the implied correlation matrix
      sdev <- sqrt(diag(vc))
      corr <- t(vc / sdev) / sdev
      diag(corr) <- 1
      attr(vc, "stddev") <- sdev
      attr(vc, "correlation") <- corr
      vc
    }
    ans <- lapply(x@Sigma, decorate)
    fl <- x@flist
    names(ans) <- names(fl)[attr(fl, "assign")]
    # residual scale from the posterior median of phi
    attr(ans, "sc") <- sqrt(x@summary[[2]][dm["n.beta"] + 1, 3])
    ans
  }
)
# show(): printing a "bcplm" object delegates to print.bcplm.
setMethod("show", signature = "bcplm",
  function(object)
    print.bcplm(object)
)
# summary(): the fitted object already stores its MCMC summary, so
# summarizing simply returns the object itself (displayed via show()).
setMethod("summary", signature = "bcplm",
  function(object)
    object
)
# plot(): trace/density plots of the stored MCMC samples
# (dispatches to the mcmc.list plot method).
setMethod("plot", signature(x = "bcplm", y = "missing"),
  function(x, y, ...) plot(x@sims.list)
)
# print out (summarize) model results
# Print/summary display for a fitted "bcplm" (Bayesian compound Poisson)
# object: the MCMC setup, variance components (when random effects are
# present), fixed-effect posterior summaries, and the dispersion/index
# parameters.  Returns (invisibly) a list with the fixed-effect table
# and the formatted variance-component matrix.
print.bcplm <- function(x, digits = max(3, getOption("digits") - 3)){
  dims <- x@dims
  # fixed effects: posterior medians with sd and a central 95% interval
  fcoef <- fixef(x, sd = TRUE, quantiles = c(0.025, 0.975))
  coefs <- cbind("Estimate" = fcoef, "Std. Error" = attr(fcoef, "sd"),
                 "Lower (2.5%)" = attr(fcoef, "quantiles")[, 1],
                 "Upper (97.5%)" = attr(fcoef, "quantiles")[, 2])
  # start printing: MCMC configuration first
  cat("Compound Poisson linear models via MCMC\n")
  cat(dims["n.chains"], " chains, each with ", dims["n.iter"], " iterations (first ",
      dims["n.burnin"], " discarded)", sep = "")
  if (dims["n.thin"] > 1)
    cat(", n.thin =", dims["n.thin"])
  cat("\nn.sims =", dims["n.sims"], "iterations saved\n")
  cat("\n")
  # echo the main pieces of the original call, when available
  if (!is.null(x@call$formula))
    cat("Formula:", deparse(x@call$formula),"\n")
  if (!is.null(x@call$data))
    cat("  Data:", deparse(x@call$data), "\n")
  if (!is.null(x@call$subset))
    cat(" Subset:", deparse(x@call$subset),"\n")
  if (dims["n.u"] > 0){
    # random-effect variance components; the residual-scale row is
    # dropped when no scale parameter is available
    cat("\nRandom and dynamic variance components:\n")
    varcor <- VarCorr(x)
    REmat <- formatVC(varcor)
    if (is.na(attr(varcor, "sc")))
      REmat <- REmat[-nrow(REmat), , drop = FALSE]
    print(REmat, quote = FALSE, digits = digits)
  }
  cat(sprintf("Number of obs: %d ", x@dims["n.obs"]))
  if (dims["n.u"] > 0){
    # group sizes of each grouping factor
    ngrps <- sapply(x@flist, nlevels)
    cat(", groups: ")
    cat(paste(paste(names(ngrps), ngrps, sep = ", "), collapse = "; "))
  }
  cat("\n")
  if (nrow(coefs) > 0) {
    cat("\nFixed effects:\n")
    printCoefmat(coefs, zap.ind = 3, digits = digits)
    cat("---")
  }
  # posterior medians of the dispersion (phi) and index (p) parameters,
  # located by exact name in the summary table
  s <- x@summary
  phi.ps <- grep("^phi$", rownames(s[[1]]))
  p.ps <- grep("^p$", rownames(s[[1]]))
  cat("\nEstimated dispersion parameter:",
      format(s[[2]][phi.ps, 3], digits = max(5, digits + 1)))
  cat("\nEstimated index parameter:",
      format(s[[2]][p.ps, 3], digits = max(5, digits + 1)),"\n\n")
  out <- list(fixef = coefs,
              VarCorr = if (dims["n.u"]) REmat else list())
  invisible(out)
}
################################################
# methods defined for zcpglm
################################################
# Accessor methods for "zcpglm" objects: each simply exposes the
# corresponding slot of the fitted model.
setMethod("coef",
  signature(object = "zcpglm"),
  # a list with components "zero" and "tweedie"
  function (object, ...) object@coefficients
)
setMethod("residuals",
  signature(object = "zcpglm"),
  function(object, ...) object@residuals
)
setMethod("resid",
  signature(object = "zcpglm"),
  # alias for residuals()
  function(object, ...) residuals(object)
)
# generate fitted values on the original scale
setMethod("fitted",
  signature(object = "zcpglm"),
  function (object, ...) object@fitted.values
)
# Summary method for "zcpglm" objects.
#
# Builds Wald-type coefficient tables (estimate, standard error, z value
# and two-sided p-value) separately for the zero-inflation and the
# compound Poisson components, then pretty-prints them via
# .print.zcpglm.summary().  The assembled summary list is returned
# invisibly by the print helper.
#
# Changes from the original: removed the dead local `keep` (computed via
# match() but never used) and renamed the local `coef` to `est` so it no
# longer shadows the generic coef().
setMethod("summary", signature(object = "zcpglm"),
  function(object, ...){
    nbz <- length(coef(object)$zero)      # no. of zero-model coefficients
    nbt <- length(coef(object)$tweedie)   # no. of tweedie-model coefficients
    se <- sqrt(diag(vcov(object)))
    est <- unlist(coef(object))
    zstat <- est / se
    pval <- 2 * pnorm(-abs(zstat))        # two-sided normal p-values
    ctab <- cbind(est, se, zstat, pval)
    colnames(ctab) <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
    rownames(ctab) <- c(names(coef(object)$zero), names(coef(object)$tweedie))
    # split the joint table back into the two model components
    coef.table <- list(zero = ctab[1:nbz, , drop = FALSE],
                       tweedie = ctab[(nbz + 1):(nbz + nbt), , drop = FALSE])
    out <- list(llik = object@llik, contrasts = object@contrasts,
                df.residual = object@df.residual, vcov = object@vcov,
                na.action = object@na.action, coefficients = coef.table,
                call = object@call, phi = object@phi, p = object@p)
    .print.zcpglm.summary(out)
  }
)
# Internal pretty-printer for the list assembled by summary(zcpglm).
#
# Args:
#   x: a list with components call, coefficients (a list holding the
#      "zero" and "tweedie" coefficient matrices), phi, p and na.action.
#   digits: number of significant digits to print.
#   signif.stars: whether printCoefmat() appends significance stars.
# Returns: x, invisibly, so callers can capture the summary object.
#
# Change from the original: removed the redundant paste() wrappers
# around single constant strings (paste() of one string is the string).
.print.zcpglm.summary <- function(x, digits = max(3, getOption("digits") - 3),
                                  signif.stars = getOption("show.signif.stars"), ...){
  cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
      "\n\n", sep = "")
  # zero-inflation component (legend suppressed; printed once after the
  # second table instead)
  cat("Zero-inflation model coefficients:\n")
  printCoefmat(x$coefficients$zero, digits = digits, signif.stars = signif.stars,
               na.print = "NA", signif.legend = FALSE)
  # compound Poisson component
  cat("\nCompound Poisson model coefficients:\n")
  printCoefmat(x$coefficients$tweedie, digits = digits, signif.stars = signif.stars,
               na.print = "NA")
  cat("\nEstimated dispersion parameter:",
      format(x$phi, digits = max(5, digits + 1)))
  cat("\nEstimated index parameter:",
      format(x$p, digits = max(5, digits + 1)), "\n")
  # report observations deleted by the NA action, if any
  if (nzchar(mess <- naprint(x$na.action)))
    cat(" (", mess, ")\n", sep = "")
  invisible(x)
}
# Predict method for "zcpglm" (zero-inflated compound Poisson) objects.
#
# Rebuilds the design matrices of both model components from newdata
# (using the factor levels stored at fit time), accumulates the
# per-component offsets, and returns one of:
#   "zero"     - probability of the zero state (inverse logit link),
#   "tweedie"  - mean of the compound Poisson component,
#   "response" - overall mean (1 - p_zero) * mu_tweedie  (default).
#
# Change from the original: removed the dead local `call <- object$call`
# (the call is always read directly from object$call below).
setMethod("predict", signature(object = "zcpglm"),
  function (object, newdata, type = c("response", "zero", "tweedie"),
            na.action = na.pass, ...) {
    ttz <- attr(object@model.frame$zero, "terms")
    ttt <- attr(object@model.frame$tweedie, "terms")
    Termsz <- delete.response(ttz)
    Termst <- delete.response(ttt)
    # factor levels from the fit guarantee consistent dummy coding
    xlevz <- .getXlevels(Termsz, object@model.frame$zero)
    xlevt <- .getXlevels(Termst, object@model.frame$tweedie)
    mz <- model.frame(Termsz, newdata, na.action = na.action, xlev = xlevz)
    mt <- model.frame(Termst, newdata, na.action = na.action, xlev = xlevt)
    Xz <- model.matrix(Termsz, mz, contrasts.arg = object$contrasts)
    Xt <- model.matrix(Termst, mt, contrasts.arg = object$contrasts)
    # offsets: terms recorded in each component's formula ...
    offt <- offz <- rep(0, nrow(Xz))
    if (!is.null(off.num <- attr(ttz, "offset")))
      for (i in off.num)
        offz <- offz + eval(attr(ttz, "variables")[[i + 1]], newdata)
    if (!is.null(off.num <- attr(ttt, "offset")))
      for (i in off.num)
        offt <- offt + eval(attr(ttt, "variables")[[i + 1]], newdata)
    # ... plus any offset= argument of the original call, which applies
    # to both components
    if (!is.null(object$call$offset)) {
      off <- eval(object$call$offset, newdata)
      offz <- offz + off
      offt <- offt + off
    }
    link.power <- make.link.power(object$link.power)
    tw <- tweedie(link.power = link.power)
    logit <- binomial()
    betaz <- object$coefficients$zero
    betat <- object$coefficients$tweedie
    muz <- logit$linkinv(Xz %*% betaz + offz)   # P(zero state)
    mut <- tw$linkinv(Xt %*% betat + offt)      # tweedie mean
    mu <- as.numeric((1 - muz) * mut)           # overall mean
    type <- match.arg(type)
    switch(type, response = mu, zero = muz, tweedie = mut)
  })
#' @title A Function to estimate a GERGM.
#' @description The main function provided by the package.
#'
#' @param formula A formula object that specifies the relationship between
#' statistics and the observed network. Currently, the user may specify a model
#' using any combination of the following statistics: `out2stars(alpha = 1)`,
#' `in2stars(alpha = 1)`, `ctriads(alpha = 1)`, `mutual(alpha = 1)`,
#' `ttriads(alpha = 1)`, `absdiff(covariate = "MyCov")`,
#' `edgecov(covariate = "MyCov")`, `sender(covariate = "MyCov")`,
#' `reciever(covariate = "MyCov")`, `nodematch(covariate)`,
#' `nodemix(covariate, base = "MyBase")`, `netcov(network)`. To use
#' exponential downweighting for any of the network level terms, simply
#' specify a value for alpha less than 1. The `(alpha = 1)` term may be omitted
#' from the structural terms if no exponential downweighting is required. In
#' this case, the terms may be provided as: `out2star`, `in2star`, `ctriads`,
#' `recip`, `ttriads`. If the network is undirected the user may only specify
#' the following terms: `twostars(alpha = 1)`, `ttriads(alpha = 1)`,
#' `absdiff(covariate = "MyCov")`, `edgecov(covariate = "MyCov")`,
#' `sender(covariate = "MyCov")`, `nodematch(covariate)`,
#' `nodemix(covariate, base = "MyBase")`, `netcov(network)`. An intercept
#' term is included by default, but can be omitted by setting
#' omit_intercept_term = TRUE. If the user specifies
#' `nodemix(covariate, base = NULL)`, then all levels of the covariate
#' will be matched on.
#' @param covariate_data A data frame containing node level covariates the user
#' wished to transform into sender or reciever effects. It must have row names
#' that match every entry in colnames(raw_network), should have descriptive
#' column names. If left NULL, then no sender or reciever effects will be
#' added.
#' @param normalization_type If only a raw_network is provided and
#' omit_intercept_term = TRUE then, the function
#' will automatically check to determine if all edges fall in the [0,1] interval.
#' If edges are determined to fall outside of this interval, then a transformation
#' onto the interval may be specified. If "division" is selected, then the data
#' will have a value added to them such that the minimum value is at least zero
#' (if necessary) and then all edge values will be divided by the maximum to
#' ensure that the maximum value is in [0,1]. If "log" is selected, then the data
#' will have a value added to them such that the minimum value is at least zero
#' (if necessary), then 1 will be added to all edge values before they are logged
#' and then divided by the largest value, again ensuring that the resulting
#' network is on [0,1]. Defaults to "log" and need not be set to NULL if
#' providing covariates as it will be ignored.
#' @param network_is_directed Logical specifying whether or not the observed
#' network is directed. Default is TRUE.
#' @param use_MPLE_only Logical specifying whether or not only the maximum pseudo
#' likelihood estimates should be obtained. In this case, no simulations will be
#' performed. Default is FALSE.
#' @param transformation_type Specifies how covariates are transformed onto the
#' raw network. When working with heavily tailed data that are not strictly
#' positive, select "Cauchy" to transform the data using a Cauchy distribution.
#' If data are strictly positive and heavy tailed (such as financial data) it is
#' suggested the user select "LogCauchy" to perform a Log-Cauchy transformation
#' of the data. For a transformation of the data using a Gaussian distribution,
#' select "Gaussian" and for strictly positive raw networks, select "LogNormal".
#' The Default value is "Cauchy".
#' @param estimation_method Simulation method for MCMC estimation. Default is
#' "Gibbs" which will generally be faster with well behaved networks but will not
#' allow for exponential downweighting.
#' @param maximum_number_of_lambda_updates Maximum number of iterations of outer
#' MCMC loop which alternately estimates transform parameters and ERGM
#' parameters. In the case that data_transformation = NULL, this argument is
#' ignored. Default is 10.
#' @param maximum_number_of_theta_updates Maximum number of iterations within the
#' MCMC inner loop which estimates the ERGM parameters. Default is 10.
#' @param number_of_networks_to_simulate Number of simulations generated for
#' estimation via MCMC. Default is 500.
#' @param thin The proportion of samples that are kept from each simulation. For
#' example, thin = 1/200 will keep every 200th network in the overall simulated
#' sample. Default is 1.
#' @param proposal_variance The variance specified for the Metropolis Hastings
#' simulation method. This parameter is inversely proportional to the average
#' acceptance rate of the M-H sampler and should be adjusted so that the average
#' acceptance rate is approximately 0.25. Default is 0.1.
#' @param downweight_statistics_together Logical specifying whether or not the
#' weights should be applied inside or outside the sum. Default is TRUE and user
#' should not select FALSE under normal circumstances.
#' @param MCMC_burnin Number of samples from the MCMC simulation procedure that
#' will be discarded before drawing the samples used for estimation.
#' Default is 100.
#' @param seed Seed used for reproducibility. Default is 123.
#' @param convergence_tolerance Threshold designated for stopping criterion. If
#' the difference of parameter estimates from one iteration to the next all have
#' a p -value (under a paired t-test) greater than this value, the parameter
#' estimates are declared to have converged. Default is 0.01.
#' @param MPLE_gain_factor Multiplicative constant between 0 and 1 that controls
#' how far away the initial theta estimates will be from the standard MPLEs via
#' a one step Fisher update. In the case of strongly dependent data, it is
#' suggested to use a value of 0.10. Default is 0.
#' @param acceptable_fit_p_value_threshold A p-value threshold for how closely
#' statistics of observed network conform to statistics of networks simulated
#' from GERGM parameterized by converged final parameter estimates. Default value
#' is 0.05.
#' @param force_x_theta_updates Defaults to 1 where theta estimation is not
#' allowed to converge until thetas have updated for x iterations . Useful when
#' model is not degenerate but simulated statistics do not match observed network
#' well when algorithm stops after first y updates.
#' @param force_x_lambda_updates Defaults to 1 where lambda estimation is not
#' allowed to converge until lambdas have updated for x iterations . Useful when
#' model is not degenerate but simulated statistics do not match observed network
#' well when algorithm stops after first y updates.
#' @param output_directory The directory where you would like output generated
#' by the GERGM estimation procedure to be saved (if output_name is specified).
#' This includes GOF, trace, and parameter estimate plots, as well as a summary
#' of the estimation procedure and an .Rdata file containing the GERGM object
#' returned by this function. May be left as NULL if the user would prefer all
#' plots be printed to the graphics device.
#' @param output_name The common name stem you would like to assign to all
#' objects output by the gergm function. Default value of NULL will not save any
#' output directly to .pdf files, it will be printed to the console instead. Must
#' be a character string or NULL. For example, if "Test" is supplied as the
#' output_name, then 4 files will be output: "Test_GOF.pdf", "Test_Parameter_Estim
#' ates.pdf", "Test_GERGM_Object.Rdata", "Test_Estimation_Log.txt", and
#' "Test_Trace_Plot.pdf"
#' @param generate_plots Defaults to TRUE, if FALSE, then no diagnostic or
#' parameter plots are generated.
#' @param verbose Defaults to TRUE (providing lots of output while model is
#' running). Can be set to FALSE if the user wishes to see less output.
#' @param omit_intercept_term Defaults to FALSE, can be set to TRUE if the
#' user wishes to omit the model intercept term.
#' @param hyperparameter_optimization Logical indicating whether automatic
#' hyperparameter optimization should be used. Defaults to FALSE. If TRUE, then
#' the algorithm will automatically seek to find an optimal burnin and number of
#' networks to simulate, and if using Metropolis Hastings, will attempt to select
#' a proposal variance that leads to an acceptance rate within +-0.05 of
#' target_accept_rate. Furthermore, if degeneracy is detected, the algorithm
#' will attempt to address the issue automatically. WARNING: This feature is
#' experimental, and may greatly increase runtime. Please monitor console
#' output!
#' @param target_accept_rate The target Metropolis Hastings acceptance rate.
#' Defaults to 0.25
#' @param theta_grid_optimization_list Defaults to NULL. This highly
#' experimental feature may allow the user to address model degeneracy arising
#' from a suboptimal theta initialization. It performs a grid search around the
#' theta values calculated via MPLE to select a potentially improved
#' initialization. The runtime complexity of this feature grows exponentially in
#' the size of the grid and number of parameters -- use with great care. This
#' feature may only be used if hyperparameter_optimization = TRUE, and if a list
#' object of the following form is provided: list(grid_steps = 2,
#' step_size = 0.5, cores = 2, iteration_fraction = 0.5). grid_steps indicates
#' the number of steps out the grid search will perform, step_size indicates the
#' fraction of the MPLE theta estimate that each grid search step will change by,
#' cores indicates the number of cores to be used for parallel optimization, and
#' iteration_fraction indicates the fraction of the number of MCMC iterations
#' that will be used for each grid point (should be set less than 1 to speed up
#' optimization). In general grid_steps should be smaller the more structural
#' parameters the user wishes to specify. For example, with 5 structural
#' paramters (mutual, ttriads, etc.), grid_steps = 3 will result in a (2*3+1)^5
#' = 16807 parameter grid search. Again this feature is highly experimental and
#' should only be used as a last resort (after playing with exponential
#' downweighting and the MPLE_gain_factor).
#' @param ... Optional arguments, currently unsupported.
#' @return A gergm object containing parameter estimates.
#' @examples
#' \dontrun{
#' set.seed(12345)
#' net <- matrix(rnorm(100,0,20),10,10)
#' colnames(net) <- rownames(net) <- letters[1:10]
#' formula <- net ~ mutual + ttriads
#'
#' test <- gergm(formula,
#' normalization_type = "division",
#' network_is_directed = TRUE,
#' use_MPLE_only = FALSE,
#' estimation_method = "Metropolis",
#' number_of_networks_to_simulate = 40000,
#' thin = 1/10,
#' proposal_variance = 0.5,
#' downweight_statistics_together = TRUE,
#' MCMC_burnin = 10000,
#' seed = 456,
#' convergence_tolerance = 0.01,
#' MPLE_gain_factor = 0,
#' force_x_theta_updates = 4)
#' }
#' @export
gergm <- function(formula,
                  covariate_data = NULL,
                  normalization_type = c("log", "division"),
                  network_is_directed = c(TRUE, FALSE),
                  use_MPLE_only = c(FALSE, TRUE),
                  transformation_type = c("Cauchy", "LogCauchy", "Gaussian", "LogNormal"),
                  estimation_method = c("Gibbs", "Metropolis"),
                  maximum_number_of_lambda_updates = 10,
                  maximum_number_of_theta_updates = 10,
                  number_of_networks_to_simulate = 500,
                  thin = 1,
                  proposal_variance = 0.1,
                  downweight_statistics_together = TRUE,
                  MCMC_burnin = 100,
                  seed = 123,
                  convergence_tolerance = 0.01,
                  MPLE_gain_factor = 0,
                  acceptable_fit_p_value_threshold = 0.05,
                  force_x_theta_updates = 1,
                  force_x_lambda_updates = 1,
                  output_directory = NULL,
                  output_name = NULL,
                  generate_plots = TRUE,
                  verbose = TRUE,
                  omit_intercept_term = FALSE,
                  hyperparameter_optimization = FALSE,
                  target_accept_rate = 0.25,
                  theta_grid_optimization_list = NULL,
                  ...
                  ) {
  # Pass in experimental features through the ellipsis (...). Currently only
  # weighted_MPLE is recognized.
  weighted_MPLE <- FALSE
  object <- as.list(substitute(list(...)))[-1L]
  if (length(object) > 0) {
    if (!is.null(object$weighted_MPLE)) {
      if (object$weighted_MPLE) {
        weighted_MPLE <- TRUE
        cat("Using experimental weighted_MPLE...\n")
      }
    }
  }
  # Hard coded lists of the statistics/terms the formula parser understands.
  possible_structural_terms <- c("out2stars",
                                 "in2stars",
                                 "ctriads",
                                 "mutual",
                                 "ttriads",
                                 "edges")
  possible_structural_terms_undirected <- c("edges",
                                            "twostars",
                                            "ttriads")
  possible_covariate_terms <- c("absdiff",
                                "nodecov",
                                "nodematch",
                                "sender",
                                "receiver",
                                "intercept",
                                "nodemix")
  possible_network_terms <- "netcov"
  possible_transformations <- c("cauchy",
                                "logcauchy",
                                "gaussian",
                                "lognormal")
  # Map undirected aliases (e.g. twostars) onto the directed term set.
  if (!network_is_directed) {
    formula <- parse_undirected_structural_terms(
      formula,
      possible_structural_terms,
      possible_structural_terms_undirected)
  }
  # Automatically add an intercept term unless omit_intercept_term is TRUE.
  if (!omit_intercept_term) {
    formula <- add_intercept_term(formula)
    # Check for an edges statistic, which would duplicate the intercept.
    form <- as.formula(formula)
    parsed <- deparse(form)
    if (length(parsed) > 1) {
      parsed <- paste0(parsed, collapse = " ")
    }
    if (grepl("edges", parsed)) {
      stop("You may not specify an edges statistic if omit_intercept_term == FALSE as this will introduce two identical intercept terms and instability in the model. An intercept term is automatically added in the lambda transformation step unless omit_intercept_term == TRUE, and we have found this method of adding an intercept to be less prone to degeneracy.")
    }
  }
  # Resolve the multi-option arguments to their first (default) values.
  use_MPLE_only <- use_MPLE_only[1]              # default is FALSE
  network_is_directed <- network_is_directed[1]  # default is TRUE
  estimation_method <- estimation_method[1]      # default is Gibbs
  transformation_type <- transformation_type[1]  # default is "Cauchy"
  transformation_type <- tolower(transformation_type)
  normalization_type <- normalization_type[1]
  # Validate the output arguments: a name without a directory is ambiguous.
  if (is.null(output_directory) && !is.null(output_name)) {
    stop("You have specified an output file name but no output directory. Please
         specify both or neither.")
  }
  # Validate the transformation type against the supported set.
  if (!(transformation_type %in% possible_transformations)) {
    stop("You have specified a transformation that is not recognized. Please
         specify one of: Cauchy, LogCauchy, Gaussian, or LogNormal")
  }
  # Make sure the proposal variance is strictly positive.
  if (proposal_variance <= 0) {
    stop("You supplied a proposal variance that was less than or equal to zero.")
  }
  formula <- as.formula(formula)
  # 0. Prepare the data: normalize the raw network and transform covariates.
  Transformed_Data <- Prepare_Network_and_Covariates(
    formula,
    possible_structural_terms,
    possible_covariate_terms,
    possible_network_terms,
    covariate_data = covariate_data,
    normalization_type = normalization_type,
    is_correlation_network = FALSE,
    is_directed = network_is_directed,
    beta_correlation_model = FALSE)
  data_transformation <- NULL
  if (!is.null(Transformed_Data$transformed_covariates)) {
    data_transformation <- Transformed_Data$transformed_covariates
  }
  gpar.names <- c(Transformed_Data$gpar.names, "dispersion")
  # 1. Create the GERGM object from the formula and transformed network.
  GERGM_Object <- Create_GERGM_Object_From_Formula(
    formula,
    theta.coef = NULL,
    possible_structural_terms,
    possible_covariate_terms,
    possible_network_terms,
    raw_network = Transformed_Data$network,
    together = 1,
    transform.data = data_transformation,
    lambda.coef = NULL,
    transformation_type = transformation_type,
    is_correlation_network = FALSE,
    is_directed = network_is_directed,
    beta_correlation_model = FALSE)
  GERGM_Object@theta_estimation_converged <- FALSE
  GERGM_Object@lambda_estimation_converged <- FALSE
  GERGM_Object@observed_network <- GERGM_Object@network
  GERGM_Object@observed_bounded_network <- GERGM_Object@bounded.network
  GERGM_Object@simulation_only <- FALSE
  GERGM_Object@transformation_type <- transformation_type
  GERGM_Object@downweight_statistics_together <- downweight_statistics_together
  GERGM_Object@directed_network <- network_is_directed
  # Only record the grid optimization list if one was actually supplied.
  # inherits() is the robust way to test the class (avoids class(x) == "list",
  # which fails for objects with multiple classes).
  GERGM_Object@using_grid_optimization <- FALSE
  if (inherits(theta_grid_optimization_list, "list")) {
    GERGM_Object@using_grid_optimization <- TRUE
    GERGM_Object@theta_grid_optimization_list <- theta_grid_optimization_list
  }
  if (!is.null(data_transformation)) {
    GERGM_Object@data_transformation <- data_transformation
  }
  if (is.null(output_name)) {
    GERGM_Object@print_output <- FALSE
  } else {
    GERGM_Object@print_output <- TRUE
  }
  # This entry point never fits correlation / beta-correlation models.
  GERGM_Object@is_correlation_network <- FALSE
  GERGM_Object@beta_correlation_model <- FALSE
  GERGM_Object@weighted_MPLE <- weighted_MPLE
  # Set adaptive Metropolis parameters.
  GERGM_Object@hyperparameter_optimization <- hyperparameter_optimization
  GERGM_Object@target_accept_rate <- target_accept_rate
  GERGM_Object@proposal_variance <- proposal_variance
  GERGM_Object@estimation_method <- estimation_method
  GERGM_Object@number_of_simulations <- number_of_networks_to_simulate
  GERGM_Object@thin <- thin
  GERGM_Object@burnin <- MCMC_burnin
  GERGM_Object@MPLE_gain_factor <- MPLE_gain_factor
  # 2. Estimate the GERGM (alternating lambda / theta updates).
  GERGM_Object <- Estimate_GERGM(formula,
    MPLE.only = use_MPLE_only,
    max.num.iterations = maximum_number_of_lambda_updates,
    mc.num.iterations = maximum_number_of_theta_updates,
    seed = seed,
    tolerance = convergence_tolerance,
    possible.stats = possible_structural_terms,
    GERGM_Object = GERGM_Object,
    force_x_theta_updates = force_x_theta_updates,
    verbose = verbose,
    force_x_lambda_updates = force_x_lambda_updates)
  # 3. Perform degeneracy diagnostics and create GOF plots.
  if (!GERGM_Object@theta_estimation_converged) {
    warning("Estimation procedure did not detect convergence in Theta estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
    GERGM_Object <- store_console_output(GERGM_Object, "Estimation procedure did not detect convergence in Theta estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
  }
  if (!GERGM_Object@lambda_estimation_converged) {
    warning("Estimation procedure did not detect convergence in Lambda estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
    GERGM_Object <- store_console_output(GERGM_Object, "Estimation procedure did not detect convergence in Lambda estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
  }
  # Now simulate from the last update of the theta parameters.
  GERGM_Object <- Simulate_GERGM(GERGM_Object,
    seed1 = seed,
    possible.stats = possible_structural_terms)
  colnames(GERGM_Object@lambda.coef) <- gpar.names
  num.nodes <- GERGM_Object@num_nodes
  # All node triples, used by h2() to evaluate triadic statistics.
  triples <- t(combn(seq_len(num.nodes), 3))
  # Change back column names if we are dealing with an undirected network.
  if (!network_is_directed) {
    change <- which(colnames(GERGM_Object@theta.coef) == "in2stars")
    if (length(change) > 0) {
      colnames(GERGM_Object@theta.coef)[change] <- "twostars"
    }
  }
  # Statistics of the observed (bounded) network, used as the t-test baseline.
  init.statistics <- NULL
  if (GERGM_Object@is_correlation_network) {
    init.statistics <- h2(GERGM_Object@network,
      triples = triples,
      statistics = rep(1, length(possible_structural_terms)),
      alphas = GERGM_Object@weights,
      together = downweight_statistics_together)
  } else {
    init.statistics <- h2(GERGM_Object@bounded.network,
      triples = triples,
      statistics = rep(1, length(possible_structural_terms)),
      alphas = GERGM_Object@weights,
      together = downweight_statistics_together)
  }
  # Fix issue with the wrong stats being saved.
  GERGM_Object@stats[2, ] <- init.statistics
  hsn.tot <- GERGM_Object@MCMC_output$Statistics
  # Save these statistics so we can make GOF plots in the future; otherwise
  # they would be the transformed statistics which would produce poor GOF plots.
  GERGM_Object@simulated_statistics_for_GOF <- hsn.tot
  # Thin statistics before running the t-tests below.
  hsn.tot <- Thin_Statistic_Samples(hsn.tot)
  # Calculate t-test p-values comparing the means of the newly simulated
  # statistics with those of the observed network.
  statistic_test_p_values <- rep(NA, length(possible_structural_terms))
  for (i in seq_along(possible_structural_terms)) {
    statistic_test_p_values[i] <- round(t.test(hsn.tot[, i],
      mu = init.statistics[i])$p.value, 3)
  }
  stats.data <- data.frame(Observed = init.statistics,
                           Simulated = colMeans(hsn.tot))
  rownames(stats.data) <- possible_structural_terms
  cat("Statistics of observed network and networks simulated from final theta parameter estimates:\n")
  GERGM_Object <- store_console_output(GERGM_Object, "Statistics of observed network and networks simulated from final theta parameter estimates:\n")
  GERGM_Object <- store_console_output(GERGM_Object, toString(stats.data))
  statistic_test_p_values <- data.frame(p_values = statistic_test_p_values)
  rownames(statistic_test_p_values) <- possible_structural_terms
  cat("\nt-test p-values for statistics of observed network and networks simulated from final theta parameter estimates:\n \n")
  GERGM_Object <- store_console_output(GERGM_Object, "\nt-test p-values for statistics of observed network and networks simulated from final theta parameter estimates:\n \n")
  print(statistic_test_p_values)
  GERGM_Object <- store_console_output(GERGM_Object, toString(statistic_test_p_values))
  colnames(statistic_test_p_values) <- "p_values"
  GERGM_Object@observed_simulated_t_test <- statistic_test_p_values
  # Test whether fit is acceptable on the statistics the user specified: all
  # p-values must exceed the threshold (i.e. simulated and observed statistics
  # are statistically indistinguishable).
  acceptable_fit <- statistic_test_p_values[which(GERGM_Object@stats_to_use == 1), 1]
  if (min(acceptable_fit) > acceptable_fit_p_value_threshold) {
    GERGM_Object@acceptable_fit <- TRUE
    message("Parameter estimates simulate networks that are statistically indistinguishable from observed network on the statistics specified by the user. ")
    GERGM_Object <- store_console_output(GERGM_Object, "Parameter estimates simulate networks that are statistically indistinguishable from observed network on the statistics specified by the user. ")
  } else {
    GERGM_Object@acceptable_fit <- FALSE
    message("Parameter estimates simulate networks that are statistically distinguishable from observed network. Check GOF plots to determine if the model provides a reasonable fit . This is a very stringent test for goodness of fit, so results may still be acceptable even if this criterion is not met.")
    GERGM_Object <- store_console_output(GERGM_Object, "Parameter estimates simulate networks that are statistically distinguishable from observed network. Check GOF plots to determine if the model provides a reasonable fit . This is a very stringent test for goodness of fit, so results may still be acceptable even if this criterion is not met.")
  }
  # 4. Output everything to the appropriate files and return the GERGM object.
  if (generate_plots) {
    # Only write files if output_name was supplied.
    if (!is.null(output_name)) {
      if (is.null(output_directory)) {
        output_directory <- getwd()
      }
      current_directory <- getwd()
      setwd(output_directory)
      # try() ensures the working directory is restored below even if any of
      # the plotting or saving steps fail.
      try({
        pdf(file = paste0(output_name, "_GOF.pdf"), height = 4, width = 8)
        GOF(GERGM_Object)
        dev.off()
        pdf(file = paste0(output_name, "_Parameter_Estimates.pdf"), height = 4, width = 5)
        Estimate_Plot(GERGM_Object)
        dev.off()
        pdf(file = paste0(output_name, "_Trace_Plot.pdf"), height = 4, width = 6)
        Trace_Plot(GERGM_Object)
        dev.off()
        save(GERGM_Object, file = paste0(output_name, "_GERGM_Object.Rdata"))
        write.table(GERGM_Object@console_output,
                    file = paste0(output_name, "_Estimation_Log.txt"),
                    row.names = FALSE, col.names = FALSE,
                    fileEncoding = "utf8", quote = FALSE)
      })
      setwd(current_directory)
    } else {
      # If we are not saving everything to a directory then just print the
      # diagnostics to the graphics device.
      try({
        GOF(GERGM_Object)
        Sys.sleep(2)
        Estimate_Plot(GERGM_Object)
        Sys.sleep(2)
        Trace_Plot(GERGM_Object)
      })
    }
  }
  # Transform simulated networks back to the observed scale.
  cat("Transforming networks simulated via MCMC as part of the fit diagnostics back on to the scale of observed network. You can access these networks through the '@MCMC_output$Networks' field returned by this function...\n")
  GERGM_Object <- Convert_Simulated_Networks_To_Observed_Scale(GERGM_Object)
  return(GERGM_Object)
}
#' @title A Function to estimate a GERGM.
#' @description The main function provided by the package.
#'
#' @param formula A formula object that specifies the relationship between
#' statistics and the observed network. Currently, the user may specify a model
#' using any combination of the following statistics: `out2stars(alpha = 1)`,
#' `in2stars(alpha = 1)`, `ctriads(alpha = 1)`, `mutual(alpha = 1)`,
#' `ttriads(alpha = 1)`, `absdiff(covariate = "MyCov")`,
#' `edgecov(covariate = "MyCov")`, `sender(covariate = "MyCov")`,
#' `reciever(covariate = "MyCov")`, `nodematch(covariate)`,
#' `nodemix(covariate, base = "MyBase")`, `netcov(network)`. To use
#' exponential downweighting for any of the network level terms, simply
#' specify a value for alpha less than 1. The `(alpha = 1)` term may be omitted
#' from the structural terms if no exponential downweighting is required. In
#' this case, the terms may be provided as: `out2star`, `in2star`, `ctriads`,
#' `recip`, `ttriads`. If the network is undirected the user may only specify
#' the following terms: `twostars(alpha = 1)`, `ttriads(alpha = 1)`,
#' `absdiff(covariate = "MyCov")`, `edgecov(covariate = "MyCov")`,
#' `sender(covariate = "MyCov")`, `nodematch(covariate)`,
#' `nodemix(covariate, base = "MyBase")`, `netcov(network)`. An intercept
#' term is included by default, but can be omitted by setting
#' omit_intercept_term = TRUE. If the user specifies
#' `nodemix(covariate, base = NULL)`, then all levels of the covariate
#' will be matched on.
#' @param covariate_data A data frame containing node level covariates the user
#' wished to transform into sender or reciever effects. It must have row names
#' that match every entry in colnames(raw_network), should have descriptive
#' column names. If left NULL, then no sender or reciever effects will be
#' added.
#' @param normalization_type If only a raw_network is provided and
#' omit_intercept_term = TRUE then, the function
#' will automatically check to determine if all edges fall in the [0,1] interval.
#' If edges are determined to fall outside of this interval, then a trasformation
#' onto the interval may be specified. If "division" is selected, then the data
#' will have a value added to them such that the minimum value is atleast zero
#' (if necessary) and then all edge values will be divided by the maximum to
#' ensure that the maximum value is in [0,1]. If "log" is selected, then the data
#' will have a value added to them such that the minimum value is atleast zero
#' (if necessary), then 1 will be added to all edge values before they are logged
#' and then divided by the largest value, again ensuring that the resulting
#' network is on [0,1]. Defaults to "log" and need not be set to NULL if
#' providing covariates as it will be ignored.
#' @param network_is_directed Logical specifying whether or not the observed
#' network is directed. Default is TRUE.
#' @param use_MPLE_only Logical specifying whether or not only the maximum pseudo
#' likelihood estimates should be obtained. In this case, no simulations will be
#' performed. Default is FALSE.
#' @param transformation_type Specifies how covariates are transformed onto the
#' raw network. When working with heavly tailed data that are not strictly
#' positive, select "Cauchy" to transform the data using a Cauchy distribution.
#' If data are strictly positive and heavy tailed (such as financial data) it is
#' suggested the user select "LogCauchy" to perform a Log-Cauchy transformation
#' of the data. For a tranformation of the data using a Gaussian distribution,
#' select "Gaussian" and for strictly positive raw networks, select "LogNormal".
#' The Default value is "Cauchy".
#' @param estimation_method Simulation method for MCMC estimation. Default is
#' "Gibbs" which will generally be faster with well behaved networks but will not
#' allow for exponential downweighting.
#' @param maximum_number_of_lambda_updates Maximum number of iterations of outer
#' MCMC loop which alternately estimates transform parameters and ERGM
#' parameters. In the case that data_transformation = NULL, this argument is
#' ignored. Default is 10.
#' @param maximum_number_of_theta_updates Maximum number of iterations within the
#' MCMC inner loop which estimates the ERGM parameters. Default is 100.
#' @param number_of_networks_to_simulate Number of simulations generated for
#' estimation via MCMC. Default is 500.
#' @param thin The proportion of samples that are kept from each simulation. For
#' example, thin = 1/200 will keep every 200th network in the overall simulated
#' sample. Default is 1.
#' @param proposal_variance The variance specified for the Metropolis Hastings
#' simulation method. This parameter is inversely proportional to the average
#' acceptance rate of the M-H sampler and should be adjusted so that the average
#' acceptance rate is approximately 0.25. Default is 0.1.
#' @param downweight_statistics_together Logical specifying whether or not the
#' weights should be applied inside or outside the sum. Default is TRUE and user
#' should not select FALSE under normal circumstances.
#' @param MCMC_burnin Number of samples from the MCMC simulation procedure that
#' will be discarded before drawing the samples used for estimation.
#' Default is 100.
#' @param seed Seed used for reproducibility. Default is 123.
#' @param convergence_tolerance Threshold designated for stopping criterion. If
#' the difference of parameter estimates from one iteration to the next all have
#' a p -value (under a paired t-test) greater than this value, the parameter
#' estimates are declared to have converged. Default is 0.01.
#' @param MPLE_gain_factor Multiplicative constant between 0 and 1 that controls
#' how far away the initial theta estimates will be from the standard MPLEs via
#' a one step Fisher update. In the case of strongly dependent data, it is
#' suggested to use a value of 0.10. Default is 0.
#' @param acceptable_fit_p_value_threshold A p-value threshold for how closely
#' statistics of observed network conform to statistics of networks simulated
#' from GERGM parameterized by converged final parameter estimates. Default value
#' is 0.05.
#' @param force_x_theta_updates Defaults to 1 where theta estimation is not
#' allowed to converge until thetas have updated for x iterations . Useful when
#' model is not degenerate but simulated statistics do not match observed network
#' well when algorithm stops after first y updates.
#' @param force_x_lambda_updates Defaults to 1 where lambda estimation is not
#' allowed to converge until lambdas have updated for x iterations . Useful when
#' model is not degenerate but simulated statistics do not match observed network
#' well when algorithm stops after first y updates.
#' @param output_directory The directory where you would like output generated
#' by the GERGM estimation proceedure to be saved (if output_name is specified).
#' This includes, GOF, trace, and parameter estimate plots, as well as a summary
#' of the estimation proceedure and an .Rdata file containing the GERGM object
#' returned by this function. May be left as NULL if the user would prefer all
#' plots be printed to the graphics device.
#' @param output_name The common name stem you would like to assign to all
#' objects output by the gergm function. Default value of NULL will not save any
#' output directly to .pdf files, it will be printed to the console instead. Must
#' be a character string or NULL. For example, if "Test" is supplied as the
#' output_name, then 4 files will be output: "Test_GOF.pdf", "Test_Parameter_Estim
#' ates.pdf", "Test_GERGM_Object.Rdata", "Test_Estimation_Log.txt", and
#' "Test_Trace_Plot.pdf"
#' @param generate_plots Defaults to TRUE, if FALSE, then no diagnostic or
#' parameter plots are generated.
#' @param verbose Defaults to TRUE (providing lots of output while model is
#' running). Can be set to FALSE if the user wishes to see less output.
#' @param omit_intercept_term Defaults to FALSE, can be set to TRUE if the
#' user wishes to omit the model intercept term.
#' @param hyperparameter_optimization Logical indicating whether automatic
#' hyperparameter optimization should be used. Defaults to FALSE. If TRUE, then
#' the algorithm will automatically seek to find an optimal burnin and number of
#' networks to simulate, and if using Metropolis Hasings, will attempt to select
#' a proposal variance that leads to an acceptance rate within +-0.05 of
#' target_accept_rate. Furthermore, if degeneracy is detected, the algorithm
#' will attempt to address the issue automatically. WARNING: This feature is
#' experimental, and may greatly increase runtime. Please monitor console
#' output!
#' @param target_accept_rate The target Metropolis Hastings acceptance rate.
#' Defaults to 0.25
#' @param theta_grid_optimization_list Defaults to NULL. This highly
#' experimental feature may allow the user to address model degeneracy arising
#' from a suboptimal theta initialization. It performs a grid search around the
#' theta values calculated via MPLE to select a potentially improved
#' initialization. The runtime complexity of this feature grows exponentially in
#' the size of the grid and number of parameters -- use with great care. This
#' feature may only be used if hyperparameter_optimization = TRUE, and if a list
#' object of the following form is provided: list(grid_steps = 2,
#' step_size = 0.5, cores = 2, iteration_fraction = 0.5). grid_steps indicates
#' the number of steps out the grid search will perform, step_size indicates the
#' fraction of the MPLE theta estimate that each grid search step will change by,
#' cores indicates the number of cores to be used for parallel optimization, and
#' iteration_fraction indicates the fraction of the number of MCMC iterations
#' that will be used for each grid point (should be set less than 1 to speed up
#' optimization). In general grid_steps should be smaller the more structural
#' parameters the user wishes to specify. For example, with 5 structural
#' paramters (mutual, ttriads, etc.), grid_steps = 3 will result in a (2*3+1)^5
#' = 16807 parameter grid search. Again this feature is highly experimental and
#' should only be used as a last resort (after playing with exponential
#' downweighting and the MPLE_gain_factor).
#' @param ... Optional arguments, currently unsupported.
#' @return A gergm object containing parameter estimates.
#' @examples
#' \dontrun{
#' set.seed(12345)
#' net <- matrix(rnorm(100,0,20),10,10)
#' colnames(net) <- rownames(net) <- letters[1:10]
#' formula <- net ~ mutual + ttriads
#'
#' test <- gergm(formula,
#' normalization_type = "division",
#' network_is_directed = TRUE,
#' use_MPLE_only = FALSE,
#' estimation_method = "Metropolis",
#' number_of_networks_to_simulate = 40000,
#' thin = 1/10,
#' proposal_variance = 0.5,
#' downweight_statistics_together = TRUE,
#' MCMC_burnin = 10000,
#' seed = 456,
#' convergence_tolerance = 0.01,
#' MPLE_gain_factor = 0,
#' force_x_theta_updates = 4)
#' }
#' @export
gergm <- function(formula,
                  covariate_data = NULL,
                  normalization_type = c("log","division"),
                  network_is_directed = c(TRUE, FALSE),
                  use_MPLE_only = c(FALSE, TRUE),
                  transformation_type = c("Cauchy","LogCauchy","Gaussian","LogNormal"),
                  estimation_method = c("Gibbs", "Metropolis"),
                  maximum_number_of_lambda_updates = 10,
                  maximum_number_of_theta_updates = 10,
                  number_of_networks_to_simulate = 500,
                  thin = 1,
                  proposal_variance = 0.1,
                  downweight_statistics_together = TRUE,
                  MCMC_burnin = 100,
                  seed = 123,
                  convergence_tolerance = 0.01,
                  MPLE_gain_factor = 0,
                  acceptable_fit_p_value_threshold = 0.05,
                  force_x_theta_updates = 1,
                  force_x_lambda_updates = 1,
                  output_directory = NULL,
                  output_name = NULL,
                  generate_plots = TRUE,
                  verbose = TRUE,
                  omit_intercept_term = FALSE,
                  hyperparameter_optimization = FALSE,
                  target_accept_rate = 0.25,
                  theta_grid_optimization_list = NULL,
                  ...
                  ) {
  # Experimental features are passed in through the ellipsis so they do not
  # clutter the public signature.
  weighted_MPLE <- FALSE
  object <- as.list(substitute(list(...)))[-1L]
  if (length(object) > 0) {
    if (!is.null(object$weighted_MPLE)) {
      if (object$weighted_MPLE) {
        weighted_MPLE <- TRUE
        cat("Using experimental weighted_MPLE...\n")
      }
    }
  }
  # Hard coded sets of statistics/terms the formula parser recognizes.
  possible_structural_terms <- c("out2stars",
                                 "in2stars",
                                 "ctriads",
                                 "mutual",
                                 "ttriads",
                                 "edges")
  possible_structural_terms_undirected <- c("edges",
                                            "twostars",
                                            "ttriads")
  possible_covariate_terms <- c("absdiff",
                                "nodecov",
                                "nodematch",
                                "sender",
                                "receiver",
                                "intercept",
                                "nodemix")
  possible_network_terms <- "netcov"
  possible_transformations <- c("cauchy",
                                "logcauchy",
                                "gaussian",
                                "lognormal")
  # Resolve multi-option defaults to their first element BEFORE first use.
  # BUG FIX: network_is_directed is consulted just below while it could still
  # be the length-two default c(TRUE, FALSE); a length > 1 condition in if()
  # is an error under R >= 4.2.
  use_MPLE_only <- use_MPLE_only[1]             # default is FALSE
  network_is_directed <- network_is_directed[1] # default is TRUE
  estimation_method <- estimation_method[1]     # default is Gibbs
  transformation_type <- transformation_type[1] # default is "Cauchy"
  transformation_type <- tolower(transformation_type)
  normalization_type <- normalization_type[1]
  # Translate undirected structural terms into their directed equivalents.
  if (!network_is_directed) {
    formula <- parse_undirected_structural_terms(
      formula,
      possible_structural_terms,
      possible_structural_terms_undirected)
  }
  # Automatically add an intercept term unless omit_intercept_term is TRUE.
  if (!omit_intercept_term) {
    formula <- add_intercept_term(formula)
    # Check for a user supplied edges statistic, which would duplicate the
    # automatically added intercept and destabilize estimation.
    form <- as.formula(formula)
    parsed <- deparse(form)
    if (length(parsed) > 1) {
      parsed <- paste0(parsed, collapse = " ")
    }
    if (grepl("edges", parsed)) {
      stop("You may not specify an edges statistic if omit_intercept_term == FALSE as this will introduce two identical intercept terms and instability in the model. An intercept term is automatically added in the lambda transformation step unless omit_intercept_term == TRUE, and we have found this method of adding an intercept to be less prone to degeneracy.")
    }
  }
  # Argument validation.
  if (is.null(output_directory) && !is.null(output_name)) {
    stop("You have specified an output file name but no output directory. Please specify both or neither.")
  }
  if (!(transformation_type %in% possible_transformations)) {
    stop("You have specified a transformation that is not recognized. Please specify one of: Cauchy, LogCauchy, Gaussian, or LogNormal")
  }
  # The Metropolis proposal variance must be strictly positive.
  if (proposal_variance <= 0) {
    stop("You supplied a proposal variance that was less than or equal to zero.")
  }
  formula <- as.formula(formula)
  # 0. Prepare the network and covariates.
  Transformed_Data <- Prepare_Network_and_Covariates(
    formula,
    possible_structural_terms,
    possible_covariate_terms,
    possible_network_terms,
    covariate_data = covariate_data,
    normalization_type = normalization_type,
    is_correlation_network = FALSE,
    is_directed = network_is_directed,
    beta_correlation_model = FALSE)
  data_transformation <- NULL
  if (!is.null(Transformed_Data$transformed_covariates)) {
    data_transformation <- Transformed_Data$transformed_covariates
  }
  gpar.names <- c(Transformed_Data$gpar.names, "dispersion")
  # 1. Create the GERGM object from the formula and transformed data.
  GERGM_Object <- Create_GERGM_Object_From_Formula(
    formula,
    theta.coef = NULL,
    possible_structural_terms,
    possible_covariate_terms,
    possible_network_terms,
    raw_network = Transformed_Data$network,
    together = 1,
    transform.data = data_transformation,
    lambda.coef = NULL,
    transformation_type = transformation_type,
    is_correlation_network = FALSE,
    is_directed = network_is_directed,
    beta_correlation_model = FALSE)
  GERGM_Object@theta_estimation_converged <- FALSE
  GERGM_Object@lambda_estimation_converged <- FALSE
  GERGM_Object@observed_network <- GERGM_Object@network
  GERGM_Object@observed_bounded_network <- GERGM_Object@bounded.network
  GERGM_Object@simulation_only <- FALSE
  GERGM_Object@transformation_type <- transformation_type
  GERGM_Object@downweight_statistics_together <- downweight_statistics_together
  GERGM_Object@directed_network <- network_is_directed
  # Only record grid optimization settings if a list was actually supplied.
  # (is.list() is robust where class(x) == "list" fails for multi-class objects.)
  GERGM_Object@using_grid_optimization <- FALSE
  if (is.list(theta_grid_optimization_list)) {
    GERGM_Object@using_grid_optimization <- TRUE
    GERGM_Object@theta_grid_optimization_list <- theta_grid_optimization_list
  }
  if (!is.null(data_transformation)) {
    GERGM_Object@data_transformation <- data_transformation
  }
  if (is.null(output_name)) {
    GERGM_Object@print_output <- FALSE
  } else {
    GERGM_Object@print_output <- TRUE
  }
  # This entry point never fits correlation / beta correlation models.
  GERGM_Object@is_correlation_network <- FALSE
  GERGM_Object@beta_correlation_model <- FALSE
  GERGM_Object@weighted_MPLE <- weighted_MPLE
  # Adaptive Metropolis / simulation parameters.
  GERGM_Object@hyperparameter_optimization <- hyperparameter_optimization
  GERGM_Object@target_accept_rate <- target_accept_rate
  GERGM_Object@proposal_variance <- proposal_variance
  GERGM_Object@estimation_method <- estimation_method
  GERGM_Object@number_of_simulations <- number_of_networks_to_simulate
  GERGM_Object@thin <- thin
  GERGM_Object@burnin <- MCMC_burnin
  GERGM_Object@MPLE_gain_factor <- MPLE_gain_factor
  # 2. Estimate the GERGM.
  GERGM_Object <- Estimate_GERGM(formula,
                                 MPLE.only = use_MPLE_only,
                                 max.num.iterations = maximum_number_of_lambda_updates,
                                 mc.num.iterations = maximum_number_of_theta_updates,
                                 seed = seed,
                                 tolerance = convergence_tolerance,
                                 possible.stats = possible_structural_terms,
                                 GERGM_Object = GERGM_Object,
                                 force_x_theta_updates = force_x_theta_updates,
                                 verbose = verbose,
                                 force_x_lambda_updates = force_x_lambda_updates)
  # 3. Degeneracy diagnostics and goodness of fit.
  if (!GERGM_Object@theta_estimation_converged) {
    warning("Estimation procedure did not detect convergence in Theta estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
    GERGM_Object <- store_console_output(GERGM_Object,"Estimation procedure did not detect convergence in Theta estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
  }
  if (!GERGM_Object@lambda_estimation_converged) {
    warning("Estimation procedure did not detect convergence in Lambda estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
    GERGM_Object <- store_console_output(GERGM_Object,"Estimation procedure did not detect convergence in Lambda estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
  }
  # Now simulate from the last update of the theta parameters.
  GERGM_Object <- Simulate_GERGM(GERGM_Object,
                                 seed1 = seed,
                                 possible.stats = possible_structural_terms)
  colnames(GERGM_Object@lambda.coef) <- gpar.names
  num.nodes <- GERGM_Object@num_nodes
  triples <- t(combn(1:num.nodes, 3))
  # Change back column names if we are dealing with an undirected network.
  if (!network_is_directed) {
    change <- which(colnames(GERGM_Object@theta.coef) == "in2stars")
    if (length(change) > 0) {
      colnames(GERGM_Object@theta.coef)[change] <- "twostars"
    }
  }
  init.statistics <- NULL
  if (GERGM_Object@is_correlation_network) {
    init.statistics <- h2(GERGM_Object@network,
                          triples = triples,
                          statistics = rep(1, length(possible_structural_terms)),
                          alphas = GERGM_Object@weights,
                          together = downweight_statistics_together)
  } else {
    init.statistics <- h2(GERGM_Object@bounded.network,
                          triples = triples,
                          statistics = rep(1, length(possible_structural_terms)),
                          alphas = GERGM_Object@weights,
                          together = downweight_statistics_together)
  }
  # Fix issue with the wrong stats being saved.
  GERGM_Object@stats[2, ] <- init.statistics
  hsn.tot <- GERGM_Object@MCMC_output$Statistics
  # Save these statistics so we can make GOF plots in the future, otherwise
  # they would be the transformed statistics which would produce poor GOF plots.
  GERGM_Object@simulated_statistics_for_GOF <- hsn.tot
  # Thin the statistics before testing.
  hsn.tot <- Thin_Statistic_Samples(hsn.tot)
  # t-test p-values comparing means of the simulated statistics against the
  # statistics of the observed network.
  statistic_test_p_values <- rep(NA, length(possible_structural_terms))
  for (i in seq_along(possible_structural_terms)) {
    statistic_test_p_values[i] <- round(t.test(hsn.tot[, i],
      mu = init.statistics[i])$p.value, 3)
  }
  stats.data <- data.frame(Observed = init.statistics,
                           Simulated = colMeans(hsn.tot))
  rownames(stats.data) <- possible_structural_terms
  cat("Statistics of observed network and networks simulated from final theta parameter estimates:\n")
  GERGM_Object <- store_console_output(GERGM_Object,"Statistics of observed network and networks simulated from final theta parameter estimates:\n")
  GERGM_Object <- store_console_output(GERGM_Object, toString(stats.data))
  statistic_test_p_values <- data.frame(p_values = statistic_test_p_values)
  rownames(statistic_test_p_values) <- possible_structural_terms
  cat("\nt-test p-values for statistics of observed network and networks simulated from final theta parameter estimates:\n \n")
  GERGM_Object <- store_console_output(GERGM_Object,"\nt-test p-values for statistics of observed network and networks simulated from final theta parameter estimates:\n \n")
  print(statistic_test_p_values)
  GERGM_Object <- store_console_output(GERGM_Object, toString(statistic_test_p_values))
  colnames(statistic_test_p_values) <- "p_values"
  GERGM_Object@observed_simulated_t_test <- statistic_test_p_values
  # Test whether we have an acceptable fit at the supplied threshold.
  acceptable_fit <- statistic_test_p_values[which(GERGM_Object@stats_to_use == 1), 1]
  if (min(acceptable_fit) > acceptable_fit_p_value_threshold) {
    GERGM_Object@acceptable_fit <- TRUE
    message("Parameter estimates simulate networks that are statistically indistinguishable from observed network on the statistics specified by the user. ")
    GERGM_Object <- store_console_output(GERGM_Object,"Parameter estimates simulate networks that are statistically indistinguishable from observed network on the statistics specified by the user. ")
  } else {
    GERGM_Object@acceptable_fit <- FALSE
    message("Parameter estimates simulate networks that are statistically distinguishable from observed network. Check GOF plots to determine if the model provides a reasonable fit . This is a very stringent test for goodness of fit, so results may still be acceptable even if this criterion is not met.")
    GERGM_Object <- store_console_output(GERGM_Object, "Parameter estimates simulate networks that are statistically distinguishable from observed network. Check GOF plots to determine if the model provides a reasonable fit . This is a very stringent test for goodness of fit, so results may still be acceptable even if this criterion is not met.")
  }
  # 4. Output everything to the appropriate files and return the GERGM object.
  if (generate_plots) {
    # Only write files to disk if output_name was supplied.
    if (!is.null(output_name)) {
      if (is.null(output_directory)) {
        output_directory <- getwd()
      }
      current_directory <- getwd()
      setwd(output_directory)
      # Restore the working directory even if plotting fails unexpectedly.
      on.exit(setwd(current_directory), add = TRUE)
      try({
        pdf(file = paste(output_name, "_GOF.pdf", sep = ""), height = 4, width = 8)
        GOF(GERGM_Object)
        dev.off()
        pdf(file = paste(output_name, "_Parameter_Estimates.pdf", sep = ""), height = 4, width = 5)
        Estimate_Plot(GERGM_Object)
        dev.off()
        pdf(file = paste(output_name, "_Trace_Plot.pdf", sep = ""), height = 4, width = 6)
        Trace_Plot(GERGM_Object)
        dev.off()
        save(GERGM_Object, file = paste(output_name, "_GERGM_Object.Rdata", sep = ""))
        write.table(GERGM_Object@console_output, file = paste(output_name, "_Estimation_Log.txt", sep = ""), row.names = FALSE, col.names = FALSE, fileEncoding = "utf8", quote = FALSE)
      })
      setwd(current_directory)
    } else {
      # If we are not saving everything to a directory then just print the
      # diagnostics to the active graphics device.
      try({
        GOF(GERGM_Object)
        Sys.sleep(2)
        Estimate_Plot(GERGM_Object)
        Sys.sleep(2)
        Trace_Plot(GERGM_Object)
      })
    }
  }
  # Transform simulated networks back to the observed scale.
  cat("Transforming networks simulated via MCMC as part of the fit diagnostics back on to the scale of observed network. You can access these networks through the '@MCMC_output$Networks' field returned by this function...\n")
  GERGM_Object <- Convert_Simulated_Networks_To_Observed_Scale(GERGM_Object)
  return(GERGM_Object)
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the cross-validated resampling below is reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "analcatdata_cyyoung8092")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Binary classification of Cy_Young with an SVM; probabilities are requested
# so that AUC can be computed during resampling.
task <- makeClassifTask(id = "task", data = dataset$data, target = "Cy_Young")
lrn <- makeLearner("classif.svm", par.vals = list(), predict.type = "prob")
#:# hash
#:# 4df92626769ebc63fd1edc93a3d3c643
# Hash of the task + learner pair identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a panel of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink("sessionInfo.txt")
sessionInfo()
sink()
| /models/openml_analcatdata_cyyoung8092/classification_Cy_Young/4df92626769ebc63fd1edc93a3d3c643/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 697 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the cross-validated resampling below is reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "analcatdata_cyyoung8092")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
# Binary classification of Cy_Young with an SVM; probabilities are requested
# so that AUC can be computed during resampling.
task <- makeClassifTask(id = "task", data = dataset$data, target = "Cy_Young")
lrn <- makeLearner("classif.svm", par.vals = list(), predict.type = "prob")
#:# hash
#:# 4df92626769ebc63fd1edc93a3d3c643
# Hash of the task + learner pair identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a panel of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink("sessionInfo.txt")
sessionInfo()
sink()
|
# Arguments: pheno: numeric vector with no NAs containing the phenotype.
# probs: 3D numeric array of haplotype probabilities.
# K: numeric matrix.
#fast.qtlrel.hap = function(pheno, probs, K, addcovar, snps) {
#single kinship A
# Command line arguments: effect size, sample size, and a seed.
args <- commandArgs(TRUE)
eff <- as.numeric(args[1])
samp.size <- as.numeric(args[2])
iseed <- as.numeric(args[3]) # NOTE(review): read but never used below -- confirm whether set.seed(iseed) was intended before sample()
#eff<-0.2
#samp.size<-878
setwd("/home/kingeg/Projects/BeavisProj/")
source("Functions/hacked_DOQTL.R")
library(regress)
library(DSPRqtl)
data(positionlist_wgenetic)
load(file="Data/DSPR/pA_bigarray.rda")
load("Data/DSPR/Kin_LOCO_A.rda")
#all.equal(rownames(Phys.Out),dimnames(big.array)[[1]])
#mapall<-function(eff,samp.size)
#{
fname <- paste("Data/DSPR/SimPhenos/pheno_biallelic_", 100*eff, ".rda", sep="")
load(file=fname)
pheno$CHROM <- as.character(pheno$CHROM)
pheno <- pheno[1:1000, ]
lod.matrix <- vector('list', nrow(pheno))
for (j in seq_len(nrow(pheno))) {
  # Sample a subset of RILs; pull their phenotype and the chromosome-specific
  # (leave-one-chromosome-out) kinship matrix.
  rilset <- sample(colnames(pheno[, 4:ncol(pheno)]), samp.size)
  phenotype <- as.numeric(pheno[j, rilset])
  KK <- kinall[[pheno$CHROM[j]]][rilset, rilset]
  pp <- pheno$POS[j]
  ind <- which(poslist$chr==pheno$CHROM[j] & poslist$Ppos==(round(pp/1e4, 0)*1e4))
  # Window of +/- 500 map positions around the focal marker, clamped to the map.
  st <- ind - 500
  # BUG FIX: R indexing is 1-based. The original guard `if (st < 0) st <- 0`
  # left st == 0 (silently dropped by subscripting) and errored for negative
  # st in st:end (mixed negative/positive subscripts). Clamping to 1 preserves
  # behavior in all previously-working cases and fixes the crash.
  if (st < 1) { st <- 1 }
  end <- ind + 500
  if (end > nrow(poslist)) { end <- nrow(poslist) }
  small.array <- big.array[rilset, , st:end]
  pos.set <- poslist[st:end, c('Ppos', 'chr', 'Ppos', 'Gpos')]
  names(pos.set) <- c('SNP_ID', 'CHROM', 'Mb', 'cM')
  pos.set$Mb <- pos.set$Mb/1e6
  lod.set <- fast.qtlrel.hap(pheno=phenotype, probs=small.array, K=KK, snps=pos.set)
  lod.matrix[[j]] <- lod.set[['lod']][, c('perc.var', 'lod', 'neg.log10.p')]
  cat(j, Sys.time(), "\n")
}
oname <- paste("Data/DSPR/Obs_LODs/LODseed_biallelic_KLOCO_E", 100*eff, "_S", samp.size, ".rda", sep="")
save(lod.matrix, file=oname)
#}
| /Scripts/mapping_DSPR.R | permissive | egking/QTLbiasSIM | R | false | false | 1,834 | r | # Arguments: pheno: numeric vector with no NAs containing the phenotype.
# probs: 3D numeric array of haplotype probabilities.
# K: numeric matrix.
#fast.qtlrel.hap = function(pheno, probs, K, addcovar, snps) {
#single kinship A
# Command line arguments: effect size, sample size, and a seed.
args <- commandArgs(TRUE)
eff <- as.numeric(args[1])
samp.size <- as.numeric(args[2])
iseed <- as.numeric(args[3]) # NOTE(review): read but never used below -- confirm whether set.seed(iseed) was intended before sample()
#eff<-0.2
#samp.size<-878
setwd("/home/kingeg/Projects/BeavisProj/")
source("Functions/hacked_DOQTL.R")
library(regress)
library(DSPRqtl)
data(positionlist_wgenetic)
load(file="Data/DSPR/pA_bigarray.rda")
load("Data/DSPR/Kin_LOCO_A.rda")
#all.equal(rownames(Phys.Out),dimnames(big.array)[[1]])
#mapall<-function(eff,samp.size)
#{
fname <- paste("Data/DSPR/SimPhenos/pheno_biallelic_", 100*eff, ".rda", sep="")
load(file=fname)
pheno$CHROM <- as.character(pheno$CHROM)
pheno <- pheno[1:1000, ]
lod.matrix <- vector('list', nrow(pheno))
for (j in seq_len(nrow(pheno))) {
  # Sample a subset of RILs; pull their phenotype and the chromosome-specific
  # (leave-one-chromosome-out) kinship matrix.
  rilset <- sample(colnames(pheno[, 4:ncol(pheno)]), samp.size)
  phenotype <- as.numeric(pheno[j, rilset])
  KK <- kinall[[pheno$CHROM[j]]][rilset, rilset]
  pp <- pheno$POS[j]
  ind <- which(poslist$chr==pheno$CHROM[j] & poslist$Ppos==(round(pp/1e4, 0)*1e4))
  # Window of +/- 500 map positions around the focal marker, clamped to the map.
  st <- ind - 500
  # BUG FIX: R indexing is 1-based. The original guard `if (st < 0) st <- 0`
  # left st == 0 (silently dropped by subscripting) and errored for negative
  # st in st:end (mixed negative/positive subscripts). Clamping to 1 preserves
  # behavior in all previously-working cases and fixes the crash.
  if (st < 1) { st <- 1 }
  end <- ind + 500
  if (end > nrow(poslist)) { end <- nrow(poslist) }
  small.array <- big.array[rilset, , st:end]
  pos.set <- poslist[st:end, c('Ppos', 'chr', 'Ppos', 'Gpos')]
  names(pos.set) <- c('SNP_ID', 'CHROM', 'Mb', 'cM')
  pos.set$Mb <- pos.set$Mb/1e6
  lod.set <- fast.qtlrel.hap(pheno=phenotype, probs=small.array, K=KK, snps=pos.set)
  lod.matrix[[j]] <- lod.set[['lod']][, c('perc.var', 'lod', 'neg.log10.p')]
  cat(j, Sys.time(), "\n")
}
oname <- paste("Data/DSPR/Obs_LODs/LODseed_biallelic_KLOCO_E", 100*eff, "_S", samp.size, ".rda", sep="")
save(lod.matrix, file=oname)
#}
|
library(assertive.matrices)
### Name: assert_is_diagonal_matrix
### Title: Is the input a diagonal matrix?
### Aliases: assert_is_diagonal_matrix is_diagonal_matrix
### ** Examples
# A 3x3 identity matrix is diagonal.
x <- diag(3)
is_diagonal_matrix(x)
# Perturb one off-diagonal entry by 100 * machine epsilon.
# NOTE(review): the example layout suggests this is still within the
# function's numeric tolerance (reported diagonal) while 101 * eps below is
# not -- confirm against the assertive.matrices documentation.
x[1, 2] <- 100 * .Machine$double.eps
is_diagonal_matrix(x)
x[2, 3] <- 101 * .Machine$double.eps
is_diagonal_matrix(x)
| /data/genthat_extracted_code/assertive.matrices/examples/is_diagonal_matrix.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 341 | r | library(assertive.matrices)
### Name: assert_is_diagonal_matrix
### Title: Is the input a diagonal matrix?
### Aliases: assert_is_diagonal_matrix is_diagonal_matrix
### ** Examples
# A 3x3 identity matrix is diagonal.
x <- diag(3)
is_diagonal_matrix(x)
# Perturb one off-diagonal entry by 100 * machine epsilon.
# NOTE(review): the example layout suggests this is still within the
# function's numeric tolerance (reported diagonal) while 101 * eps below is
# not -- confirm against the assertive.matrices documentation.
x[1, 2] <- 100 * .Machine$double.eps
is_diagonal_matrix(x)
x[2, 3] <- 101 * .Machine$double.eps
is_diagonal_matrix(x)
|
#' hear no evil
#'
#' A function that will only "hear" an input containing variants on the string `"no evil"`.
#' `Hear` is not case sensitive and will remove punctuation so `"no_Evil"` and `"nO- _eVil"` will also be heard.
#' `Hear` is also sensitive to value names so passing an object `no_evil <- "asdf"` containing any arbitrary strings will also be heard.
#' Anything that is not specifically `"no evil"` or some acceptable variant is assumed to be evil and will not be heard.
#'
#' @author Greg Pilgrim \email{gpilgrim2670@@gmail.com}
#'
#' @param x an input
#' @return The string \code{"I hear you!"} if `x` (or the name of the object
#'   passed as `x`) contains a variant of "no evil", otherwise
#'   \code{"I can't hear you!"}.
#'
#' @export
#'
#' @examples
#' Hear(1)
#' Hear("No Evil")
#' no_evil <- "good stuff"
#' Hear(no_evil)
#' evil <- "good_stuff"
#' Hear(evil)
#' Hear(NA)
Hear <- function(x) {
  # Normalize the *name* of the supplied argument so that objects named
  # e.g. `no_evil` are also "heard", per the documented contract.
  var_name <- tolower(deparse(substitute(x)))
  var_name <- gsub("[[:punct:]]", " ", var_name)
  # NA can never be heard (and would break the string handling below).
  if (is.na(x)) {
    return("I can't hear you!")
  }
  # Normalize the value itself: lower case, punctuation replaced by spaces.
  x <- tolower(x)
  x <- gsub("[[:punct:]]", " ", x)
  if (grepl("no\\s{0,}evil", x) || grepl("no\\s{0,}evil", var_name)) {
    "I hear you!"
  } else {
    "I can't hear you!"
  }
}
| /R/Hear.R | no_license | cran/ThreeWiseMonkeys | R | false | false | 1,263 | r | #' hear no evil
#'
#' A function that will only "hear" an input containing variants on the string `"no evil"`.
#' `Hear` is not case sensitive and will remove punctuation so `"no_Evil"` and `"nO- _eVil"` will also be heard.
#' `Hear` is also sensitive to value names so passing an object `no_evil <- "asdf"` containing any arbitrary strings will also be heard.
#' Anything that is not specifically `"no evil"` or some acceptable variant is assumed to be evil and will not be heard.
#'
#' @author Greg Pilgrim \email{gpilgrim2670@@gmail.com}
#'
#' @param x an input
#' @return The string \code{"I hear you!"} if `x` (or the name of the object
#'   passed as `x`) contains a variant of "no evil", otherwise
#'   \code{"I can't hear you!"}.
#'
#' @export
#'
#' @examples
#' Hear(1)
#' Hear("No Evil")
#' no_evil <- "good stuff"
#' Hear(no_evil)
#' evil <- "good_stuff"
#' Hear(evil)
#' Hear(NA)
Hear <- function(x) {
  # Normalize the *name* of the supplied argument so that objects named
  # e.g. `no_evil` are also "heard", per the documented contract.
  var_name <- tolower(deparse(substitute(x)))
  var_name <- gsub("[[:punct:]]", " ", var_name)
  # NA can never be heard (and would break the string handling below).
  if (is.na(x)) {
    return("I can't hear you!")
  }
  # Normalize the value itself: lower case, punctuation replaced by spaces.
  x <- tolower(x)
  x <- gsub("[[:punct:]]", " ", x)
  if (grepl("no\\s{0,}evil", x) || grepl("no\\s{0,}evil", var_name)) {
    "I hear you!"
  } else {
    "I can't hear you!"
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{remove_clutter}
\alias{remove_clutter}
\title{Utilities for the maRketingscience package}
\usage{
remove_clutter(input)
}
\description{
This function allows you to quickly create a dummy variable for controlling outliers
}
\examples{
remove_clutter(input)
}
\keyword{for}
\keyword{maRketingscience}
\keyword{utilities}
| /man/remove_clutter.Rd | no_license | benetheking/maRketingscience | R | false | true | 406 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{remove_clutter}
\alias{remove_clutter}
\title{Utilities for the maRketingscience package}
\usage{
remove_clutter(input)
}
\description{
This function allows you to quickly create a dummy variable for controlling outliers
}
\examples{
remove_clutter(input)
}
\keyword{for}
\keyword{maRketingscience}
\keyword{utilities}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_info.R, R/model_info.R
\name{data_info}
\alias{data_info}
\alias{data_info.pdb_posterior}
\alias{data_info.character}
\alias{pdb_data_info}
\alias{model_info}
\alias{model_info.pdb_posterior}
\alias{model_info.character}
\title{Access data and model information}
\usage{
data_info(x, ...)
\method{data_info}{pdb_posterior}(x, ...)
\method{data_info}{character}(x, pdb = pdb_default(), ...)
pdb_data_info(x, ...)
model_info(x, ...)
\method{model_info}{pdb_posterior}(x, ...)
\method{model_info}{character}(x, pdb = pdb_default(), ...)
}
\arguments{
\item{x}{an object to access information for.}
\item{...}{further arguments to methods.}
\item{pdb}{a \code{pdb} object.}
}
\description{
Access data and model information
}
| /rpackage/man/model_info.Rd | no_license | eerolinna/posterior_database | R | false | true | 812 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_info.R, R/model_info.R
\name{data_info}
\alias{data_info}
\alias{data_info.pdb_posterior}
\alias{data_info.character}
\alias{pdb_data_info}
\alias{model_info}
\alias{model_info.pdb_posterior}
\alias{model_info.character}
\title{Access data and model information}
\usage{
data_info(x, ...)
\method{data_info}{pdb_posterior}(x, ...)
\method{data_info}{character}(x, pdb = pdb_default(), ...)
pdb_data_info(x, ...)
model_info(x, ...)
\method{model_info}{pdb_posterior}(x, ...)
\method{model_info}{character}(x, pdb = pdb_default(), ...)
}
\arguments{
\item{x}{an object to access information for.}
\item{...}{further arguments to methods.}
\item{pdb}{a \code{pdb} object.}
}
\description{
Access data and model information
}
|
################################################################################################################
### After dN/dS calculation for each cancer type, we need to bind the mutation tables together.
### Then, we need to calculate the score for each possible codon for each significantly mutated gene.
################################################################################################################
cancername <- c("DLBC","GBM","KIRC","LAML","LIHC","OV","PCPG","THYM","UVM","CHOL","ESCA","KICH","KIRP","LGG","MESO","PAAD","TGCT","UCS",
                "CESC","BRCA","HNSC","BLCA","STAD","LUSC","LUAD","COAD","UCEC","THCA","SARC","PRAD","READ","SKCM")
# Seed the combined table with ACC, read with a header to establish columns.
data.initial <- read.table(file="/Share/home/lanxun5/Data/TCGA/MAF_TCGA/csv_format/results/ACC", header=TRUE, sep="\t")
colname <- colnames(data.initial) # NOTE(review): currently unused downstream
data.initial <- as.matrix(data.initial)
# Append every other cancer type's mutation table (headerless; first column
# dropped). NOTE(review): growing a matrix with rbind() in a loop is O(n^2);
# acceptable for 32 files, but preallocate / do.call(rbind, ...) if this grows.
for (i in seq_along(cancername)) {
  cancertype <- cancername[i]
  data.between <- read.table(file=cancertype, header=FALSE, sep="\t")
  data.between <- data.between[, -1]
  data.between <- as.matrix(data.between)
  data.initial <- rbind(data.initial, data.between)
}
data.initial <- as.data.frame(data.initial)
genename <- as.character(data.initial$gene)
table <- as.data.frame(table(genename))
table[which(table$Freq==1), ]
####################################################################################################################
strand.plus.missense <- data.initial
uniq.gene <- unique(strand.plus.missense$gene)
happy.results <- data.frame()
genename <- character()
# For every gene, count mutations per codon and sum their "true.value".
for (i in seq_along(uniq.gene)) {
  genename[i] <- as.character(uniq.gene[i])
  test <- strand.plus.missense[which(strand.plus.missense$gene==genename[i]), ]
  uniq.codon <- as.character(unique(test$protein.position))
  count <- numeric()
  total.value <- numeric()
  codon.position <- character()
  for (j in seq_along(uniq.codon)) {
    codon.position[j] <- uniq.codon[j]
    test.1 <- test[which(test$protein.position==codon.position[j]), ]
    count[j] <- nrow(test.1)
    total.value[j] <- sum(as.numeric(as.character((test.1$true.value))))
  }
  happy <- data.frame(gene = genename[i], codon.position = codon.position, count = count, total.value = total.value)
  happy.results <- rbind.data.frame(happy.results, happy)
}
# Per-codon score: mutation count relative to summed value, then inverted.
happy.results$absolute <- happy.results[, 3]/happy.results[, 4]
happy.results$`1/absolute` <- 1/happy.results[, 5]
# Min-max rescale 1/absolute to [0, 1] within each gene.
score.correction <- data.frame()
for (i in seq_along(uniq.gene)) {
  ctnnb1 <- happy.results[which(happy.results$gene==as.character(uniq.gene[i])), ]
  ctnnb1$scale <- (ctnnb1$`1/absolute`-min(ctnnb1$`1/absolute`))/(max(ctnnb1$`1/absolute`)-min(ctnnb1$`1/absolute`))
  score.correction <- rbind.data.frame(score.correction, ctnnb1)
}
write.table(score.correction,file="score.correction",sep="\t") | /Sample_Mutation_Correction/2Scoring_Every_Possible_codon_for_Significantly_Mutated_gene.R | permissive | JiFansen/TCGA_Project | R | false | false | 2,761 | r | ################################################################################################################
### After dN/dS calculation for each cancer type, we need to bind the mutation tables together.
### Then, we need to calculate the score for each possible codon for each significantly mutated gene.
################################################################################################################
cancername <- c("DLBC","GBM","KIRC","LAML","LIHC","OV","PCPG","THYM","UVM","CHOL","ESCA","KICH","KIRP","LGG","MESO","PAAD","TGCT","UCS",
                "CESC","BRCA","HNSC","BLCA","STAD","LUSC","LUAD","COAD","UCEC","THCA","SARC","PRAD","READ","SKCM")
# Seed the combined table with ACC, read with a header to establish columns.
data.initial <- read.table(file="/Share/home/lanxun5/Data/TCGA/MAF_TCGA/csv_format/results/ACC", header=TRUE, sep="\t")
colname <- colnames(data.initial) # NOTE(review): currently unused downstream
data.initial <- as.matrix(data.initial)
# Append every other cancer type's mutation table (headerless; first column
# dropped). NOTE(review): growing a matrix with rbind() in a loop is O(n^2);
# acceptable for 32 files, but preallocate / do.call(rbind, ...) if this grows.
for (i in seq_along(cancername)) {
  cancertype <- cancername[i]
  data.between <- read.table(file=cancertype, header=FALSE, sep="\t")
  data.between <- data.between[, -1]
  data.between <- as.matrix(data.between)
  data.initial <- rbind(data.initial, data.between)
}
data.initial <- as.data.frame(data.initial)
genename <- as.character(data.initial$gene)
table <- as.data.frame(table(genename))
table[which(table$Freq==1), ]
####################################################################################################################
strand.plus.missense <- data.initial
uniq.gene <- unique(strand.plus.missense$gene)
happy.results <- data.frame()
genename <- character()
# For every gene, count mutations per codon and sum their "true.value".
for (i in seq_along(uniq.gene)) {
  genename[i] <- as.character(uniq.gene[i])
  test <- strand.plus.missense[which(strand.plus.missense$gene==genename[i]), ]
  uniq.codon <- as.character(unique(test$protein.position))
  count <- numeric()
  total.value <- numeric()
  codon.position <- character()
  for (j in seq_along(uniq.codon)) {
    codon.position[j] <- uniq.codon[j]
    test.1 <- test[which(test$protein.position==codon.position[j]), ]
    count[j] <- nrow(test.1)
    total.value[j] <- sum(as.numeric(as.character((test.1$true.value))))
  }
  happy <- data.frame(gene = genename[i], codon.position = codon.position, count = count, total.value = total.value)
  happy.results <- rbind.data.frame(happy.results, happy)
}
# Per-codon score: mutation count relative to summed value, then inverted.
happy.results$absolute <- happy.results[, 3]/happy.results[, 4]
happy.results$`1/absolute` <- 1/happy.results[, 5]
# Min-max rescale 1/absolute to [0, 1] within each gene.
score.correction <- data.frame()
for (i in seq_along(uniq.gene)) {
  ctnnb1 <- happy.results[which(happy.results$gene==as.character(uniq.gene[i])), ]
  ctnnb1$scale <- (ctnnb1$`1/absolute`-min(ctnnb1$`1/absolute`))/(max(ctnnb1$`1/absolute`)-min(ctnnb1$`1/absolute`))
  score.correction <- rbind.data.frame(score.correction, ctnnb1)
}
write.table(score.correction,file="score.correction",sep="\t") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_explorer.r
\name{shiny_vis}
\alias{shiny_vis}
\title{A Shiny app that enables the user to explore the cost model}
\usage{
shiny_vis(cost_model)
}
\description{
A Shiny app that enables the user to explore the cost model
}
| /man/shiny_vis.Rd | no_license | moj-analytical-services/costmodelr | R | false | true | 306 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_explorer.r
\name{shiny_vis}
\alias{shiny_vis}
\title{A Shiny app that enables the user to explore the cost model}
\usage{
shiny_vis(cost_model)
}
\description{
A Shiny app that enables the user to explore the cost model
}
|
#' Learn a naive Bayes network structure.
#'
#' Learn a naive Bayes network structure.
#'
#' @export
#' @param class A character. Name of the class variable.
#' @param dataset The data frame from which to learn the classifier.
#' @param features A character vector. The names of the features. This argument
#' is ignored if \code{dataset} is provided.
#' @return A \code{\link{bnc_dag}} object.
#' @examples
#' data(car)
#' nb <- nb('class', car)
#' nb2 <- nb('class', features = letters[1:10])
#' \dontrun{plot(nb2)}
nb <- function(class, dataset = NULL, features = NULL) {
  # A supplied dataset takes precedence: derive the feature set from its
  # columns and ignore any explicitly passed `features`.
  if (!is.null(dataset)) {
    features <- get_features(class = class, dataset = dataset)
  }
  dag <- bnc_dag(nb_dag(class, features), class)
  # Record the generating call on the structure so it can be reproduced.
  add_dag_call_arg(dag, 'nb', call = match.call(), env = parent.frame())
}
#' @export
#' @rdname greedy_wrapper
fssj <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
                 cache_reset = NULL) {
  # Forward search: seed with a class-only network (call metadata stripped
  # so only the structure is reused) and greedily include/join features.
  empty_struct <- remove_dag_call_arg(nb(class = class))
  candidate_features <- get_features(class, dataset)
  searched <- greedy_search(
    class = class, to_include = candidate_features, init = empty_struct,
    step = fssj_step, dataset = dataset, epsilon = epsilon, k = k,
    smooth = smooth, cache_reset = cache_reset
  )
  # Stamp the user's call on the result for reproducibility.
  add_dag_call_arg(searched, fun_name = 'fssj', call = match.call(),
                   env = parent.frame(), force = TRUE)
}
#' @rdname greedy_wrapper
#' @export
bsej <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
                 cache_reset = NULL) {
  # Backward search: seed with a full naive Bayes over all features (call
  # metadata stripped) and greedily eliminate/join.
  init_struct <- remove_dag_call_arg(nb(class = class, dataset))
  searched <- greedy_search(
    class = class, to_include = NULL, init = init_struct,
    step = bsej_step, dataset = dataset, epsilon = epsilon, k = k,
    smooth = smooth, cache_reset = cache_reset
  )
  add_dag_call_arg(searched, fun_name = 'bsej', call = match.call(), env = parent.frame())
}
#' @export
#' @rdname greedy_wrapper
tan_hc <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
                   cache_reset = NULL) {
  # Hill-climbing TAN: start from a full naive Bayes (call metadata
  # stripped) and augment the structure via the `augment_ode` step.
  init_struct <- remove_dag_call_arg(nb(class = class, dataset))
  searched <- greedy_search(
    class = class, to_include = NULL, init = init_struct,
    step = augment_ode, dataset = dataset, epsilon = epsilon, k = k,
    smooth = smooth, cache_reset = cache_reset
  )
  add_dag_call_arg(searched, fun_name = 'tan_hc', call = match.call(),
                   env = parent.frame(), force = TRUE)
}
#' @export
#' @rdname greedy_wrapper
kdb <- function(class, dataset, k, kdbk = 2, epsilon = 0.01, smooth = 0,
                cache_reset = NULL) {
  # k-dependence Bayesian classifier: start from a full naive Bayes (call
  # metadata stripped) and augment via the `augment_kdb(kdbk)` step.
  init_struct <- remove_dag_call_arg(nb(class = class, dataset))
  searched <- greedy_search(
    class = class, to_include = NULL, init = init_struct,
    step = augment_kdb(kdbk), dataset = dataset, epsilon = epsilon, k = k,
    smooth = smooth, cache_reset = cache_reset
  )
  add_dag_call_arg(searched, fun_name = 'kdb', call = match.call(),
                   env = parent.frame(), force = TRUE)
}
#' @export
#' @rdname greedy_wrapper
tan_hcsp <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
                     cache_reset = NULL) {
  # Hill-climbing super-parent TAN: start from a full naive Bayes (call
  # metadata stripped) and augment via the `augment_ode_sp` step.
  init_struct <- remove_dag_call_arg(nb(class = class, dataset))
  searched <- greedy_search(
    class = class, to_include = NULL, init = init_struct,
    step = augment_ode_sp, dataset = dataset, epsilon = epsilon,
    k = k, smooth = smooth, cache_reset = cache_reset
  )
  add_dag_call_arg(searched, fun_name = 'tan_hcsp', call = match.call(), env = parent.frame(),
                   force = TRUE)
}
#' @export
#' @rdname tan_chowliu
tan_cl <- function(class, dataset, score='loglik', root = NULL) {
  # Chow-Liu tree-augmented naive Bayes; no arcs are blacklisted.
  cl_struct <- chowliu(class, dataset, score = score, blacklist = NULL,
                       root = root)
  add_dag_call_arg(cl_struct, fun_name = 'tan_cl', call = match.call(),
                   env = parent.frame(), force = TRUE)
}
#' Learn an AODE ensemble.
#'
#' If there is a single predictor then returns a naive Bayes.
#'
#' @export
#' @inheritParams nb
#' @return A \code{bnc_aode} or a \code{bnc_dag} (if returning a naive Bayes)
aode <- function(class, dataset = NULL, features = NULL) {
  # BUG FIX: `dataset` previously had no default even though the body
  # treats it as optional, so aode(class, features = ...) failed with
  # "argument \"dataset\" is missing". A NULL default is backward
  # compatible with all existing calls.
  if (!is.null(dataset)) {
    features <- get_features(class = class, dataset = dataset)
  }
  # With a single feature the ensemble degenerates to a naive Bayes.
  if (length(features) == 1) return(nb(class = class, features = features))
  names(features) <- features
  # One SPODE per feature, each feature acting once as the super-parent.
  models <- lapply(features, spode, features, class)
  x <- bnc_aode(models = models, class_var = class, features = features)
  add_dag_call_arg(x, fun_name = 'aode', call = match.call(),
                   env = parent.frame(), force = TRUE)
}
#'
#' Learn a naive Bayes network structure.
#'
#' @export
#' @param class A character. Name of the class variable.
#' @param dataset The data frame from which to learn the classifier.
#' @param features A character vector. The names of the features. This argument
#' is ignored if \code{dataset} is provided.
#' @return A \code{\link{bnc_dag}} object.
#' @examples
#' data(car)
#' nb <- nb('class', car)
#' nb2 <- nb('class', features = letters[1:10])
#' \dontrun{plot(nb2)}
nb <- function(class, dataset = NULL, features = NULL) {
# # if dataset is provided features is ignored
if (!is.null(dataset)) {
features <- get_features(class = class, dataset = dataset)
}
nb <- bnc_dag(nb_dag(class, features), class)
add_dag_call_arg(nb, 'nb', call = match.call(), env = parent.frame())
}
#' @export
#' @rdname greedy_wrapper
fssj <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
cache_reset = NULL) {
just_class_nb <- nb(class = class)
# Use just the structure, not the call
just_class_nb <- remove_dag_call_arg(just_class_nb)
features <- get_features(class, dataset)
x <- greedy_search(class = class, to_include = features, init = just_class_nb,
step = fssj_step, dataset = dataset, epsilon = epsilon, k = k,
smooth = smooth, cache_reset = cache_reset)
add_dag_call_arg(x, fun_name = 'fssj', call = match.call(),
env = parent.frame(), force = TRUE)
}
#' @rdname greedy_wrapper
#' @export
bsej <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
cache_reset = NULL) {
full_nb <- nb(class = class, dataset)
full_nb <- remove_dag_call_arg(full_nb)
x <- greedy_search(class = class, to_include = NULL, init = full_nb,
step = bsej_step, dataset = dataset, epsilon = epsilon, k = k,
smooth = smooth, cache_reset = cache_reset)
add_dag_call_arg(x, fun_name = 'bsej', call = match.call(), env = parent.frame())
}
#' @export
#' @rdname greedy_wrapper
tan_hc <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
cache_reset = NULL) {
full_nb <- nb(class = class, dataset)
full_nb <- remove_dag_call_arg(full_nb)
x <- greedy_search(class = class, to_include = NULL, init = full_nb,
step = augment_ode, dataset = dataset, epsilon = epsilon, k = k,
smooth = smooth, cache_reset = cache_reset)
add_dag_call_arg(x, fun_name = 'tan_hc', call = match.call(),
env = parent.frame(), force = TRUE)
}
#' @export
#' @rdname greedy_wrapper
kdb <- function(class, dataset, k, kdbk = 2, epsilon = 0.01, smooth = 0,
cache_reset = NULL) {
full_nb <- nb(class = class, dataset)
full_nb <- remove_dag_call_arg(full_nb)
x <- greedy_search(class = class, to_include = NULL, init = full_nb,
step = augment_kdb(kdbk), dataset = dataset, epsilon = epsilon, k = k,
smooth = smooth, cache_reset = cache_reset)
add_dag_call_arg(x, fun_name = 'kdb', call = match.call(),
env = parent.frame(), force = TRUE)
}
#' @export
#' @export
#' @rdname greedy_wrapper
tan_hcsp <- function(class, dataset, k, epsilon = 0.01, smooth = 0,
cache_reset = NULL) {
full_nb <- nb(class = class, dataset)
full_nb <- remove_dag_call_arg(full_nb)
x <- greedy_search(class = class, to_include = NULL, init = full_nb,
step = augment_ode_sp, dataset = dataset, epsilon = epsilon,
k = k, smooth = smooth, cache_reset = cache_reset)
add_dag_call_arg(x, fun_name = 'tan_hcsp', call = match.call(), env = parent.frame(),
force = TRUE)
}
#' @export
#' @rdname tan_chowliu
tan_cl <- function(class, dataset, score='loglik', root = NULL) {
x <- chowliu(class, dataset, score = score, blacklist = NULL,
root = root)
add_dag_call_arg(x, fun_name = 'tan_cl', call = match.call(),
env = parent.frame(), force = TRUE)
}
#' Learn an AODE ensemble.
#'
#' If there is a single predictor then returns a naive Bayes.
#'
#' @export
#' @inheritParams nb
#' @return A \code{bnc_aode} or a \code{bnc_dag} (if returning a naive Bayes)
aode <- function(class, dataset, features = NULL) {
if (!is.null(dataset)) {
features <- get_features(class = class, dataset = dataset)
}
if (length(features) == 1) return(nb(class = class, features = features))
names(features) <- features
models <- lapply(features, spode, features, class)
x <- bnc_aode(models = models, class_var = class, features = features)
add_dag_call_arg(x, fun_name = 'aode', call = match.call(),
env = parent.frame(), force = TRUE)
} |
calculate_periods_irr <-
  function(dates = c(
    "2016-06-01",
    "2017-05-31",
    "2018-05-31",
    "2019-05-31",
    "2020-05-31",
    "2021-05-31",
    "2022-05-31",
    "2023-05-31",
    "2024-05-31",
    "2025-05-31",
    "2026-05-31"
  ),
  cash_flows = c(
    -3000,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547278
  ),
  date_format = '%Y-%m-%d',
  scale_to_100 = F,
  return_percentage = F,
  return_df = T,
  return_wide = T,
  return_message = T) {
    # Internal rate of return for a dated cash-flow vector, found by the
    # secant method on the NPV function. Time is measured in years of
    # 365.24 days from the first date.
    #
    # @param dates Character dates, one per cash flow; the first is time 0.
    # @param cash_flows Numeric flows (negative = contribution).
    # @param date_format Format string used to parse `dates`.
    # @param scale_to_100 Multiply the IRR by 100 (mutually exclusive with
    #   `return_percentage`).
    # @param return_percentage Return the IRR as a formattable::percent.
    # @param return_df Return a one-row summary data frame; otherwise the
    #   bare IRR.
    # @param return_wide Wide (one row) or long (metric/value) output.
    # @param return_message Emit a human-readable summary via message().

    # Secant-method root finder; `par` supplies two starting guesses for
    # the rate, `fn` is the objective (NPV) whose root is the IRR.
    secant <-
      function(par,
               fn,
               tol = 1.e-07,
               itmax = 100,
               trace = TRUE,
               ...) {
        if (length(par) != 2)
          stop("You must specify a starting parameter vector of length 2")
        p.2 <- par[1]
        p.1 <- par[2]
        f <- rep(NA, length(par))
        f[1] <- fn(p.1, ...)
        f[2] <- fn(p.2, ...)
        iter <- 1
        pchg <- abs(p.2 - p.1)
        fval <- f[2]
        # Initialise the result so it is defined even when the starting
        # guesses already satisfy the tolerance and the loop never runs.
        p.new <- p.2
        # BUG FIX: the traced cat() call had been commented out, which left
        # the whole while loop as the body of `if (trace)` -- with
        # trace = FALSE root finding was silently skipped (and `p.new`
        # undefined). The loop now always runs.
        # cat("par: ", par, "fval: ", f, "\n")
        while (pchg >= tol && abs(fval) > tol && iter <= itmax) {
          p.new <- p.2 - (p.2 - p.1) * f[2] / (f[2] - f[1])
          pchg <- abs(p.new - p.2)
          fval <- fn(p.new, ...)
          p.1 <- p.2
          p.2 <- p.new
          f[1] <- f[2]
          f[2] <- fval
          iter <- iter + 1
        }
        list(par = p.new,
             value = fval,
             iter = iter)
      }
    # Net present value of the cash flows at rate `irr`.
    npv <-
      function (irr, cashFlow, times)
        sum(cashFlow / (1 + irr) ^ times)
    cfDate <-
      dates %>%
      as.Date(format = date_format)
    times <-
      difftime(cfDate, cfDate[1], units = "days") %>% as.numeric() / 365.24
    s <-
      secant(
        par = c(0, 0.1),
        fn = npv,
        cashFlow = cash_flows,
        times = times
      )
    irr <-
      s$par
    if (return_percentage && scale_to_100) {
      stop("Sorry you cannot return a percentage and scale to 100")
    }
    if (return_percentage) {
      irr <-
        irr %>% formattable::percent()
    }
    if (scale_to_100) {
      irr <-
        irr * 100
    }
    dateStart <-
      min(dates) %>% ymd
    dateEnd <-
      max(dates) %>% ymd
    # Contributions are the negative flows, distributions the positive ones.
    equityContributions <-
      cash_flows[cash_flows < 0] %>%
      sum %>%
      formattable::currency(digits = 2)
    equityDistributions <-
      cash_flows[cash_flows > 0] %>%
      sum %>%
      formattable::currency(digits = 2)
    multipleCapital <-
      -(equityDistributions / equityContributions) %>% digits(digits = 3)
    valueProfit <-
      equityDistributions + equityContributions
    if (return_df) {
      data <-
        data_frame(
          dateStart,
          dateEnd,
          equityContributions,
          equityDistributions,
          pctIRR = irr,
          valueProfit,
          multipleCapital,
          dateTimeCF = Sys.time()
        )
    } else {
      data <-
        irr
    }
    if (return_message) {
      "Cash Flow Produces a " %>%
        paste0(
          irr * 100,
          '% irr\nFrom ',
          dateStart,
          ' to ',
          dateEnd,
          '\n',
          'Profit of ',
          valueProfit,
          '\nCapital Multiple of ',
          multipleCapital
        ) %>%
        message()
    }
    if (!return_wide && return_df) {
      # BUG FIX: the original computed the long-format table here but never
      # assigned it, so return_wide = FALSE still returned the wide form.
      # (Guarded by `return_df` because a bare IRR cannot be gathered.)
      data <-
        data %>%
        gather(metric, value, -c(dateStart, dateEnd, dateTimeCF))
    }
    return(data)
  }
parse_for_currency_value <-
  function(x) {
    # Strip currency formatting (e.g. "$1,234.56") from `x` and return the
    # numeric amount as a formattable::currency value.
    formattable::currency(readr::parse_number(x))
  }
parse_for_percentage <-
  function(x) {
    # Parse a percentage given as "5%", "0.05", 5 or 0.05 into a
    # formattable::percent on the 0-1 scale; values >= 1 are treated as
    # being on the 0-100 scale and divided by 100.
    # NOTE(review): an input of exactly 1 is therefore read as 1%, not
    # 100% -- confirm callers never pass 1 meaning 100%.
    pct <- parse_number(x)
    if (pct >= 1) {
      pct <- pct / 100
    }
    percent(pct)
  }
parse_multiple <-
  function(x) {
    # Coerce a multiple (e.g. "10x", "10") to a plain numeric value.
    parse_number(x)
  }
cap_rate_valuation <-
  function(cap_rate = .0615,
           net_operating_income = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    # Residual valuation from a capitalization rate: gross value is
    # NOI / cap rate; cost of sale and debt repayment come out first and
    # any remainder is distributed to equity.
    noi <-
      net_operating_income %>%
      parse_for_currency_value
    debt <-
      debt_balance %>%
      parse_for_currency_value()
    pct_cap_rate <-
      cap_rate %>%
      parse_for_percentage()
    pct_sale_cost <-
      cost_of_sale %>%
      parse_for_percentage()
    # BUG FIX: the original divided by the raw `cap_rate` argument rather
    # than the parsed `pct_cap_rate`, so character inputs such as "6.15%"
    # failed even though they were parsed one line earlier.
    amountValuationGross <-
      noi / (pct_cap_rate %>% as.numeric)
    amountCostSale <-
      -((pct_sale_cost %>% as.numeric) * amountValuationGross)
    amountValuationNet <-
      amountValuationGross + amountCostSale
    # Repay debt up to net proceeds; equity gets whatever remains.
    amountDebtRepayment <-
      -min(amountValuationNet, debt)
    amountEquityDistribution <-
      -max(0, amountValuationNet + amountDebtRepayment) %>% currency
    # BUG FIX: replaced the exact floating-point `== 0` waterfall check
    # with a small tolerance so rounding cannot trip a spurious error.
    waterfall_residual <-
      amountValuationGross + amountCostSale + amountDebtRepayment +
      amountEquityDistribution
    if (abs(as.numeric(waterfall_residual)) > 1e-6) {
      stop("Cash waterfall does not tie")
    }
    value_df <-
      data_frame(
        pctCapRate = pct_cap_rate,
        pctCostSale = pct_sale_cost,
        amountNetOperatingIncome = noi,
        amountDebtBalance = debt,
        amountValuationGross,
        amountCostSale,
        amountValuationNet,
        amountDebtRepayment,
        amountEquityDistribution
      )
    if (!return_wide) {
      # Long format drops the derived net valuation and keeps the scenario
      # inputs as id columns.
      value_df <-
        value_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(
          item,
          value,
          -c(
            pctCapRate,
            amountNetOperatingIncome,
            amountDebtBalance,
            pctCostSale
          )
        ) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(value_df)
  }
ebtida_multiple_value <-
  function(ebitda_multiple = 10,
           ebitda = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    # Residual valuation from an EBITDA multiple: gross value is
    # EBITDA * multiple; cost of sale and debt repayment come out first and
    # any remainder is distributed to equity.
    # NOTE(review): the function name ("ebtida") is a typo preserved for
    # backward compatibility with existing callers.
    amountEBITDA <-
      ebitda %>%
      parse_for_currency_value
    debt <-
      debt_balance %>%
      parse_for_currency_value()
    multipleEBITDA <-
      ebitda_multiple %>%
      parse_multiple()
    pct_sale_cost <-
      cost_of_sale %>%
      parse_for_percentage()
    amountValuationGross <-
      amountEBITDA * multipleEBITDA
    amountCostSale <-
      -((pct_sale_cost %>% as.numeric) * amountValuationGross)
    amountValuationNet <-
      amountValuationGross + amountCostSale
    # Repay debt up to net proceeds; equity gets whatever remains.
    amountDebtRepayment <-
      -min(amountValuationNet, debt)
    amountEquityDistribution <-
      -max(0, amountValuationNet + amountDebtRepayment) %>% currency
    # BUG FIX: replaced the exact floating-point `== 0` waterfall check
    # with a small tolerance so rounding cannot trip a spurious error.
    waterfall_residual <-
      amountValuationGross + amountCostSale + amountDebtRepayment +
      amountEquityDistribution
    if (abs(as.numeric(waterfall_residual)) > 1e-6) {
      stop("Cash waterfall does not tie")
    }
    value_df <-
      data_frame(
        multipleEBITDA,
        pctCostSale = pct_sale_cost,
        amountEBITDA = amountEBITDA,
        amountDebtBalance = debt,
        amountValuationGross,
        amountCostSale,
        amountValuationNet,
        amountDebtRepayment,
        amountEquityDistribution
      )
    if (!return_wide) {
      value_df <-
        value_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(item,
               value,
               -c(
                 multipleEBITDA,
                 amountEBITDA,
                 amountDebtBalance,
                 pctCostSale
               )) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(value_df)
  }
#' Calculate residual value
#'
#' @param cap_rates Vector of Capitalization Rates in percent or character percent form
#' @param net_operating_income Vector of Net Operating Income in numeric or character numeric/currency form
#' @param cost_of_sale Vector of Cost of Sale in percent or character percent form
#' @param debt_balance Vector of anticipated Debt Balance at sale in numeric or character numeric/currency form
#' @param return_wide
#' @import readr dplyr purrr formattable
#' @return
#' @export
#'
#' @examples
#' calculate_residual_valuation_cap_rates()
calculate_residual_valuation_cap_rates <-
  function(cap_rates = c(.05, .0525, .06, .2),
           net_operating_income = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    # Build the cartesian grid of assumptions, value every scenario via
    # cap_rate_valuation(), then normalise percent/amount formatting.
    param_grid <-
      expand.grid(
        cap_rate = cap_rates,
        noi = net_operating_income,
        cost_sale = cost_of_sale,
        debt = debt_balance,
        stringsAsFactors = F
      ) %>%
      as_data_frame
    scenario_df <-
      seq_len(nrow(param_grid)) %>%
      map_df(function(row) {
        cap_rate_valuation(
          cap_rate = param_grid$cap_rate[[row]],
          net_operating_income = param_grid$noi[[row]],
          cost_of_sale = param_grid$cost_sale[[row]],
          debt_balance = param_grid$debt[[row]],
          return_wide = T
        )
      }) %>%
      mutate(idScenario = 1:n()) %>%
      dplyr::select(idScenario, everything())
    # Re-apply display formatting to all pct*/amount* columns.
    scenario_df <-
      scenario_df %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^pct")) %>% names,
                funs(. %>% percent(digits = 2))) %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^amount")) %>% names,
                funs(. %>% currency(digits = 2)))
    if (!return_wide) {
      # Long format drops the derived net valuation and keeps the scenario
      # inputs (plus the scenario id) as id columns.
      scenario_df <-
        scenario_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(
          item,
          value,
          -c(
            idScenario,
            pctCapRate,
            amountNetOperatingIncome,
            amountDebtBalance,
            pctCostSale
          )
        ) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(scenario_df)
  }
#' Calculate residual value for a set of given EBITDA based inputs
#'
#' @param ebitda_multiples Vector of EBITDA Multiples in numeric or character
#' @param ebitda Vector of EBITDA in numeric or character numeric/currency form
#' @param cost_of_sale Vector of Cost of Sale in percent or character percent form
#' @param debt_balance Vector of anticipated Debt Balance at sale in numeric or character numeric/currency form
#' @param return_wide Return data in wide or long form
#' @import readr dplyr purrr formattable
#' @return
#' @export
#'
#' @examples
#' calculate_residual_valuation_ebitda_multiples(ebitda_multiples = c(5, 10, 15, 20), ebitda = "$27,500,000", cost_of_sale = "5%", debt_balance = "$350,000,000", return_wide = T)
calculate_residual_valuation_ebitda_multiples <-
  function(ebitda_multiples = c(5, 10, 15, 20),
           ebitda = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    # Cross every EBITDA multiple with the other assumptions and value each
    # scenario via ebtida_multiple_value().
    scenario_matrix <-
      expand.grid(
        ebitda_multiple = ebitda_multiples,
        ebitda = ebitda,
        cost_sale = cost_of_sale,
        debt = debt_balance,
        stringsAsFactors = F
      ) %>%
      as_data_frame
    # One valuation row per grid row, tagged with a scenario id.
    scenario_df <-
      1:nrow(scenario_matrix) %>%
      map_df(function(x) {
        ebtida_multiple_value(
          ebitda_multiple = scenario_matrix$ebitda_multiple[[x]],
          ebitda = scenario_matrix$ebitda[[x]],
          cost_of_sale = scenario_matrix$cost_sale[[x]],
          debt_balance = scenario_matrix$debt[[x]],
          return_wide = T
        )
      }) %>%
      mutate(idScenario = 1:n()) %>%
      dplyr::select(idScenario, everything())
    # Re-apply display formatting to all pct*/amount* columns.
    scenario_df <-
      scenario_df %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^pct")) %>% names,
                funs(. %>% percent(digits = 2))) %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^amount")) %>% names,
                funs(. %>% currency(digits = 2)))
    if (!return_wide) {
      # NOTE(review): unlike the cap-rate variant, `idScenario` is not in
      # the id-column set below, so it gets gathered into value rows --
      # TODO confirm this asymmetry is intended.
      scenario_df <-
        scenario_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(item,
               value,
               -c(
                 multipleEBITDA,
                 amountEBITDA,
                 amountDebtBalance,
                 pctCostSale
               )) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(scenario_df)
  }
post_money_valuation <-
  function(pre_money_valuation = "$45,000,000",
           percent_sold = "10%") {
    # Post-money ownership split for a round in which `percent_sold` of the
    # (pre-money) company is sold for new capital.
    options(scipen = 999999)
    pre_money <- parse_for_currency_value(pre_money_valuation)
    pct_sold <- as.numeric(parse_for_percentage(percent_sold))
    # New capital is priced off the pre-money valuation.
    new_capital <- currency(pct_sold * pre_money)
    total_val <- pre_money + new_capital
    data_frame(
      valuationPreMoney = pre_money,
      amountCapitalInvestment = new_capital
    ) %>%
      mutate(
        pctOwnershipExistingShareholders = percent(pre_money / total_val),
        pctOwnershipNewInvestment = percent(new_capital / total_val)
      )
  }
#' Calculates range of post investment valuations
#'
#' @param pre_money_valuation Vector of of valuations in numeric or character
#' @param percent_sold Vector of of amount of business sold in percent or character percent form
#' @param return_wide
#' @param return_wide Return data in wide or long form
#' @import readr dplyr purrr formattable
#' @return
#' @export
#'
#' @examples
#' calculate_valuation_post_money(pre_money_valuation = "$45,000,000", percent_sold = "10%", return_wide = T)
calculate_valuation_post_money <-
  function(pre_money_valuation = "$45,000,000",
           percent_sold = "10%",
           return_wide = T) {
    # Cross every valuation with every percent sold and compute the
    # post-money capitalization for each scenario.
    param_grid <-
      expand.grid(
        pre_money_valuation = pre_money_valuation,
        percent_sold = percent_sold,
        stringsAsFactors = F
      ) %>%
      as_data_frame
    scenario_df <-
      seq_len(nrow(param_grid)) %>%
      map_df(function(row) {
        post_money_valuation(
          pre_money_valuation = param_grid$pre_money_valuation[[row]],
          percent_sold = param_grid$percent_sold[[row]]
        )
      }) %>%
      mutate(idScenario = 1:n()) %>%
      dplyr::select(idScenario, everything())
    # Re-apply display formatting: percents and currency/valuation amounts.
    scenario_df <-
      scenario_df %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^pct")) %>% names,
                funs(. %>% percent(digits = 2))) %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^amount|valuation")) %>% names,
                funs(. %>% currency(digits = 2)))
    if (!return_wide) {
      # Long format drops the percent columns entirely.
      scenario_df <-
        scenario_df %>%
        dplyr::select(-matches("pct")) %>%
        gather(item, value, -c(idScenario)) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(scenario_df)
  }
#' Calculate proceeds from share sale given specified price and share amount
#'
#' @param price
#' @param shares
#'
#' @return
#' @export
#' @importFrom formattable currency
#' @examples
#' calculate_share_proceeds(price = 9, shares = 150000)
calculate_share_proceeds <-
  function(price = 10, shares = 1000000) {
    # Dollar proceeds of selling `shares` shares at `price` per share.
    currency(shares * price)
  }
calculate_basis <-
  function(purchase_price = "$10,000,000",
           capitalized_acquisition_costs = "$300,000",
           capital_investment = "$1,200,000") {
    # Total acquisition basis: purchase price + capitalized acquisition
    # costs + capital investment, each parsed from currency-style input
    # (NULL inputs other than the purchase price count as zero).
    # BUG FIX: the default for `capitalized_acquisition_costs` was the
    # malformed string "$300,0000" (parsed as $3,000,000); corrected to
    # "$300,000".
    options(scipen = 999)
    if (is.null(purchase_price)) {
      stop("Please enter a purchase price")
    }
    amountPurchasePrice <-
      purchase_price %>%
      parse_for_currency_value()
    if (!is.null(capitalized_acquisition_costs)) {
      amountCapitalizedCosts <-
        capitalized_acquisition_costs %>%
        parse_for_currency_value()
    } else {
      amountCapitalizedCosts <-
        0
    }
    # BUG FIX: this branch previously re-tested
    # `capitalized_acquisition_costs` and, in its else branch, zeroed the
    # raw `capital_investment` input instead of `amountCapitalInvestment`,
    # which then failed below as an undefined variable when
    # `capital_investment` was NULL.
    if (!is.null(capital_investment)) {
      amountCapitalInvestment <-
        capital_investment %>%
        parse_for_currency_value()
    } else {
      amountCapitalInvestment <-
        0
    }
    basis_df <-
      data_frame(amountPurchasePrice, amountCapitalInvestment, amountCapitalizedCosts) %>%
      mutate(amountBasis = amountPurchasePrice + amountCapitalInvestment + amountCapitalizedCosts)
    return(basis_df)
  }
calculate_capitalization <-
  function(purchase_price = "$9,700,000",
           capitalized_acquisition_costs = "$300,000",
           capital_investment = "$0",
           loan_to_cost = .7,
           borrow_capital_investment = T,
           include_capitalized_cost = F,
           leverage_threshold = .95) {
    # Capital stack for an acquisition: basis (via calculate_basis), loan
    # proceeds at the requested loan-to-cost, and the equity plug.
    # `leverage_threshold` caps the allowed loan-to-cost (NULL disables the
    # cap by treating it as 100%).
    if (loan_to_cost %>% is_null()) {
      stop("Please enter a loan to cost even if it is zero")
    }
    if (leverage_threshold %>% is_null()) {
      leverage_threshold <-
        1
    }
    pct_ltc <-
      loan_to_cost %>% parse_for_percentage()
    # Refuse imprudent leverage with a (deliberately jokey) error message.
    if (pct_ltc > leverage_threshold) {
      leverage_message <-
        "\nDon't be a reckless idiot, remember what happend to Lehman Brothers???\nDon't know Lehman Brothers, Google it\n" %>%
        paste0(pct_ltc, ' is unprudent leverage\nA more reasonable amount of leverage is: ', leverage_threshold %>% formattable::percent(), '\nChange your leverage assumptions and try again')
      stop(leverage_message)
    }
    basis_df <-
      calculate_basis(purchase_price = purchase_price, capitalized_acquisition_costs = capitalized_acquisition_costs, capital_investment = capital_investment)
    # The loan is sized off purchase price, optionally including the
    # capitalized acquisition costs in the debt basis.
    if (include_capitalized_cost) {
      debt_basis <-
        basis_df %>% mutate(amountDebtBasis = amountPurchasePrice + amountCapitalizedCosts) %>%
        .$amountDebtBasis
    } else {
      debt_basis <-
        basis_df$amountPurchasePrice
    }
    loan_proceeds <-
      debt_basis * pct_ltc
    # Optionally finance the capital investment dollar-for-dollar on top of
    # the loan-to-cost proceeds.
    if (borrow_capital_investment) {
      loan_proceeds <-
        loan_proceeds + basis_df$amountCapitalInvestment
    }
    # Equity is the plug: basis less loan proceeds (both carried as
    # negative sources against the positive basis).
    capital_stack_df <-
      basis_df %>%
      mutate(
        amountLoanProceeds = -loan_proceeds,
        amountEquity = -(amountBasis + amountLoanProceeds)
      )
    return(capital_stack_df)
  }
get_data_monthly_periods <-
  function(start_date = "2016-06-01",
           term_years = 25,
           term_months = 0){
    # Monthly period schedule: period 0 is the start date itself; every
    # later period ends on the last day of its month. Returns a data frame
    # with idPeriod, yearPeriod and datePeriod.
    periods <-
      term_years * 12 + term_months
    periods <-
      0:periods
    start_date <-
      start_date %>% lubridate::ymd %>% as.Date()
    # End-of-period date for a single period index.
    # NOTE(review): the period == 1 and period > 1 branches apply the same
    # formula (months(period - 1) is months(0) when period == 1) -- they
    # could be merged; kept split as written.
    get_end_of_period <-
      function(period = 0) {
        if (period == 0) {
          period_date <-
            start_date %m+% months(period) %>%
            as.character() %>%
            as.Date()
        }
        if (period == 1) {
          period_date <-
            start_date %m+% months(0) %>%
            timeDate::timeLastDayInMonth %>%
            as.character() %>%
            as.Date()
        }
        if (period > 1) {
          period_date <-
            start_date %m+% months(period - 1) %>%
            timeDate::timeLastDayInMonth %>%
            as.character() %>%
            as.Date()
        }
        period_df <-
          data_frame(idPeriod = period, datePeriod = period_date)
        return(period_df)
      }
    all_periods <-
      periods %>%
      purrr::map(function(x) {
        get_end_of_period(period = x)
      }) %>%
      compact %>%
      bind_rows()
    # NOTE(review): yearPeriod here is (idPeriod %/% 12) + 1, whereas
    # calculate_loan_payment recomputes it as ((idPeriod - 1) %/% 12) + 1;
    # the two differ at exact year boundaries (e.g. idPeriod = 12) -- TODO
    # confirm which convention is intended.
    all_periods <-
      all_periods %>%
      mutate(yearPeriod = ifelse(idPeriod == 0, 0, (idPeriod %/% 12) + 1)) %>%
      dplyr::select(idPeriod, yearPeriod, everything())
    return(all_periods)
  }
pmt <-
  function (r, n, pv, fv, type = 0) {
    # Periodic payment for an annuity (Excel-style PMT).
    #
    # @param r    Periodic interest rate (non-zero; e.g. a monthly rate).
    # @param n    Number of periods.
    # @param pv   Present value.
    # @param fv   Future value remaining after the final payment.
    # @param type 0 = payment at period end, 1 = payment at period start.
    # @return The signed payment amount (negative for an outflow when pv
    #   is positive).
    if (!type %in% c(0, 1)) {
      # BUG FIX: the original print()ed an error message and fell through,
      # silently returning NULL; invalid input now fails loudly.
      stop("type should be 0 or 1!")
    }
    (pv + fv / (1 + r) ^ n) * r / (1 - 1 / (1 + r) ^ n) * (-1) *
      (1 + r) ^ (-1 * type)
  }
calculate_loan_payment <-
  function(loan_start_date = "2016-06-01",
           amount_initial_draw = 3000,
           is_interest_only = F,
           interest_only_periods = 24,
           interest_rate = "10%",
           is_actual_360 = T,
           amortization_years = 10,
           amortization_months = 0,
           term_years = 10,
           term_months = 0,
           pct_loan_fee = 0,
           balloon_year = 10,
           override_monthly_interest = F,
           interest_reserve_period = 0,
           balloon_month = 0,
           return_annual_summary = F) {
    # Monthly loan amortization schedule. Builds a period table, then walks
    # it month by month accruing interest (actual/360 or actual/365),
    # applying interest-only or amortizing payments, and repaying the
    # remaining balance as a balloon at (balloon_year, balloon_month).
    # NOTE(review): the schedule is built by rbind-ing one row per month
    # inside the loop, which is quadratic -- acceptable for typical loan
    # terms but slow for very long ones.
    options(scipen = 99999)
    options(digits = 10)
    interest_rate <-
      interest_rate %>%
      parse_for_percentage()
    # Day-count convention: actual/360 charges the nominal rate over a
    # 360-day year, so the effective annual ("net") rate is rate/360*365.
    if (is_actual_360 == T) {
      daily_interest <-
        interest_rate / 360
      net_rate <-
        interest_rate / 360 * 365
    } else {
      daily_interest <-
        interest_rate / 365
      net_rate <-
        interest_rate
    }
    amortization_periods <-
      (amortization_years * 12) + amortization_months
    loan_periods <-
      (balloon_year * 12) + balloon_month
    loan_period_df <-
      get_data_monthly_periods(start_date = loan_start_date,
                               term_years = term_years,
                               term_months = term_months)
    # Flag each period: interest-only window, loan still active, and the
    # initial draw / loan fee (both occur only at period 0).
    loan_period_df <-
      loan_period_df %>%
      mutate(
        isIO = ifelse(is_interest_only == T &
                        idPeriod <= interest_only_periods, T, F),
        isActiveLoan = ifelse(idPeriod <= loan_periods, T, F),
        amountInitialDraw = ifelse(idPeriod == 0, amount_initial_draw, 0),
        amountLoanFee = amountInitialDraw * pct_loan_fee
      )
    loan_period_df <-
      loan_period_df %>%
      dplyr::filter(isActiveLoan == T)
    periods <-
      loan_period_df$idPeriod
    all_payment_data <-
      data_frame()
    for (period in periods) {
      period_index <-
        period + 1
      datePeriod <-
        loan_period_df$datePeriod[period_index]
      drawInitial <-
        loan_period_df$amountInitialDraw[period_index]
      drawAdditional <-
        0 # loan_period_df$amountAdditionalDraw[period_index] -- layer in eventully
      # NOTE(review): the `is_interest_only` parameter is reassigned here
      # from the precomputed per-period isIO flag (intentional shadowing).
      is_interest_only <-
        loan_period_df$isIO[period_index]
      periodFee <-
        loan_period_df$amountLoanFee[period_index]
      # Period 0: funding row only -- no interest or principal.
      if (period == 0) {
        month_df <-
          data_frame(
            idPeriod = period,
            dateStartPeriod = datePeriod,
            dateEndPeriod = datePeriod,
            isIO = is_interest_only,
            balanceInitial = 0 %>% currency(digits = 2),
            amountInitialDraw = drawInitial %>% currency(digits = 2),
            amountAdditionalDraw = drawAdditional %>% currency(digits = 2),
            paymentInterest = 0 %>% currency(digits = 2),
            paymentPrincipal = 0 %>% currency(digits = 2),
            amountRepayment = 0 %>% currency(digits = 2),
            amountLoanFee = periodFee %>% currency(digits = 2)
          ) %>%
          mutate(balanceEnd = amountInitialDraw + amountAdditionalDraw)
      }
      if (period > 0) {
        initial_balance <-
          all_payment_data$balanceEnd[period_index - 1]
        total_balance <-
          initial_balance + drawInitial + drawAdditional
        # Amortizing payments are sized off total drawn principal, not the
        # declining balance (standard level-payment amortization).
        balance_basis <-
          (all_payment_data$amountInitialDraw %>% sum()) +
          all_payment_data$amountAdditionalDraw %>% sum()
        start_month <-
          timeDate::timeFirstDayInMonth(datePeriod) %>% as.Date()
        days <-
          (datePeriod - start_month) + 1
        month_days <-
          as.numeric(days, units = 'days')
        # Either a flat net_rate/12 monthly accrual, or actual days in the
        # month times the daily rate.
        if (override_monthly_interest == T) {
          monthly_interest <-
            net_rate / 12
        } else {
          monthly_interest <-
            daily_interest * month_days
        }
        paymentInterest <-
          monthly_interest * (total_balance)
        # Periods covered by an interest reserve pay no current interest.
        if (interest_reserve_period > 0 &
            period <= interest_reserve_period) {
          paymentInterest <-
            0
        }
        if (is_interest_only == T) {
          paymentTotal <-
            paymentInterest
        } else {
          paymentTotal <-
            pmt(
              r = monthly_interest,
              pv = balance_basis,
              fv = 0,
              n = amortization_periods
            )
        }
        paymentPrincipal <-
          abs(paymentTotal) - paymentInterest
        # Balloon: at the final loan period the remaining balance (after
        # this month's principal) is repaid in full.
        if (period == loan_periods) {
          amountRepayment <-
            initial_balance + drawInitial + drawAdditional - paymentPrincipal
        } else {
          amountRepayment <-
            0
        }
        month_df <-
          data_frame(
            idPeriod = period,
            dateStartPeriod = start_month,
            dateEndPeriod = datePeriod,
            isIO = is_interest_only,
            balanceInitial = initial_balance %>% currency(digits = 2),
            amountInitialDraw = drawInitial %>% currency(digits = 2),
            amountAdditionalDraw = drawAdditional %>% currency(digits = 2),
            paymentInterest = -paymentInterest %>% currency(digits = 2),
            paymentPrincipal = -paymentPrincipal %>% currency(digits = 2),
            amountRepayment = -amountRepayment %>% currency(digits = 2),
            amountLoanFee = periodFee %>% currency(digits = 2)
          ) %>%
          mutate(
            balanceEnd =
              balanceInitial + amountInitialDraw + amountAdditionalDraw +
              paymentPrincipal + amountRepayment
          )
      }
      all_payment_data <-
        month_df %>%
        bind_rows(all_payment_data) %>%
        arrange((idPeriod))
    }
    # Normalise currency formatting and tag each row with its loan year.
    all_payment_data <-
      all_payment_data %>%
      mutate(
        balanceInitial = balanceInitial %>% currency(digits = 2),
        amountInitialDraw = amountInitialDraw %>% currency(digits = 2),
        amountAdditionalDraw = amountAdditionalDraw %>% currency(digits = 2),
        paymentInterest = paymentInterest %>% currency(digits = 2),
        paymentPrincipal = paymentPrincipal %>% currency(digits = 2),
        amountRepayment = amountRepayment %>% currency(digits = 2),
        amountLoanFee = amountLoanFee %>% currency(digits = 2),
        balanceEnd = balanceEnd %>% currency(digits = 2)
      ) %>%
      mutate(yearPeriod = ifelse(idPeriod > 0,
                                 ((idPeriod - 1) %/% 12) + 1,
                                 0)) %>%
      dplyr::select(yearPeriod, everything())
    # Optional roll-up to one row per loan year with a net cash-flow column.
    if (return_annual_summary == T) {
      all_payment_data <-
        all_payment_data %>%
        group_by(yearPeriod) %>%
        summarise(
          dateStartPeriod = min(dateStartPeriod),
          dateEndPeriod = max(dateEndPeriod),
          amountInitialDraw = sum(amountInitialDraw),
          amountAdditionalDraw = sum(amountAdditionalDraw),
          paymentInterest = sum(paymentInterest),
          paymentPrincipal = sum(paymentPrincipal),
          amountRepayment = sum(amountRepayment),
          amountLoanFee = sum(amountLoanFee),
          cfLoan = (
            amountInitialDraw + amountAdditionalDraw + paymentInterest + paymentPrincipal + amountRepayment + amountLoanFee
          ) %>% currency(digits = 2)
        ) %>%
        mutate(
          balanceEnd = cumsum(amountInitialDraw) + cumsum(paymentPrincipal) + cumsum(amountRepayment)
        ) %>%
        ungroup
    }
    return(all_payment_data)
  }
calculate_average_payment <-
  function(amount_initial_draw = 3000,
           is_interest_only = F,
           interest_only_periods = 24,
           interest_rate = "10%",
           is_actual_360 = T,
           amortization_years = 10,
           amortization_months = 0,
           term_years = 10,
           term_months = 0,
           pct_loan_fee = 0,
           balloon_year = 10,
           override_monthly_interest = F,
           interest_reserve_period = 0,
           balloon_month = 0) {
    # Mean annual principal, interest and total payment for a hypothetical
    # loan started on the upcoming first of the month.
    # NOTE(review): library() inside a function is a side effect on the
    # search path; also, `override_monthly_interest` and
    # `interest_reserve_period` are accepted here but never forwarded to
    # calculate_loan_payment() -- TODO confirm whether that is intended.
    library(lubridate)
    # First day of next month (last day of this month + 1).
    first_of_the_month <-
      (ceiling_date((Sys.Date() %m+% months(0)), "month") - days(1)) + 1
    pmt_df <-
      calculate_loan_payment(
        loan_start_date = first_of_the_month,
        amount_initial_draw = amount_initial_draw,
        is_interest_only = is_interest_only,
        interest_only_periods = interest_only_periods,
        interest_rate = interest_rate,
        is_actual_360 = is_actual_360,
        amortization_years = amortization_years,
        amortization_months = amortization_months,
        term_years = term_years,
        term_months = term_months,
        pct_loan_fee = pct_loan_fee,
        balloon_year = balloon_year,
        balloon_month = balloon_month,
        return_annual_summary = F
      )
    # Drop the funding row (period 0), total payments by loan year, then
    # average the annual totals.
    pmt_df <-
      pmt_df %>%
      dplyr::filter(!idPeriod == 0) %>%
      group_by(yearPeriod) %>%
      summarise(
        paymentPrincipal = sum(paymentPrincipal, na.rm = T),
        paymentInterest = sum(paymentInterest, na.rm = T)
      ) %>%
      ungroup %>%
      summarise(
        meanPrincipal = mean(paymentPrincipal, na.rm = T) %>% formattable::currency(),
        meanInterest = mean(paymentInterest, na.rm = T) %>% formattable::currency()
      ) %>%
      mutate(meanPayment = meanPrincipal + meanInterest)
    return(pmt_df)
  }
# Computes leveraged return metrics for a single acquisition scenario:
# capitalization (basis / loan / equity), operating income, average debt
# service, and derived ratios (leverage, return on cost, DSCR, cash-on-cash,
# return on equity, rule-of-72 doubling time).
#
# BUG FIXES vs. prior version:
#   * `override_monthly_interest` and `interest_reserve_period` were
#     referenced in the calculate_average_payment() call but were never
#     defined (not parameters, not locals), causing a runtime error. They
#     are now formal parameters with defaults matching
#     calculate_average_payment()'s own defaults, so existing named callers
#     are unaffected.
#   * `leverage_threshold` was accepted but never forwarded to
#     calculate_capitalization(); it is now passed through (this also
#     removes the dangling trailing comma in that call).
#
# Returns a one-row data frame combining the capital stack with the
# computed metrics.
calculate_leverage_metric <-
  function(purchase_price = "$9,700,000",
           capitalized_acquisition_costs = "$300,000",
           capital_investment = "0",
           revenue = "$1,500,000",
           expenses = "$115,000",
           loan_to_cost = .7,
           borrow_capital_investment = F,
           include_capitalized_cost = T,
           leverage_threshold = .95,
           is_interest_only = TRUE,
           interest_only_periods = 12,
           interest_rate = "5%",
           is_actual_360 = TRUE,
           amortization_years = 30,
           amortization_months = 0,
           term_years = 10,
           term_months = 0,
           pct_loan_fee = 0,
           balloon_year = 10,
           balloon_month = 0,
           override_monthly_interest = F,
           interest_reserve_period = 0,
           return_message = F) {
    basis_df <-
      calculate_capitalization(
        purchase_price = purchase_price,
        capitalized_acquisition_costs = capitalized_acquisition_costs,
        capital_investment = capital_investment,
        loan_to_cost = loan_to_cost,
        borrow_capital_investment = borrow_capital_investment,
        include_capitalized_cost = include_capitalized_cost,
        leverage_threshold = leverage_threshold
      )
    revenue_amount <-
      revenue %>%
      parse_for_currency_value()
    expense_amount <-
      expenses %>%
      parse_for_currency_value()
    # Expenses are treated as a cash outflow regardless of input sign.
    if (expense_amount > 0) {
      expense_amount <-
        -expense_amount
    }
    operating_income <-
      revenue_amount + expense_amount
    interest_rate <-
      interest_rate %>%
      parse_for_percentage()
    pct_loan_fee <-
      pct_loan_fee %>%
      parse_for_percentage()
    average_pmt <-
      calculate_average_payment(
        amount_initial_draw = basis_df$amountLoanProceeds %>% abs(),
        is_interest_only = is_interest_only,
        interest_only_periods = interest_only_periods,
        interest_rate = interest_rate,
        is_actual_360 = is_actual_360,
        amortization_years = amortization_years,
        amortization_months = amortization_months,
        term_years = term_years,
        term_months = term_months,
        pct_loan_fee = pct_loan_fee,
        balloon_year = balloon_year,
        override_monthly_interest = override_monthly_interest,
        interest_reserve_period = interest_reserve_period
      )
    data <-
      basis_df %>%
      mutate(
        amountRevenue = revenue_amount,
        amountExpense = expense_amount,
        amountEBITDA_NOI = operating_income
      ) %>%
      bind_cols(average_pmt) %>%
      mutate(
        # meanPayment is negative, so LNCF = NOI less debt service.
        amountLNCFMean = amountEBITDA_NOI + meanPayment,
        pctLeverage = (-amountLoanProceeds / amountBasis) %>% formattable::percent(),
        pctMarginEBITDA_NOI = (amountEBITDA_NOI / amountRevenue) %>% formattable::percent(),
        pctReturnOnCost = (amountEBITDA_NOI / amountBasis) %>% formattable::percent(),
        pctDebtYieldInitial = -(amountEBITDA_NOI / amountLoanProceeds) %>% formattable::percent(),
        ratioDSCRMean = (amountEBITDA_NOI / -meanPayment) %>% as.numeric() %>% formattable::digits(3),
        pctCashOnCashMean = (amountLNCFMean / -amountEquity) %>% formattable::percent(),
        pctReturnOnEquity = ((amountEBITDA_NOI + meanInterest) / -amountEquity) %>% formattable::percent(),
        rule72Multiple2x = (72 / (pctCashOnCashMean * 100)) %>% as.numeric()
      )
    if (return_message) {
      metric_message <-
        "Basis: " %>%
        paste0(
          data$amountBasis,
          '\n',
          'Leverage: ',
          data$pctLeverage,
          '\n',
          'Interest Rate: ',
          interest_rate,
          '\n',
          'Amortization: ',
          ((amortization_years * 12) + amortization_months),
          ' periods\n',
          'Return on Cost: ',
          data$pctReturnOnCost,
          '\nCash on Cash: ', data$pctCashOnCashMean,
          "\nReturn on Equity: ", data$pctReturnOnEquity,
          "\nRule of 72: Equity Doubles in ", data$rule72Multiple2x, ' years\n'
        )
      metric_message %>% message()
    }
    return(data)
  }
#' Calculate leveraged return metrics
#'
#' @param purchase_price Vector of Purchase Prices
#' @param capitalized_acquisition_costs Vector of Capitalized Acquisition Costs
#' @param capital_investment Vector of Capital Investment
#' @param revenue Vector of revenue amounts
#' @param expenses Vector of expenses
#' @param loan_to_cost Vector of loan to cost
#' @param interest_rate Interest Rate
#' @param borrow_capital_investment Borrow Investment \code{TRUE, FALSE}
#' @param include_capitalized_cost Include capitalized costs in leverage calculations \code{TRUE, FALSE}
#' @param leverage_threshold Maximum Leverage
#' @param is_interest_only Does loan have interest only periods \code{TRUE, FALSE}
#' @param interest_only_periods Interest Only Periods
#' @param is_actual_360 Is loan calculated on actual/360 basis \code{TRUE, FALSE}
#' @param amortization_years Loan amortization years
#' @param amortization_months Loan amortization months
#' @param term_years Term of the loan, years
#' @param term_months Term of the loan, months
#' @param pct_loan_fee Loan fee, percent
#' @param return_wide If \code{TRUE}, returns data in wide form; otherwise long form
#' @param return_message If \code{TRUE}, prints a summary message for each scenario
#'
#' @return
#' @export
#' @import readr dplyr lubridate stringr purrr tidyr formattable
#' @examples
calculate_leverage_metrics <-
  function(purchase_price = 0,
           capitalized_acquisition_costs = 0,
           capital_investment = 0,
           revenue = 0,
           expenses = 0,
           loan_to_cost = 0,
           interest_rate = 0,
           borrow_capital_investment = F,
           include_capitalized_cost = T,
           leverage_threshold = .95,
           is_interest_only = FALSE,
           interest_only_periods = 0,
           is_actual_360 = TRUE,
           amortization_years = 30,
           amortization_months = 0,
           term_years = 30,
           term_months = 0,
           pct_loan_fee = 0,
           return_wide = T,
           return_message = T) {
    # Cross all input vectors into a scenario grid; every combination of
    # the supplied assumptions becomes one row / one scenario.
    variable_matrix <-
      expand.grid(
        purchase_price = purchase_price,
        capitalized_acquisition_costs = capitalized_acquisition_costs,
        capital_investment = capital_investment,
        revenue = revenue,
        expenses = expenses,
        loan_to_cost = loan_to_cost,
        borrow_capital_investment = borrow_capital_investment,
        include_capitalized_cost = include_capitalized_cost,
        leverage_threshold = leverage_threshold,
        is_interest_only = is_interest_only,
        interest_only_periods = interest_only_periods,
        interest_rate = interest_rate,
        is_actual_360 = is_actual_360,
        amortization_years = amortization_years,
        amortization_months = amortization_months,
        term_years = term_years,
        term_months = term_months,
        pct_loan_fee = pct_loan_fee,
        stringsAsFactors = F
      ) %>%
      as_data_frame()
    # Run each scenario through calculate_leverage_metric(); the loan is
    # assumed to balloon at the end of its stated term.
    all_data <-
      1:nrow(variable_matrix) %>%
      map_df(function(x){
        calculate_leverage_metric(
          purchase_price = variable_matrix$purchase_price[[x]],
          capitalized_acquisition_costs = variable_matrix$capitalized_acquisition_costs[[x]],
          capital_investment = variable_matrix$capital_investment[[x]],
          revenue = variable_matrix$revenue[[x]],
          expenses = variable_matrix$expenses[[x]],
          loan_to_cost = variable_matrix$loan_to_cost[[x]],
          # BUG FIX: interest_rate was expanded in the grid but never
          # forwarded, so every scenario silently used the callee's
          # default rate.
          interest_rate = variable_matrix$interest_rate[[x]],
          borrow_capital_investment = variable_matrix$borrow_capital_investment[[x]],
          include_capitalized_cost = variable_matrix$include_capitalized_cost[[x]],
          leverage_threshold = variable_matrix$leverage_threshold[[x]],
          is_interest_only = variable_matrix$is_interest_only[[x]],
          interest_only_periods = variable_matrix$interest_only_periods[[x]],
          is_actual_360 = variable_matrix$is_actual_360[[x]],
          amortization_years = variable_matrix$amortization_years[[x]],
          amortization_months = variable_matrix$amortization_months[[x]],
          term_years = variable_matrix$term_years[[x]],
          term_months = variable_matrix$term_months[[x]],
          pct_loan_fee = variable_matrix$pct_loan_fee[[x]],
          balloon_year = variable_matrix$term_years[[x]],
          balloon_month = variable_matrix$term_months[[x]],
          return_message = return_message
        ) %>%
          mutate(idScenario = x) %>%
          dplyr::select(idScenario, everything())
      })
    if (!return_wide) {
      # Long form: one (idScenario, item, value) row per metric.
      all_data <-
        all_data %>%
        gather(item, value, -c(idScenario))
    } else {
      # Wide form: apply display formatting by column-name prefix.
      all_data <-
        all_data %>%
        mutate_at(.cols =
                    all_data %>% dplyr::select(matches("^amount[A-Z]|^mean[A-Z]")) %>% names(),
                  funs(. %>% formattable::currency(digits = 0))) %>%
        mutate_at(.cols =
                    all_data %>% dplyr::select(matches("^pct[A-Z]")) %>% names(),
                  funs(. %>% formattable::percent(digits = 2))) %>%
        mutate_at(.cols =
                    all_data %>% dplyr::select(matches("^ratio[A-Z]|^rule")) %>% names(),
                  funs(. %>% formattable::comma(digits = 4)))
    }
    return(all_data)
}
calculate_periods_irr <-
  # Solves for the periodic IRR of a dated cash-flow vector using the secant
  # method on NPV, with time measured in years (actual days / 365.24).
  # Optionally returns a one-row summary data frame (contributions,
  # distributions, profit, capital multiple) instead of the bare rate.
  function(dates = c(
    "2016-06-01",
    "2017-05-31",
    "2018-05-31",
    "2019-05-31",
    "2020-05-31",
    "2021-05-31",
    "2022-05-31",
    "2023-05-31",
    "2024-05-31",
    "2025-05-31",
    "2026-05-31"
  ),
  cash_flows = c(
    -3000,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547242,
    478.515738547278
  ),
  date_format = '%Y-%m-%d',
  scale_to_100 = F,
  return_percentage = F,
  return_df = T,
  return_wide = T,
  return_message = T) {
    # Generic secant-method root finder: iterates from two starting values
    # until the parameter change and |f| fall below `tol` or itmax is hit.
    secant <-
      function(par,
               fn,
               tol = 1.e-07,
               itmax = 100,
               trace = TRUE,
               ...) {
        # par = a starting vector with 2 starting values
        # fn = a function whose first argument is the variable of interest
        #
        if (length(par) != 2)
          stop("You must specify a starting parameter vector of length 2")
        p.2 <- par[1]
        p.1 <- par[2]
        f <- rep(NA, length(par))
        f[1] <- fn(p.1, ...)
        f[2] <- fn(p.2, ...)
        iter <- 1
        pchg <- abs(p.2 - p.1)
        fval <- f[2]
        # NOTE(review): because the cat() below is commented out, the
        # `while` loop itself is the body of `if (trace)` — with
        # trace = FALSE no iteration runs and `p.new` is never defined.
        # The caller below relies on the default trace = TRUE.
        if (trace)
          # cat("par: ", par, "fval: ", f, "\n")
          while (pchg >= tol & abs(fval) > tol & iter <= itmax) {
            p.new <- p.2 - (p.2 - p.1) * f[2] / (f[2] - f[1])
            pchg <- abs(p.new - p.2)
            fval <- fn(p.new, ...)
            p.1 <- p.2
            p.2 <- p.new
            f[1] <- f[2]
            f[2] <- fval
            iter <- iter + 1
          }
        list(par = p.new,
             value = fval,
             iter = iter)
      }
    # Net present value of cashFlow at rate irr, with fractional-year times.
    npv <-
      function (irr, cashFlow, times)
        sum(cashFlow / (1 + irr) ^ times)
    cfDate <-
      dates %>%
      as.Date(format = date_format)
    # Elapsed years from the first cash-flow date.
    times <-
      difftime(cfDate, cfDate[1], units = "days") %>% as.numeric() / 365.24
    s <-
      secant(
        par = c(0, 0.1),
        fn = npv,
        cashFlow = cash_flows,
        times = times
      )
    irr <-
      s$par
    if (return_percentage == T & scale_to_100 == T) {
      stop("Sorry you cannot return a percentage and scale to 100")
    }
    if (return_percentage) {
      irr <-
        irr %>% formattable::percent()
    }
    if (scale_to_100) {
      irr <-
        irr * 100
    }
    dateStart <-
      min(dates) %>% ymd
    dateEnd <-
      max(dates) %>% ymd
    # Negative flows = contributions, positive flows = distributions.
    equityContributions <-
      cash_flows[cash_flows < 0] %>%
      sum %>%
      formattable::currency(digits = 2)
    equityDistributions <-
      cash_flows[cash_flows > 0] %>%
      sum %>%
      formattable::currency(digits = 2)
    multipleCapital <-
      -(equityDistributions / equityContributions) %>% digits(digits = 3)
    valueProfit <-
      equityDistributions + equityContributions
    if (return_df == T)
      data <-
        data_frame(
          dateStart,
          dateEnd,
          equityContributions,
          equityDistributions,
          pctIRR = irr,
          valueProfit,
          multipleCapital,
          dateTimeCF = Sys.time()
        )
    else {
      data <-
        irr
    }
    if (return_message) {
      "Cash Flow Produces a " %>%
        paste0(
          irr * 100,
          '% irr\nFrom ',
          dateStart,
          ' to ',
          dateEnd,
          '\n',
          'Profit of ',
          valueProfit,
          '\nCapital Multiple of ',
          multipleCapital
        ) %>%
        message()
    }
    # NOTE(review): this gather() result is never assigned, so the wide
    # data frame is returned even when return_wide = FALSE — confirm
    # whether `data <- data %>% gather(...)` was intended.
    if (return_wide == F) {
      data %>%
        gather(metric, value, -c(dateStart, dateEnd, dateTimeCF))
    }
    return(data)
  }
parse_for_currency_value <-
  function(x) {
    # Strip currency symbols/separators from `x` and return the numeric
    # result wrapped as a formattable currency value.
    parsed_number <- readr::parse_number(x)
    formattable::currency(parsed_number)
  }
parse_for_percentage <-
  function(x) {
    # Parse a rate given as a number or string ("70%", .7, 70). Values of
    # 1 or more are treated as whole percentages and divided by 100.
    rate <- parse_number(x)
    if (rate >= 1) {
      rate <- rate / 100
    }
    percent(rate)
  }
parse_multiple <-
  function(x) {
    # Coerce a multiple supplied as text (e.g. "10x") or number to numeric.
    parse_number(x)
  }
# Values an income stream at a capitalization rate and runs the proceeds
# through a simple sale waterfall: gross value -> cost of sale -> debt
# repayment -> equity distribution. Stops if the waterfall does not tie.
#
# BUG FIX: the gross valuation previously divided by the raw `cap_rate`
# argument rather than the parsed rate, which errored (or mis-scaled) when
# the cap rate was supplied as a string such as "6.15%". It now uses the
# parsed `pct_cap_rate`.
cap_rate_valuation <-
  function(cap_rate = .0615,
           net_operating_income = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    noi <-
      net_operating_income %>%
      parse_for_currency_value
    debt <-
      debt_balance %>%
      parse_for_currency_value()
    pct_cap_rate <-
      cap_rate %>%
      parse_for_percentage()
    pct_sale_cost <-
      cost_of_sale %>%
      parse_for_percentage()
    amountValuationGross <-
      noi / as.numeric(pct_cap_rate)
    amountCostSale <-
      -((pct_sale_cost %>% as.numeric) * amountValuationGross)
    amountValuationNet <-
      amountValuationGross + amountCostSale
    amountDebtRepayment <-
      -min(amountValuationNet, debt)
    amountEquityDistribution <-
      -max(0, amountValuationNet + amountDebtRepayment) %>% currency
    # Unary `!` binds more loosely than `==`, so this reads
    # !((gross + cost + debt repayment + equity distribution) == 0).
    cash_check <-
      !amountValuationGross + amountCostSale + amountDebtRepayment + amountEquityDistribution == 0
    if (cash_check) {
      stop("Cash waterfall does not tie")
    }
    value_df <-
      data_frame(
        pctCapRate = pct_cap_rate,
        pctCostSale = pct_sale_cost,
        amountNetOperatingIncome = noi,
        amountDebtBalance = debt,
        amountValuationGross,
        amountCostSale,
        amountValuationNet,
        amountDebtRepayment,
        amountEquityDistribution
      )
    if (!return_wide) {
      # Long form: melt the waterfall amounts, keeping the inputs as keys.
      value_df <-
        value_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(
          item,
          value,
          -c(
            pctCapRate,
            amountNetOperatingIncome,
            amountDebtBalance,
            pctCostSale
          )
        ) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(value_df)
  }
# Values a business at an EBITDA multiple and runs the proceeds through a
# simple sale waterfall: gross value -> cost of sale -> debt repayment ->
# equity distribution. Stops if the waterfall does not tie.
# (Function name keeps the historical "ebtida" spelling; callers use it.)
ebtida_multiple_value <-
  function(ebitda_multiple = 10,
           ebitda = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    ebitda <-
      ebitda %>%
      parse_for_currency_value
    debt <-
      debt_balance %>%
      parse_for_currency_value()
    multipleEBITDA <-
      ebitda_multiple %>%
      parse_multiple()
    pct_sale_cost <-
      cost_of_sale %>%
      parse_for_percentage()
    amountValuationGross <-
      ebitda * multipleEBITDA
    amountCostSale <-
      -((pct_sale_cost %>% as.numeric) * amountValuationGross)
    amountValuationNet <-
      amountValuationGross + amountCostSale
    # Repay the lesser of net proceeds and the outstanding debt.
    amountDebtRepayment <-
      -min(amountValuationNet, debt)
    amountEquityDistribution <-
      -max(0, amountValuationNet + amountDebtRepayment) %>% currency
    # Unary `!` binds more loosely than `==`, so this reads
    # !((gross + cost + debt repayment + equity distribution) == 0).
    cash_check <-
      !amountValuationGross + amountCostSale + amountDebtRepayment + amountEquityDistribution == 0
    if (cash_check) {
      stop("Cash waterfall does not tie")
    }
    value_df <-
      data_frame(
        multipleEBITDA,
        pctCostSale = pct_sale_cost,
        amountEBITDA = ebitda,
        amountDebtBalance = debt,
        amountValuationGross,
        amountCostSale,
        amountValuationNet,
        amountDebtRepayment,
        amountEquityDistribution
      )
    if (!return_wide) {
      # Long form: melt the waterfall amounts, keeping the inputs as keys.
      value_df <-
        value_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(item,
               value,
               -c(
                 multipleEBITDA,
                 amountEBITDA,
                 amountDebtBalance,
                 pctCostSale
               )) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(value_df)
  }
#' Calculate residual value
#'
#' @param cap_rates Vector of Capitalization Rates in percent or character percent form
#' @param net_operating_income Vector of Net Operating Income in numeric or character numeric/currency form
#' @param cost_of_sale Vector of Cost of Sale in percent or character percent form
#' @param debt_balance Vector of anticipated Debt Balance at sale in numeric or character numeric/currency form
#' @param return_wide If \code{TRUE}, returns data in wide form; otherwise long form
#' @import readr dplyr purrr formattable
#' @return
#' @export
#'
#' @examples
#' calculate_residual_valuation_cap_rates()
calculate_residual_valuation_cap_rates <-
  function(cap_rates = c(.05, .0525, .06, .2),
           net_operating_income = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    # Cross all inputs into a scenario grid; each combination is valued
    # independently via cap_rate_valuation().
    scenario_matrix <-
      expand.grid(
        cap_rate = cap_rates,
        noi = net_operating_income,
        cost_sale = cost_of_sale,
        debt = debt_balance,
        stringsAsFactors = F
      ) %>%
      as_data_frame
    scenario_df <-
      1:nrow(scenario_matrix) %>%
      map_df(function(x) {
        cap_rate_valuation(
          cap_rate = scenario_matrix$cap_rate[[x]],
          net_operating_income = scenario_matrix$noi[[x]],
          cost_of_sale = scenario_matrix$cost_sale[[x]],
          debt_balance = scenario_matrix$debt[[x]],
          return_wide = T
        )
      }) %>%
      mutate(idScenario = 1:n()) %>%
      dplyr::select(idScenario, everything())
    # Apply display formatting by column-name prefix (pct* / amount*).
    scenario_df <-
      scenario_df %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^pct")) %>% names,
                funs(. %>% percent(digits = 2))) %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^amount")) %>% names,
                funs(. %>% currency(digits = 2)))
    if (!return_wide) {
      # Long form: melt the waterfall amounts, keeping ids/inputs as keys.
      scenario_df <-
        scenario_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(
          item,
          value,
          -c(
            idScenario,
            pctCapRate,
            amountNetOperatingIncome,
            amountDebtBalance,
            pctCostSale
          )
        ) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(scenario_df)
  }
#' Calculate residual value for a set of given EBITDA based inputs
#'
#' @param ebitda_multiples Vector of EBITDA Multiples in numeric or character
#' @param ebitda Vector of EBITDA in numeric or character numeric/currency form
#' @param cost_of_sale Vector of Cost of Sale in percent or character percent form
#' @param debt_balance Vector of anticipated Debt Balance at sale in numeric or character numeric/currency form
#' @param return_wide Return data in wide or long form
#' @import readr dplyr purrr formattable
#' @return
#' @export
#'
#' @examples
#' calculate_residual_valuation_ebitda_multiples(ebitda_multiples = c(5, 10, 15, 20), ebitda = "$27,500,000", cost_of_sale = "5%", debt_balance = "$350,000,000", return_wide = T)
# Residual valuation across a grid of EBITDA-multiple scenarios; each
# combination of inputs is valued independently via ebtida_multiple_value().
#
# BUG FIX: in the long (return_wide = FALSE) branch, `idScenario` was
# missing from the gather() exclusions — unlike the cap-rate sibling — so
# the scenario id was melted into the item/value pairs. It is now kept as
# a key column.
calculate_residual_valuation_ebitda_multiples <-
  function(ebitda_multiples = c(5, 10, 15, 20),
           ebitda = "$27,500,000",
           cost_of_sale = "5%",
           debt_balance = "$350,000,000",
           return_wide = T) {
    scenario_matrix <-
      expand.grid(
        ebitda_multiple = ebitda_multiples,
        ebitda = ebitda,
        cost_sale = cost_of_sale,
        debt = debt_balance,
        stringsAsFactors = F
      ) %>%
      as_data_frame
    scenario_df <-
      1:nrow(scenario_matrix) %>%
      map_df(function(x) {
        ebtida_multiple_value(
          ebitda_multiple = scenario_matrix$ebitda_multiple[[x]],
          ebitda = scenario_matrix$ebitda[[x]],
          cost_of_sale = scenario_matrix$cost_sale[[x]],
          debt_balance = scenario_matrix$debt[[x]],
          return_wide = T
        )
      }) %>%
      mutate(idScenario = 1:n()) %>%
      dplyr::select(idScenario, everything())
    # Apply display formatting by column-name prefix (pct* / amount*).
    scenario_df <-
      scenario_df %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^pct")) %>% names,
                funs(. %>% percent(digits = 2))) %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^amount")) %>% names,
                funs(. %>% currency(digits = 2)))
    if (!return_wide) {
      scenario_df <-
        scenario_df %>%
        dplyr::select(-amountValuationNet) %>%
        gather(item,
               value,
               -c(
                 idScenario,
                 multipleEBITDA,
                 amountEBITDA,
                 amountDebtBalance,
                 pctCostSale
               )) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(scenario_df)
  }
post_money_valuation <-
  function(pre_money_valuation = "$45,000,000",
           percent_sold = "10%") {
    # Computes the post-money ownership split for a single new investment:
    # new capital = percent sold * pre-money valuation, and ownership
    # percentages are taken against the combined post-money total.
    options(scipen = 999999)
    pre_money <- parse_for_currency_value(pre_money_valuation)
    pct_sold <- as.numeric(parse_for_percentage(percent_sold))
    new_capital <- currency(pct_sold * pre_money)
    total_val <- pre_money + new_capital
    valuation_data <-
      data_frame(
        valuationPreMoney = pre_money,
        amountCapitalInvestment = new_capital
      ) %>%
      mutate(
        pctOwnershipExistingShareholders = percent(pre_money / total_val),
        pctOwnershipNewInvestment = percent(new_capital / total_val)
      )
    return(valuation_data)
  }
#' Calculates range of post investment valuations
#'
#' @param pre_money_valuation Vector of pre-money valuations in numeric or character form
#' @param percent_sold Vector of percentages of the business sold, in percent or character percent form
#' @param return_wide Return data in wide or long form
#' @import readr dplyr purrr formattable
#' @return
#' @export
#'
#' @examples
#' calculate_valuation_post_money(pre_money_valuation = "$45,000,000", percent_sold = "10%", return_wide = T)
calculate_valuation_post_money <-
  function(pre_money_valuation = "$45,000,000",
           percent_sold = "10%",
           return_wide = T) {
    # Cross all inputs into a scenario grid; each combination is run
    # through post_money_valuation().
    scenario_matrix <-
      expand.grid(
        pre_money_valuation = pre_money_valuation,
        percent_sold = percent_sold,
        stringsAsFactors = F
      ) %>%
      as_data_frame
    scenario_df <-
      1:nrow(scenario_matrix) %>%
      map_df(function(x) {
        post_money_valuation(
          pre_money_valuation = scenario_matrix$pre_money_valuation[[x]],
          percent_sold = scenario_matrix$percent_sold[[x]]
        )
      }) %>%
      mutate(idScenario = 1:n()) %>%
      dplyr::select(idScenario, everything())
    # Apply display formatting by column-name prefix.
    scenario_df <-
      scenario_df %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^pct")) %>% names,
                funs(. %>% percent(digits = 2))) %>%
      mutate_at(.cols =
                  scenario_df %>% dplyr::select(matches("^amount|valuation")) %>% names,
                funs(. %>% currency(digits = 2)))
    if (!return_wide) {
      # Long form drops the percentage columns entirely so every melted
      # value can be formatted as currency.
      scenario_df <-
        scenario_df %>%
        dplyr::select(-matches("pct")) %>%
        gather(item, value, -c(idScenario)) %>%
        mutate(value = value %>% currency(digits = 2)) %>%
        suppressWarnings()
    }
    return(scenario_df)
  }
#' Calculate proceeds from share sale given specified price and share amount
#'
#' @param price Price per share
#' @param shares Number of shares sold
#'
#' @return
#' @export
#' @importFrom formattable currency
#' @examples
#' calculate_share_proceeds(price = 9, shares = 150000)
calculate_share_proceeds <-
  function(price = 10, shares = 1000000) {
    # Gross proceeds = price per share * share count, as a currency value.
    currency(price * shares)
  }
# Total acquisition basis: purchase price + capitalized acquisition costs +
# capital investment, with NULL components treated as zero.
#
# BUG FIXES vs. prior version:
#   * the second NULL check re-tested `capitalized_acquisition_costs`
#     instead of `capital_investment`;
#   * its else branch zeroed `capital_investment` rather than
#     `amountCapitalInvestment`, leaving the latter undefined and crashing
#     the data_frame() call;
#   * the default capitalized cost "$300,0000" (3,000,000) is corrected to
#     "$300,000" to match this file's callers.
calculate_basis <-
  function(purchase_price = "$10,000,000",
           capitalized_acquisition_costs = "$300,000",
           capital_investment = "$1,200,000") {
    options(scipen = 999)
    if (purchase_price %>% is_null) {
      stop("Please enter a purchase price")
    }
    amountPurchasePrice <-
      purchase_price %>%
      parse_for_currency_value()
    if (!capitalized_acquisition_costs %>% is_null) {
      amountCapitalizedCosts <-
        capitalized_acquisition_costs %>%
        parse_for_currency_value()
    } else {
      amountCapitalizedCosts <-
        0
    }
    if (!capital_investment %>% is_null) {
      amountCapitalInvestment <-
        capital_investment %>%
        parse_for_currency_value()
    } else {
      amountCapitalInvestment <-
        0
    }
    basis_df <-
      data_frame(amountPurchasePrice, amountCapitalInvestment, amountCapitalizedCosts) %>%
      mutate(amountBasis = amountPurchasePrice + amountCapitalInvestment + amountCapitalizedCosts)
    return(basis_df)
  }
# Builds the capital stack for an acquisition: basis, loan proceeds (as a
# negative source of funds), and the equity plug. Refuses leverage above
# `leverage_threshold`.
calculate_capitalization <-
  function(purchase_price = "$9,700,000",
           capitalized_acquisition_costs = "$300,000",
           capital_investment = "$0",
           loan_to_cost = .7,
           borrow_capital_investment = T,
           include_capitalized_cost = F,
           leverage_threshold = .95) {
    if (loan_to_cost %>% is_null()) {
      stop("Please enter a loan to cost even if it is zero")
    }
    # A NULL threshold disables the leverage cap (allows up to 100%).
    if (leverage_threshold %>% is_null()) {
      leverage_threshold <-
        1
    }
    pct_ltc <-
      loan_to_cost %>% parse_for_percentage()
    if (pct_ltc > leverage_threshold) {
      leverage_message <-
        "\nDon't be a reckless idiot, remember what happend to Lehman Brothers???\nDon't know Lehman Brothers, Google it\n" %>%
        paste0(pct_ltc, ' is unprudent leverage\nA more reasonable amount of leverage is: ', leverage_threshold %>% formattable::percent(), '\nChange your leverage assumptions and try again')
      stop(leverage_message)
    }
    basis_df <-
      calculate_basis(purchase_price = purchase_price, capitalized_acquisition_costs = capitalized_acquisition_costs, capital_investment = capital_investment)
    # The LTC ratio is applied to purchase price, optionally including
    # capitalized acquisition costs.
    if (include_capitalized_cost) {
      debt_basis <-
        basis_df %>% mutate(amountDebtBasis = amountPurchasePrice + amountCapitalizedCosts) %>%
        .$amountDebtBasis
    } else {
      debt_basis <-
        basis_df$amountPurchasePrice
    }
    loan_proceeds <-
      debt_basis * pct_ltc
    # Optionally finance the capital investment dollar-for-dollar on top of
    # the LTC-based proceeds.
    if (borrow_capital_investment) {
      loan_proceeds <-
        loan_proceeds + basis_df$amountCapitalInvestment
    }
    # Sources are negative (loan, equity); uses (basis) are positive.
    capital_stack_df <-
      basis_df %>%
      mutate(
        amountLoanProceeds = -loan_proceeds,
        amountEquity = -(amountBasis + amountLoanProceeds)
      )
    return(capital_stack_df)
  }
# Builds a monthly period index for a loan schedule: period 0 is the start
# date itself (funding date); periods 1..n are stamped with the LAST day of
# each successive month. Returns a data frame of idPeriod, yearPeriod and
# datePeriod.
get_data_monthly_periods <-
  function(start_date = "2016-06-01",
           term_years = 25,
           term_months = 0){
    periods <-
      term_years * 12 + term_months
    periods <-
      0:periods
    start_date <-
      start_date %>% lubridate::ymd %>% as.Date()
    # Maps one period number to its date. Note the offset: period 1 ends in
    # the start month (months(0)); period p > 1 ends in month p - 1.
    get_end_of_period <-
      function(period = 0) {
        if (period == 0) {
          period_date <-
            start_date %m+% months(period) %>%
            as.character() %>%
            as.Date()
        }
        if (period == 1) {
          period_date <-
            start_date %m+% months(0) %>%
            timeDate::timeLastDayInMonth %>%
            as.character() %>%
            as.Date()
        }
        if (period > 1) {
          period_date <-
            start_date %m+% months(period - 1) %>%
            timeDate::timeLastDayInMonth %>%
            as.character() %>%
            as.Date()
        }
        period_df <-
          data_frame(idPeriod = period, datePeriod = period_date)
        return(period_df)
      }
    all_periods <-
      periods %>%
      purrr::map(function(x) {
        get_end_of_period(period = x)
      }) %>%
      compact %>%
      bind_rows()
    # Period 0 belongs to year 0; thereafter periods are bucketed into
    # loan years via integer division.
    all_periods <-
      all_periods %>%
      mutate(yearPeriod = ifelse(idPeriod == 0, 0, (idPeriod %/% 12) + 1)) %>%
      dplyr::select(idPeriod, yearPeriod, everything())
    return(all_periods)
  }
# Periodic payment for an annuity (Excel-style PMT).
#
# r:    periodic interest rate
# n:    number of payment periods
# pv:   present value (loan principal)
# fv:   future value remaining after the final payment
# type: 0 = payment at period end, 1 = payment at period start
#
# Returns the payment amount (negative for a positive pv, i.e. a cash
# outflow).
#
# BUG FIX: an invalid `type` previously only print()ed a message and
# silently returned NULL; it now stops with the same message.
pmt <-
  function (r, n, pv, fv, type = 0) {
    if (type != 0 && type != 1) {
      stop("Error: type should be 0 or 1!")
    }
    (pv + fv / (1 + r) ^ n) * r / (1 - 1 / (1 + r) ^ n) * (-1) *
      (1 + r) ^ (-1 * type)
  }
# Builds a full monthly loan amortization schedule.
#
# Period 0 is the funding row (initial draw + loan fee); periods 1..balloon
# accrue interest on the running balance (actual/360 or actual/365 daily
# rates unless override_monthly_interest uses a flat rate/12), pay either
# interest-only or a level pmt()-based payment, and the balloon period
# repays the remaining balance. Payments and repayments are stored as
# negative cash flows. Optionally collapses the schedule to annual totals.
calculate_loan_payment <-
  function(loan_start_date = "2016-06-01",
           amount_initial_draw = 3000,
           is_interest_only = F,
           interest_only_periods = 24,
           interest_rate = "10%",
           is_actual_360 = T,
           amortization_years = 10,
           amortization_months = 0,
           term_years = 10,
           term_months = 0,
           pct_loan_fee = 0,
           balloon_year = 10,
           override_monthly_interest = F,
           interest_reserve_period = 0,
           balloon_month = 0,
           return_annual_summary = F) {
    options(scipen = 99999)
    options(digits = 10)
    interest_rate <-
      interest_rate %>%
      parse_for_percentage()
    # Actual/360: daily rate divides by 360, so the effective annual
    # (net) rate is grossed up by 365/360.
    if (is_actual_360 == T) {
      daily_interest <-
        interest_rate / 360
      net_rate <-
        interest_rate / 360 * 365
    } else {
      daily_interest <-
        interest_rate / 365
      net_rate <-
        interest_rate
    }
    amortization_periods <-
      (amortization_years * 12) + amortization_months
    loan_periods <-
      (balloon_year * 12) + balloon_month
    loan_period_df <-
      get_data_monthly_periods(start_date = loan_start_date,
                               term_years = term_years,
                               term_months = term_months)
    # Flag IO periods, active (pre-balloon) periods, and place the single
    # initial draw and its fee in period 0.
    loan_period_df <-
      loan_period_df %>%
      mutate(
        isIO = ifelse(is_interest_only == T &
                        idPeriod <= interest_only_periods, T, F),
        isActiveLoan = ifelse(idPeriod <= loan_periods, T, F),
        amountInitialDraw = ifelse(idPeriod == 0, amount_initial_draw, 0),
        amountLoanFee = amountInitialDraw * pct_loan_fee
      )
    loan_period_df <-
      loan_period_df %>%
      dplyr::filter(isActiveLoan == T)
    periods <-
      loan_period_df$idPeriod
    all_payment_data <-
      data_frame()
    # Iterative build: each period reads the prior period's ending balance.
    for (period in periods) {
      period_index <-
        period + 1
      datePeriod <-
        loan_period_df$datePeriod[period_index]
      drawInitial <-
        loan_period_df$amountInitialDraw[period_index]
      drawAdditional <-
        0 # loan_period_df$amountAdditionalDraw[period_index] -- layer in eventually
      is_interest_only <-
        loan_period_df$isIO[period_index]
      periodFee <-
        loan_period_df$amountLoanFee[period_index]
      # Funding row: no interest/principal, just the draw (and fee).
      if (period == 0) {
        month_df <-
          data_frame(
            idPeriod = period,
            dateStartPeriod = datePeriod,
            dateEndPeriod = datePeriod,
            isIO = is_interest_only,
            balanceInitial = 0 %>% currency(digits = 2),
            amountInitialDraw = drawInitial %>% currency(digits = 2),
            amountAdditionalDraw = drawAdditional %>% currency(digits = 2),
            paymentInterest = 0 %>% currency(digits = 2),
            paymentPrincipal = 0 %>% currency(digits = 2),
            amountRepayment = 0 %>% currency(digits = 2),
            amountLoanFee = periodFee %>% currency(digits = 2)
          ) %>%
          mutate(balanceEnd = amountInitialDraw + amountAdditionalDraw)
      }
      if (period > 0) {
        initial_balance <-
          all_payment_data$balanceEnd[period_index - 1]
        total_balance <-
          initial_balance + drawInitial + drawAdditional
        # Level payment is sized on total funded dollars to date, not the
        # declining balance.
        balance_basis <-
          (all_payment_data$amountInitialDraw %>% sum()) +
          all_payment_data$amountAdditionalDraw %>% sum()
        start_month <-
          timeDate::timeFirstDayInMonth(datePeriod) %>% as.Date()
        days <-
          (datePeriod - start_month) + 1
        month_days <-
          as.numeric(days, units = 'days')
        if (override_monthly_interest == T) {
          monthly_interest <-
            net_rate / 12
        } else {
          # Actual-day accrual for this calendar month.
          monthly_interest <-
            daily_interest * month_days
        }
        paymentInterest <-
          monthly_interest * (total_balance)
        # Interest-reserve periods accrue no cash interest payment.
        if (interest_reserve_period > 0 &
            period <= interest_reserve_period) {
          paymentInterest <-
            0
        }
        if (is_interest_only == T) {
          paymentTotal <-
            paymentInterest
        } else {
          paymentTotal <-
            pmt(
              r = monthly_interest,
              pv = balance_basis,
              fv = 0,
              n = amortization_periods
            )
        }
        paymentPrincipal <-
          abs(paymentTotal) - paymentInterest
        # Balloon period: repay whatever balance remains after this
        # period's scheduled principal.
        if (period == loan_periods) {
          amountRepayment <-
            initial_balance + drawInitial + drawAdditional - paymentPrincipal
        } else {
          amountRepayment <-
            0
        }
        month_df <-
          data_frame(
            idPeriod = period,
            dateStartPeriod = start_month,
            dateEndPeriod = datePeriod,
            isIO = is_interest_only,
            balanceInitial = initial_balance %>% currency(digits = 2),
            amountInitialDraw = drawInitial %>% currency(digits = 2),
            amountAdditionalDraw = drawAdditional %>% currency(digits = 2),
            paymentInterest = -paymentInterest %>% currency(digits = 2),
            paymentPrincipal = -paymentPrincipal %>% currency(digits = 2),
            amountRepayment = -amountRepayment %>% currency(digits = 2),
            amountLoanFee = periodFee %>% currency(digits = 2)
          ) %>%
          mutate(
            balanceEnd =
              balanceInitial + amountInitialDraw + amountAdditionalDraw +
              paymentPrincipal + amountRepayment
          )
      }
      all_payment_data <-
        month_df %>%
        bind_rows(all_payment_data) %>%
        arrange((idPeriod))
    }
    all_payment_data <-
      all_payment_data %>%
      mutate(
        balanceInitial = balanceInitial %>% currency(digits = 2),
        amountInitialDraw = amountInitialDraw %>% currency(digits = 2),
        amountAdditionalDraw = amountAdditionalDraw %>% currency(digits = 2),
        paymentInterest = paymentInterest %>% currency(digits = 2),
        paymentPrincipal = paymentPrincipal %>% currency(digits = 2),
        amountRepayment = amountRepayment %>% currency(digits = 2),
        amountLoanFee = amountLoanFee %>% currency(digits = 2),
        balanceEnd = balanceEnd %>% currency(digits = 2)
      ) %>%
      mutate(yearPeriod = ifelse(idPeriod > 0,
                                 ((idPeriod - 1) %/% 12) + 1,
                                 0)) %>%
      dplyr::select(yearPeriod, everything())
    # Optional roll-up: annual totals plus a cumulative ending balance.
    if (return_annual_summary == T) {
      all_payment_data <-
        all_payment_data %>%
        group_by(yearPeriod) %>%
        summarise(
          dateStartPeriod = min(dateStartPeriod),
          dateEndPeriod = max(dateEndPeriod),
          amountInitialDraw = sum(amountInitialDraw),
          amountAdditionalDraw = sum(amountAdditionalDraw),
          paymentInterest = sum(paymentInterest),
          paymentPrincipal = sum(paymentPrincipal),
          amountRepayment = sum(amountRepayment),
          amountLoanFee = sum(amountLoanFee),
          cfLoan = (
            amountInitialDraw + amountAdditionalDraw + paymentInterest + paymentPrincipal + amountRepayment + amountLoanFee
          ) %>% currency(digits = 2)
        ) %>%
        mutate(
          balanceEnd = cumsum(amountInitialDraw) + cumsum(paymentPrincipal) + cumsum(amountRepayment)
        ) %>%
        ungroup
    }
    return(all_payment_data)
  }
# Average annual debt-service components for a loan that starts on the first
# of next month. Builds a full monthly payment schedule via
# calculate_loan_payment(), aggregates it by loan year, then averages the
# annual principal and interest across all years.
#
# Returns a one-row data frame: meanPrincipal, meanInterest, meanPayment
# (all formattable::currency; principal/interest are negative cash flows).
#
# NOTE(review): `override_monthly_interest` and `interest_reserve_period`
# are accepted here but NOT forwarded to calculate_loan_payment() below —
# confirm whether they should be passed through.
calculate_average_payment <-
  function(amount_initial_draw = 3000,
           is_interest_only = F,
           interest_only_periods = 24,
           interest_rate = "10%",
           is_actual_360 = T,
           amortization_years = 10,
           amortization_months = 0,
           term_years = 10,
           term_months = 0,
           pct_loan_fee = 0,
           balloon_year = 10,
           override_monthly_interest = F,
           interest_reserve_period = 0,
           balloon_month = 0) {
    library(lubridate)
    # First day of the month following today's date; used as a synthetic
    # loan start so the schedule begins on a clean month boundary.
    first_of_the_month <-
      (ceiling_date((Sys.Date() %m+% months(0)), "month") - days(1)) + 1
    pmt_df <-
      calculate_loan_payment(
        loan_start_date = first_of_the_month,
        amount_initial_draw = amount_initial_draw,
        is_interest_only = is_interest_only,
        interest_only_periods = interest_only_periods,
        interest_rate = interest_rate,
        is_actual_360 = is_actual_360,
        amortization_years = amortization_years,
        amortization_months = amortization_months,
        term_years = term_years,
        term_months = term_months,
        pct_loan_fee = pct_loan_fee,
        balloon_year = balloon_year,
        balloon_month = balloon_month,
        return_annual_summary = F
      )
    # Drop the period-0 funding row, total P&I by loan year, then average
    # those annual totals into a single representative year.
    pmt_df <-
      pmt_df %>%
      dplyr::filter(!idPeriod == 0) %>%
      group_by(yearPeriod) %>%
      summarise(
        paymentPrincipal = sum(paymentPrincipal, na.rm = T),
        paymentInterest = sum(paymentInterest, na.rm = T)
      ) %>%
      ungroup %>%
      summarise(
        meanPrincipal = mean(paymentPrincipal, na.rm = T) %>% formattable::currency(),
        meanInterest = mean(paymentInterest, na.rm = T) %>% formattable::currency()
      ) %>%
      mutate(meanPayment = meanPrincipal + meanInterest)
    return(pmt_df)
  }
# Compute leveraged return metrics for a single acquisition scenario:
# leverage, EBITDA/NOI margin, return on cost, initial debt yield, average
# DSCR, average cash-on-cash, return on equity, and the rule-of-72 equity
# doubling time.
#
# Fixes relative to the previous version:
#  * `override_monthly_interest` and `interest_reserve_period` were used in
#    the body but missing from the signature, so the call to
#    calculate_average_payment() errored unless they happened to exist in
#    the global environment. They are now formal arguments (appended after
#    `return_message`, so existing positional callers are unaffected) with
#    the same defaults calculate_average_payment() uses.
#  * Removed a stray trailing comma in the calculate_capitalization() call.
#
# NOTE(review): `leverage_threshold` is accepted but never used in this
# function — confirm whether a cap on leverage was intended.
calculate_leverage_metric <-
  function(purchase_price = "$9,700,000",
           capitalized_acquisition_costs = "$300,000",
           capital_investment = "0",
           revenue = "$1,500,000",
           expenses = "$115,000",
           loan_to_cost = .7,
           borrow_capital_investment = F,
           include_capitalized_cost = T,
           leverage_threshold = .95,
           is_interest_only = TRUE,
           interest_only_periods = 12,
           interest_rate = "5%",
           is_actual_360 = TRUE,
           amortization_years = 30,
           amortization_months = 0,
           term_years = 10,
           term_months = 0,
           pct_loan_fee = 0,
           balloon_year = 10,
           balloon_month = 0,
           return_message = F,
           override_monthly_interest = F,
           interest_reserve_period = 0) {
    # Sources & uses: total basis, loan proceeds and equity for the deal.
    basis_df <-
      calculate_capitalization(
        purchase_price = purchase_price,
        capitalized_acquisition_costs = capitalized_acquisition_costs,
        capital_investment = capital_investment,
        loan_to_cost = loan_to_cost,
        borrow_capital_investment = borrow_capital_investment,
        include_capitalized_cost = include_capitalized_cost
      )
    revenue_amount <-
      revenue %>%
      parse_for_currency_value()
    expense_amount <-
      expenses %>%
      parse_for_currency_value()
    # Expenses are carried as a negative cash flow.
    if (expense_amount > 0) {
      expense_amount <-
        -expense_amount
    }
    operating_income <-
      revenue_amount + expense_amount
    interest_rate <-
      interest_rate %>%
      parse_for_percentage()
    pct_loan_fee <-
      pct_loan_fee %>%
      parse_for_percentage()
    # Average annual debt service on the loan proceeds.
    average_pmt <-
      calculate_average_payment(
        amount_initial_draw = basis_df$amountLoanProceeds %>% abs(),
        is_interest_only = is_interest_only,
        interest_only_periods = interest_only_periods,
        interest_rate = interest_rate,
        is_actual_360 = is_actual_360,
        amortization_years = amortization_years,
        amortization_months = amortization_months,
        term_years = term_years,
        term_months = term_months,
        pct_loan_fee = pct_loan_fee,
        balloon_year = balloon_year,
        override_monthly_interest = override_monthly_interest,
        interest_reserve_period = interest_reserve_period
      )
    # Assemble the scenario's return metrics into a single row.
    data <-
      basis_df %>%
      mutate(
        amountRevenue = revenue_amount,
        amountExpense = expense_amount,
        amountEBITDA_NOI = operating_income
      ) %>%
      bind_cols(average_pmt) %>%
      mutate(
        amountLNCFMean = amountEBITDA_NOI + meanPayment,
        pctLeverage = (-amountLoanProceeds / amountBasis) %>% formattable::percent(),
        pctMarginEBITDA_NOI = (amountEBITDA_NOI / amountRevenue) %>% formattable::percent(),
        pctReturnOnCost = (amountEBITDA_NOI / amountBasis) %>% formattable::percent(),
        pctDebtYieldInitial = -(amountEBITDA_NOI / amountLoanProceeds) %>% formattable::percent(),
        ratioDSCRMean = (amountEBITDA_NOI / -meanPayment) %>% as.numeric() %>% formattable::digits(3),
        pctCashOnCashMean = (amountLNCFMean / -amountEquity) %>% formattable::percent(),
        pctReturnOnEquity = ((amountEBITDA_NOI + meanInterest) / -amountEquity) %>% formattable::percent(),
        rule72Multiple2x = (72 / (pctCashOnCashMean * 100)) %>% as.numeric()
      )
    if (return_message) {
      metric_message <-
        "Basis: " %>%
        paste0(
          data$amountBasis,
          '\n',
          'Leverage: ',
          data$pctLeverage,
          '\n',
          'Interest Rate: ',
          interest_rate,
          '\n',
          'Amortization: ',
          ((amortization_years * 12) + amortization_months),
          ' periods\n',
          'Return on Cost: ',
          data$pctReturnOnCost,
          '\nCash on Cash: ', data$pctCashOnCashMean,
          "\nReturn on Equity: ", data$pctReturnOnEquity,
          "\nRule of 72: Equity Doubles in ", data$rule72Multiple2x, ' years\n'
        )
      metric_message %>% message()
    }
    return(data)
  }
#' Calculate leveraged return metrics
#'
#' @param purchase_price Vector of Purchase Prices
#' @param capitalized_acquisition_costs Vector of Capitalized Acquisition Costs
#' @param capital_investment Vector of Capital Investment
#' @param revenue Vector of revenue amounts
#' @param expenses Vector of expenses
#' @param loan_to_cost Vector of loan to cost
#' @param interest_rate Interest Rate
#' @param borrow_capital_investment Borrow Investment \code{TRUE, FALSE}
#' @param include_capitalized_cost Include capitalized costs in leverage calculations \code{TRUE, FALSE}
#' @param leverage_threshold Maximum Leverage
#' @param is_interest_only Does loan have interest-only periods \code{TRUE, FALSE}
#' @param interest_only_periods Interest Only Periods
#' @param is_actual_360 Is loan calculated on actual/360 basis \code{TRUE, FALSE}
#' @param amortization_years Loan amortization years
#' @param amortization_months Loan amortization months
#' @param term_years Term of the loan, years
#' @param term_months Term of the loan, months
#' @param pct_loan_fee Loan fee, percent
#' @param return_wide
#' @param return_message
#'
#' @return
#' @export
#' @import readr dplyr lubridate stringr purrr tidyr formattable
#' @examples
calculate_leverage_metrics <-
  function(purchase_price = 0,
           capitalized_acquisition_costs = 0,
           capital_investment = 0,
           revenue = 0,
           expenses = 0,
           loan_to_cost = 0,
           interest_rate = 0,
           borrow_capital_investment = F,
           include_capitalized_cost = T,
           leverage_threshold = .95,
           is_interest_only = FALSE,
           interest_only_periods = 0,
           is_actual_360 = TRUE,
           amortization_years = 30,
           amortization_months = 0,
           term_years = 30,
           term_months = 0,
           pct_loan_fee = 0,
           return_wide = T,
           return_message = T) {
    # Build the scenario grid: one row for every combination of the supplied
    # input vectors.
    variable_matrix <-
      expand.grid(
        purchase_price = purchase_price,
        capitalized_acquisition_costs = capitalized_acquisition_costs,
        capital_investment = capital_investment,
        revenue = revenue,
        expenses = expenses,
        loan_to_cost = loan_to_cost,
        borrow_capital_investment = borrow_capital_investment,
        include_capitalized_cost = include_capitalized_cost,
        leverage_threshold = leverage_threshold,
        is_interest_only = is_interest_only,
        interest_only_periods = interest_only_periods,
        interest_rate = interest_rate,
        is_actual_360 = is_actual_360,
        amortization_years = amortization_years,
        amortization_months = amortization_months,
        term_years = term_years,
        term_months = term_months,
        pct_loan_fee = pct_loan_fee,
        stringsAsFactors = F
      ) %>%
      as_data_frame()
    # Evaluate every scenario and tag each result row with its scenario id.
    all_data <-
      seq_len(nrow(variable_matrix)) %>%
      map_df(function(x){
        calculate_leverage_metric(
          purchase_price = variable_matrix$purchase_price[[x]],
          capitalized_acquisition_costs = variable_matrix$capitalized_acquisition_costs[[x]],
          capital_investment = variable_matrix$capital_investment[[x]],
          revenue = variable_matrix$revenue[[x]],
          expenses = variable_matrix$expenses[[x]],
          loan_to_cost = variable_matrix$loan_to_cost[[x]],
          borrow_capital_investment = variable_matrix$borrow_capital_investment[[x]],
          include_capitalized_cost = variable_matrix$include_capitalized_cost[[x]],
          leverage_threshold = variable_matrix$leverage_threshold[[x]],
          is_interest_only = variable_matrix$is_interest_only[[x]],
          interest_only_periods = variable_matrix$interest_only_periods[[x]],
          # BUG FIX: interest_rate was part of the scenario grid but was
          # never passed through, so every scenario silently used the
          # calculate_leverage_metric() default of "5%".
          interest_rate = variable_matrix$interest_rate[[x]],
          is_actual_360 = variable_matrix$is_actual_360[[x]],
          amortization_years = variable_matrix$amortization_years[[x]],
          amortization_months = variable_matrix$amortization_months[[x]],
          term_years = variable_matrix$term_years[[x]],
          term_months = variable_matrix$term_months[[x]],
          pct_loan_fee = variable_matrix$pct_loan_fee[[x]],
          balloon_year = variable_matrix$term_years[[x]],
          balloon_month = variable_matrix$term_months[[x]],
          return_message = return_message
        ) %>%
          mutate(idScenario = x) %>%
          dplyr::select(idScenario, everything())
      })
    if (!return_wide) {
      # Long format: one (idScenario, item, value) row per metric.
      all_data <-
        all_data %>%
        gather(item, value, -c(idScenario))
    } else {
      # Wide format: re-apply display formatting by column-name prefix
      # (amount*/mean* -> currency, pct* -> percent, ratio*/rule* -> comma).
      all_data <-
        all_data %>%
        mutate_at(.cols =
                    all_data %>% dplyr::select(matches("^amount[A-Z]|^mean[A-Z]")) %>% names(),
                  funs(. %>% formattable::currency(digits = 0))) %>%
        mutate_at(.cols =
                    all_data %>% dplyr::select(matches("^pct[A-Z]")) %>% names(),
                  funs(. %>% formattable::percent(digits = 2))) %>%
        mutate_at(.cols =
                    all_data %>% dplyr::select(matches("^ratio[A-Z]|^rule")) %>% names(),
                  funs(. %>% formattable::comma(digits = 4)))
    }
    return(all_data)
  }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{umx-deprecated}
\alias{umx-deprecated}
\title{Deprecated. May already stop() code and ask to be updated. May be dropped entirely in future.}
\arguments{
\item{...}{the old function's parameters (now stripped out to avoid telling people how to do it the wrong way :-)}
}
\description{
xmuMakeThresholdsMatrices should be replaced with \code{\link[=umxThresholdMatrix]{umxThresholdMatrix()}}
umxSaturated should be replaced with \code{\link[=mxRefModels]{mxRefModels()}}
umx_grep_labels should be replaced with \code{\link[=umx_grep]{umx_grep()}}
grepSPSS_labels should be replaced with \code{\link[=umx_grep]{umx_grep()}}
umxStart should be replaced with \code{\link[=umxValues]{umxValues()}}
umxTryHard is deprecated: use \code{\link[=umxRun]{umxRun()}} instead
genEpi_Jiggle is deprecated: use \code{\link[=umxJiggle]{umxJiggle()}} instead
umxLabels Is deprecated: use \code{\link[=umxLabel]{umxLabel()}} instead
umxLabels Is deprecated: use \code{\link[=umxLabel]{umxLabel()}} instead
umxPath is deprecated: Use \code{\link[=mxPath]{mxPath()}} and \code{\link[=umxLabel]{umxLabel()}} instead
umxReportFit is deprecated: use \code{\link[=umxSummary]{umxSummary()}} instead
umxGetLabels is deprecated: use \code{\link[=umxGetParameters]{umxGetParameters()}} instead
stringToMxAlgebra is deprecated: please use \code{\link[=umx_string_to_algebra]{umx_string_to_algebra()}} instead
genEpi_EvalQuote is deprecated: please use \code{\link[=mxEvalByName]{mxEvalByName()}} instead
umxReportCIs is deprecated: please use \code{\link[=umxCI]{umxCI()}} instead
hasSquareBrackets is deprecated: please use \code{\link[=umx_has_square_brackets]{umx_has_square_brackets()}} instead
xmuHasSquareBrackets is deprecated: please use \code{\link[=umx_has_square_brackets]{umx_has_square_brackets()}} instead
replace umxReportFit with \code{\link[=umxSummary]{umxSummary()}}
Replace umxGraph_RAM with \code{\link[=plot]{plot()}}
Replace tryHard with \code{\link[=mxTryHard]{mxTryHard()}}
Replace genEpi_ReRun with \code{\link[=umxModify]{umxModify()}}
Replace mxStart with \code{\link[=umxValues]{umxValues()}}
Replace umxLabeler with \code{\link[=umxLabel]{umxLabel()}}
Replace standardizeRAM with \code{\link[=umx_standardize_RAM]{umx_standardize_RAM()}}
Replace genEpi_equate with \code{\link[=umxEquate]{umxEquate()}}
Replace genEpi_Path with \code{\link[=umxPath]{umxPath()}}
Replace genEpiCompare with \code{\link[=umxCompare]{umxCompare()}}
Replace mxLatent with \code{\link[=umxLatent]{umxLatent()}}
Change col.as.numeric to \code{\link[=umx_as_numeric]{umx_as_numeric()}}
Change cor.prob to \code{\link[=umx_cor]{umx_cor()}}
Change umx_u_APA_pval to \code{\link[=umx_APA_pval]{umx_APA_pval()}}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\concept{umx deprecated}
| /man/umx-deprecated.Rd | no_license | qingwending/umx | R | false | true | 2,941 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{umx-deprecated}
\alias{umx-deprecated}
\title{Deprecated. May already stop() code and ask to be updated. May be dropped entirely in future.}
\arguments{
\item{...}{the old function's parameters (now stripped out to avoid telling people how to do it the wrong way :-)}
}
\description{
xmuMakeThresholdsMatrices should be replaced with \code{\link[=umxThresholdMatrix]{umxThresholdMatrix()}}
umxSaturated should be replaced with \code{\link[=mxRefModels]{mxRefModels()}}
umx_grep_labels should be replaced with \code{\link[=umx_grep]{umx_grep()}}
grepSPSS_labels should be replaced with \code{\link[=umx_grep]{umx_grep()}}
umxStart should be replaced with \code{\link[=umxValues]{umxValues()}}
umxTryHard is deprecated: use \code{\link[=umxRun]{umxRun()}} instead
genEpi_Jiggle is deprecated: use \code{\link[=umxJiggle]{umxJiggle()}} instead
umxLabels Is deprecated: use \code{\link[=umxLabel]{umxLabel()}} instead
umxLabels Is deprecated: use \code{\link[=umxLabel]{umxLabel()}} instead
umxPath is deprecated: Use \code{\link[=mxPath]{mxPath()}} and \code{\link[=umxLabel]{umxLabel()}} instead
umxReportFit is deprecated: use \code{\link[=umxSummary]{umxSummary()}} instead
umxGetLabels is deprecated: use \code{\link[=umxGetParameters]{umxGetParameters()}} instead
stringToMxAlgebra is deprecated: please use \code{\link[=umx_string_to_algebra]{umx_string_to_algebra()}} instead
genEpi_EvalQuote is deprecated: please use \code{\link[=mxEvalByName]{mxEvalByName()}} instead
umxReportCIs is deprecated: please use \code{\link[=umxCI]{umxCI()}} instead
hasSquareBrackets is deprecated: please use \code{\link[=umx_has_square_brackets]{umx_has_square_brackets()}} instead
xmuHasSquareBrackets is deprecated: please use \code{\link[=umx_has_square_brackets]{umx_has_square_brackets()}} instead
replace umxReportFit with \code{\link[=umxSummary]{umxSummary()}}
Replace umxGraph_RAM with \code{\link[=plot]{plot()}}
Replace tryHard with \code{\link[=mxTryHard]{mxTryHard()}}
Replace genEpi_ReRun with \code{\link[=umxModify]{umxModify()}}
Replace mxStart with \code{\link[=umxValues]{umxValues()}}
Replace umxLabeler with \code{\link[=umxLabel]{umxLabel()}}
Replace standardizeRAM with \code{\link[=umx_standardize_RAM]{umx_standardize_RAM()}}
Replace genEpi_equate with \code{\link[=umxEquate]{umxEquate()}}
Replace genEpi_Path with \code{\link[=umxPath]{umxPath()}}
Replace genEpiCompare with \code{\link[=umxCompare]{umxCompare()}}
Replace mxLatent with \code{\link[=umxLatent]{umxLatent()}}
Change col.as.numeric to \code{\link[=umx_as_numeric]{umx_as_numeric()}}
Change cor.prob to \code{\link[=umx_cor]{umx_cor()}}
Change umx_u_APA_pval to \code{\link[=umx_APA_pval]{umx_APA_pval()}}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\concept{umx deprecated}
|
#' Retrieve archived reports for each of the linked accounts.
#'
#' \code{getArchivedReport} provides access to archived reports
#' from the appFigures web API
#'
#' @param type Character. Choose between \code{"all", "daily",
#' "weekly", "monthly", "monthlyfree", "finance", or "payment"}.
#' Defaults to \code{"all"}.
#'
#' @param date Character. Only return reports with a timestamp on
#' this date. Defaults to all dates.
#'
#' @param curlHandle Provide an instance of the CURLHandle-class
#' from the RCurl package. The default will create a curl handle
#' specific to the function call.
#'
#' @param verbose Logical. Should details of the web request
#' print to the console? Defaults to \code{FALSE}.
#'
#' @param orgJSON Logical. Should the JSON string be returned
#' without being converted to R objects? Defaults to \code{FALSE}.
#'
#' @return A dataframe containing the requested reports.
#'
#' @seealso Official documentation:
#' \url{http://docs.appfigures.com/archive}.
#'
getArchivedReport <- function(type = c("all", "daily", "weekly", "monthly",
                                       "monthlyfree", "finance", "payment"),
                              date, curlHandle, verbose = FALSE,
                              orgJSON = FALSE) {
  # Validate the report type against the allowed values.
  type <- match.arg(type)
  # Normalise the optional date filter to "YYYY-MM-DD"; when absent it is
  # NULL and therefore dropped from the query parameters by c().
  if (missing(date)) {
    date <- NULL
  } else {
    date <- as.character(as.Date(date))
  }
  query <- c(type = type, date = date, format = 'json')
  endpoint <- paste(BASE_URI, "archive", sep = "/")
  # Build an authenticated curl handle unless the caller supplied one.
  if (missing(curlHandle)) {
    auth_opts <- list(userpwd = paste(USERNAME, PASSWORD, sep = ":"),
                      httpheader = c('X-Client-Key' = API_KEY),
                      httpauth = 1L, verbose = verbose,
                      ssl.verifypeer = FALSE)
    curlHandle <- getCurlHandle(.opts = auth_opts)
  } else if (!inherits(curlHandle, "CURLHandle")) {
    stop("curlHandle must be of class 'CURLHandle'.")
  }
  jsonText <- getForm(endpoint, curl = curlHandle, .params = query)
  if (orgJSON) {
    return(jsonText)
  }
  if (!validate(jsonText)) {
    stop("appFigures API yielded invalid JSON!")
  }
  parseArchiveReport(jsonText)
}
#' Retrieve latest report for each of the linked accounts.
#'
#' \code{getLatestReport} provides access to the latest reports
#' from the appFigures web API
#'
#' @param type Character. Choose between \code{"all", "daily",
#' "weekly", "monthly", "yearly", "finance", "payment"}. Defaults
#' to \code{"all"}.
#'
#' @param curlHandle Provide an instance of the CURLHandle-class
#' from the RCurl package. The default will create a curl handle
#' specific to the function call.
#'
#' @param verbose Logical. Should details of the web request
#' print to the console? Defaults to \code{FALSE}.
#'
#' @param orgJSON Logical. Should the JSON string be returned
#' without being converted to R objects? Defaults to \code{FALSE}.
#'
#' @return A dataframe containing the requested reports.
#'
#' @seealso Official documentation:
#' \url{http://docs.appfigures.com/archive}.
#'
# Fetch the latest archived report of the requested type; see the roxygen
# block above for parameter documentation.
getLatestReport <- function(type = c("all", "daily", "weekly", "monthly",
                                     "yearly", "finance", "payment"),
                            curlHandle, verbose = FALSE, orgJSON = FALSE) {
  type <- match.arg(type)
  parList <- c(type = type, format = 'flat')
  uri <- paste(BASE_URI, "archive", "latest", sep = "/")
  # Build an authenticated curl handle unless the caller supplied one.
  if (missing(curlHandle)) {
    opts <- list(userpwd = paste(USERNAME, PASSWORD, sep = ":"),
                 httpheader = c('X-Client-Key' = API_KEY),
                 httpauth = 1L, verbose = verbose, ssl.verifypeer = FALSE)
    curlHandle <- getCurlHandle(.opts = opts)
  } else if (!inherits(curlHandle, "CURLHandle")) {
    # inherits() is robust when class() returns more than one element
    # (the previous `class(x) != "CURLHandle"` comparison was not) and
    # matches the check used by getArchivedReport().
    stop("curlHandle must be of class 'CURLHandle'.")
  }
  jsonText <- getForm(uri, curl = curlHandle, .params = parList)
  if (orgJSON) {
    return(jsonText)
  }
  if (!validate(jsonText)) {
    stop("appFigures API yielded invalid JSON!")
  }
  parseArchiveReport(jsonText)
}
#' Retrieve the raw report data.
#'
#' \code{getRawReport} provides access to the raw report data
#' from the appFigures web API
#'
#' @param id Numeric. The id of requested report.
#'
#' @param curlHandle Provide an instance of the CURLHandle-class
#' from the RCurl package. The default will create a curl handle
#' specific to the function call.
#'
#' @param verbose Logical. Should details of the web request
#' print to the console? Defaults to \code{FALSE}.
#'
#' @return A character string containing the raw report.
#'
#' @seealso Official documentation:
#' \url{http://docs.appfigures.com/archive}.
# Fetch the raw (unparsed) report body for the given report id; see the
# roxygen block above for parameter documentation.
getRawReport <- function(id, curlHandle, verbose = FALSE) {
  parList <- c(format = 'json')
  uri <- paste(BASE_URI, "archive", "raw", id, sep = "/")
  # Build an authenticated curl handle unless the caller supplied one.
  if (missing(curlHandle)) {
    opts <- list(userpwd = paste(USERNAME, PASSWORD, sep = ":"),
                 httpheader = c('X-Client-Key' = API_KEY),
                 httpauth = 1L, verbose = verbose, ssl.verifypeer = FALSE)
    curlHandle <- getCurlHandle(.opts = opts)
  } else if (!inherits(curlHandle, "CURLHandle")) {
    # inherits() is robust when class() returns more than one element,
    # and is consistent with getArchivedReport()'s check.
    stop("curlHandle must be of class 'CURLHandle'.")
  }
  getForm(uri, curl = curlHandle, .params = parList)
}
#' Map JSON string to an R data frame.
#'
#' \code{parseArchiveReport} parses the JSON returned by a
#' featured request made to the appFigures web API.
#'
# Convert the archive-report JSON payload into a data frame with cleaned
# column names and POSIXct (UTC) timestamp columns.
parseArchiveReport <- function(jsonText) {
  parsed <- fromJSON(jsonText)
  reports <- parsed[[2]]
  names(reports) <- c("report_id", "type", "ext_acct_id", "report_date",
                      "import_date", "region", "import_method")
  iso_format <- "%Y-%m-%dT%H:%M:%S"
  for (ts_col in c("report_date", "import_date")) {
    reports[[ts_col]] <- as.POSIXct(reports[[ts_col]], "UTC",
                                    format = iso_format)
  }
  reports
}
| /R/getArchive.R | no_license | appfigures/afapi | R | false | false | 5,862 | r | #' Retrieve archived reports for each of the linked accounts.
#'
#' \code{getArchivedReport} provides access to archived reports
#' from the appFigures web API
#'
#' @param type Character. Choose between \code{"all", "daily",
#' "weekly", "monthly", "monthlyfree", "finance", or "payment"}.
#' Defaults to \code{"all"}.
#'
#' @param date Character. Only return reports with a timestamp on
#' this date. Defaults to all dates.
#'
#' @param curlHandle Provide an instance of the CURLHandle-class
#' from the RCurl package. The default will create a curl handle
#' specific to the function call.
#'
#' @param verbose Logical. Should details of the web request
#' print to the console? Defaults to \code{FALSE}.
#'
#' @param orgJSON Logical. Should the JSON string be returned
#' without being converted to R objects? Defaults to \code{FALSE}.
#'
#' @return A dataframe containing the requested reports.
#'
#' @seealso Official documentation:
#' \url{http://docs.appfigures.com/archive}.
#'
getArchivedReport <- function(type = c("all", "daily", "weekly", "monthly",
                                       "monthlyfree", "finance", "payment"),
                              date, curlHandle, verbose = FALSE,
                              orgJSON = FALSE) {
  # Validate the report type against the allowed values.
  type <- match.arg(type)
  # Normalise the optional date filter to "YYYY-MM-DD"; when absent it is
  # NULL and therefore dropped from the query parameters by c().
  if (missing(date)) {
    date <- NULL
  } else {
    date <- as.character(as.Date(date))
  }
  query <- c(type = type, date = date, format = 'json')
  endpoint <- paste(BASE_URI, "archive", sep = "/")
  # Build an authenticated curl handle unless the caller supplied one.
  if (missing(curlHandle)) {
    auth_opts <- list(userpwd = paste(USERNAME, PASSWORD, sep = ":"),
                      httpheader = c('X-Client-Key' = API_KEY),
                      httpauth = 1L, verbose = verbose,
                      ssl.verifypeer = FALSE)
    curlHandle <- getCurlHandle(.opts = auth_opts)
  } else if (!inherits(curlHandle, "CURLHandle")) {
    stop("curlHandle must be of class 'CURLHandle'.")
  }
  jsonText <- getForm(endpoint, curl = curlHandle, .params = query)
  if (orgJSON) {
    return(jsonText)
  }
  if (!validate(jsonText)) {
    stop("appFigures API yielded invalid JSON!")
  }
  parseArchiveReport(jsonText)
}
#' Retrieve latest report for each of the linked accounts.
#'
#' \code{getLatestReport} provides access to the latest reports
#' from the appFigures web API
#'
#' @param type Character. Choose between \code{"all", "daily",
#' "weekly", "monthly", "yearly", "finance", "payment"}. Defaults
#' to \code{"all"}.
#'
#' @param curlHandle Provide an instance of the CURLHandle-class
#' from the RCurl package. The default will create a curl handle
#' specific to the function call.
#'
#' @param verbose Logical. Should details of the web request
#' print to the console? Defaults to \code{FALSE}.
#'
#' @param orgJSON Logical. Should the JSON string be returned
#' without being converted to R objects? Defaults to \code{FALSE}.
#'
#' @return A dataframe containing the requested reports.
#'
#' @seealso Official documentation:
#' \url{http://docs.appfigures.com/archive}.
#'
# Fetch the latest archived report of the requested type; see the roxygen
# block above for parameter documentation.
getLatestReport <- function(type = c("all", "daily", "weekly", "monthly",
                                     "yearly", "finance", "payment"),
                            curlHandle, verbose = FALSE, orgJSON = FALSE) {
  type <- match.arg(type)
  parList <- c(type = type, format = 'flat')
  uri <- paste(BASE_URI, "archive", "latest", sep = "/")
  # Build an authenticated curl handle unless the caller supplied one.
  if (missing(curlHandle)) {
    opts <- list(userpwd = paste(USERNAME, PASSWORD, sep = ":"),
                 httpheader = c('X-Client-Key' = API_KEY),
                 httpauth = 1L, verbose = verbose, ssl.verifypeer = FALSE)
    curlHandle <- getCurlHandle(.opts = opts)
  } else if (!inherits(curlHandle, "CURLHandle")) {
    # inherits() is robust when class() returns more than one element
    # (the previous `class(x) != "CURLHandle"` comparison was not) and
    # matches the check used by getArchivedReport().
    stop("curlHandle must be of class 'CURLHandle'.")
  }
  jsonText <- getForm(uri, curl = curlHandle, .params = parList)
  if (orgJSON) {
    return(jsonText)
  }
  if (!validate(jsonText)) {
    stop("appFigures API yielded invalid JSON!")
  }
  parseArchiveReport(jsonText)
}
#' Retrieve the raw report data.
#'
#' \code{getRawReport} provides access to the raw report data
#' from the appFigures web API
#'
#' @param id Numeric. The id of requested report.
#'
#' @param curlHandle Provide an instance of the CURLHandle-class
#' from the RCurl package. The default will create a curl handle
#' specific to the function call.
#'
#' @param verbose Logical. Should details of the web request
#' print to the console? Defaults to \code{FALSE}.
#'
#' @return A character string containing the raw report.
#'
#' @seealso Official documentation:
#' \url{http://docs.appfigures.com/archive}.
# Fetch the raw (unparsed) report body for the given report id; see the
# roxygen block above for parameter documentation.
getRawReport <- function(id, curlHandle, verbose = FALSE) {
  parList <- c(format = 'json')
  uri <- paste(BASE_URI, "archive", "raw", id, sep = "/")
  # Build an authenticated curl handle unless the caller supplied one.
  if (missing(curlHandle)) {
    opts <- list(userpwd = paste(USERNAME, PASSWORD, sep = ":"),
                 httpheader = c('X-Client-Key' = API_KEY),
                 httpauth = 1L, verbose = verbose, ssl.verifypeer = FALSE)
    curlHandle <- getCurlHandle(.opts = opts)
  } else if (!inherits(curlHandle, "CURLHandle")) {
    # inherits() is robust when class() returns more than one element,
    # and is consistent with getArchivedReport()'s check.
    stop("curlHandle must be of class 'CURLHandle'.")
  }
  getForm(uri, curl = curlHandle, .params = parList)
}
#' Map JSON string to an R data frame.
#'
#' \code{parseArchiveReport} parses the JSON returned by a
#' featured request made to the appFigures web API.
#'
# Convert the archive-report JSON payload into a data frame with cleaned
# column names and POSIXct (UTC) timestamp columns.
parseArchiveReport <- function(jsonText) {
  parsed <- fromJSON(jsonText)
  reports <- parsed[[2]]
  names(reports) <- c("report_id", "type", "ext_acct_id", "report_date",
                      "import_date", "region", "import_method")
  iso_format <- "%Y-%m-%dT%H:%M:%S"
  for (ts_col in c("report_date", "import_date")) {
    reports[[ts_col]] <- as.POSIXct(reports[[ts_col]], "UTC",
                                    format = iso_format)
  }
  reports
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(ggplot2)

header <- dashboardHeader(title = "Diabetic Foot Application")

# Sidebar: algorithm selector plus the tuning controls for whichever
# algorithm is currently selected (toggled via conditionalPanel).
#
# FIX: the slider calls now use named arguments. The original positional
# calls, e.g. sliderInput(id, label, 1, 10, 0, step = 1, value = 4), bound
# the stray `0` to sliderInput's `round` argument (named args are matched
# first, so `value = 4` and `step = 1` consumed those formals).
sidebar <- dashboardSidebar(
  selectInput("algorithm", "Choose an Algorithm:", list("Hierarchical Clustering"="hclust", "Partitioning around Medoids"="pam", "Density Based Clustering"="dbscan")),
  conditionalPanel(
    condition = "input.algorithm =='hclust'",
    radioButtons("linkage", "Choose a Linkage Type:", c("Complete Linkage"="complete","Single Linkage"="single","Centroid Linkage"="centroid")),
    sliderInput("hclustnum", "Number of Clusters", min = 1, max = 10, value = 4, step = 1)
  ),
  conditionalPanel(
    condition = "input.algorithm =='pam'",
    sliderInput("pclustnum", "Number of Clusters", min = 1, max = 10, value = 4, step = 1)
  ),
  conditionalPanel(
    condition = "input.algorithm =='dbscan'",
    # Label typo fixed ("Neightborhood" -> "Neighborhood").
    numericInput("epsilon", "Epsilon Neighborhood", 3),
    sliderInput("minPoints", "Minimum Number of Points", min = 1, max = 10, value = 4, step = 1)
  ),
  fluidRow(
    column(1, tableOutput(outputId = "summary"))
  )
)

# Body: a 4-row x 2-column grid of plot outputs.
body <- dashboardBody(
  fluidPage(
    fluidRow(
      column(6, plotOutput(outputId = "mtk1_plot")),
      column(6, plotOutput(outputId = "mtk2_plot"))
    ),
    fluidRow(
      column(6, plotOutput(outputId = "mtk3_plot")),
      column(6, plotOutput(outputId = "mtk4_plot"))
    ),
    fluidRow(
      column(6, plotOutput(outputId = "mtk5_plot")),
      column(6, plotOutput(outputId = "D1.T"))
    ),
    fluidRow(
      column(6, plotOutput(outputId = "L.T")),
      column(6, plotOutput(outputId = "C.T"))
    )
  )
)

ui <- dashboardPage(header, sidebar, body)
| /Framework/ui.R | no_license | stecklin/DiabeticFootAnalysis | R | false | false | 2,002 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(ggplot2)

header <- dashboardHeader(title = "Diabetic Foot Application")

# Sidebar: algorithm selector plus the tuning controls for whichever
# algorithm is currently selected (toggled via conditionalPanel).
#
# FIX: the slider calls now use named arguments. The original positional
# calls, e.g. sliderInput(id, label, 1, 10, 0, step = 1, value = 4), bound
# the stray `0` to sliderInput's `round` argument (named args are matched
# first, so `value = 4` and `step = 1` consumed those formals).
sidebar <- dashboardSidebar(
  selectInput("algorithm", "Choose an Algorithm:", list("Hierarchical Clustering"="hclust", "Partitioning around Medoids"="pam", "Density Based Clustering"="dbscan")),
  conditionalPanel(
    condition = "input.algorithm =='hclust'",
    radioButtons("linkage", "Choose a Linkage Type:", c("Complete Linkage"="complete","Single Linkage"="single","Centroid Linkage"="centroid")),
    sliderInput("hclustnum", "Number of Clusters", min = 1, max = 10, value = 4, step = 1)
  ),
  conditionalPanel(
    condition = "input.algorithm =='pam'",
    sliderInput("pclustnum", "Number of Clusters", min = 1, max = 10, value = 4, step = 1)
  ),
  conditionalPanel(
    condition = "input.algorithm =='dbscan'",
    # Label typo fixed ("Neightborhood" -> "Neighborhood").
    numericInput("epsilon", "Epsilon Neighborhood", 3),
    sliderInput("minPoints", "Minimum Number of Points", min = 1, max = 10, value = 4, step = 1)
  ),
  fluidRow(
    column(1, tableOutput(outputId = "summary"))
  )
)

# Body: a 4-row x 2-column grid of plot outputs.
body <- dashboardBody(
  fluidPage(
    fluidRow(
      column(6, plotOutput(outputId = "mtk1_plot")),
      column(6, plotOutput(outputId = "mtk2_plot"))
    ),
    fluidRow(
      column(6, plotOutput(outputId = "mtk3_plot")),
      column(6, plotOutput(outputId = "mtk4_plot"))
    ),
    fluidRow(
      column(6, plotOutput(outputId = "mtk5_plot")),
      column(6, plotOutput(outputId = "D1.T"))
    ),
    fluidRow(
      column(6, plotOutput(outputId = "L.T")),
      column(6, plotOutput(outputId = "C.T"))
    )
  )
)

ui <- dashboardPage(header, sidebar, body)
|
/R/CrispRVariants/Analisis_laboratorio.R | no_license | yolandabq/CRISPR-diversity-analysis | R | false | false | 22,722 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdcRNAMerge.R
\name{gdcRNAMerge}
\alias{gdcRNAMerge}
\title{Merge RNA/miRNAs raw counts data}
\usage{
gdcRNAMerge(metadata, path, data.type, organized = FALSE)
}
\arguments{
\item{metadata}{metadata parsed from \code{\link{gdcParseMetadata}}}
\item{path}{path to downloaded files for merging}
\item{data.type}{one of \code{'RNAseq'} and \code{'miRNAs'}}
\item{organized}{logical, whether the raw counts data have already
been organized into a single folder (eg., data downloaded by the
'GenomicDataCommons' method are already organized).
Default is \code{FALSE}.}
}
\value{
A dataframe or numeric matrix of raw counts data with rows
are genes or miRNAs and columns are samples
}
\description{
Merge raw counts data that is downloaded from GDC to a
single expression matrix
}
\examples{
####### Merge RNA expression data #######
metaMatrix <- gdcParseMetadata(project.id='TARGET-RT',
data.type='RNAseq')
\dontrun{rnaExpr <- gdcRNAMerge(metadata=metaMatrix, path='RNAseq/',
data.type='RNAseq')}
}
\author{
Ruidong Li and Han Qu
}
| /man/gdcRNAMerge.Rd | permissive | rli012/GDCRNATools | R | false | true | 1,130 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gdcRNAMerge.R
\name{gdcRNAMerge}
\alias{gdcRNAMerge}
\title{Merge RNA/miRNAs raw counts data}
\usage{
gdcRNAMerge(metadata, path, data.type, organized = FALSE)
}
\arguments{
\item{metadata}{metadata parsed from \code{\link{gdcParseMetadata}}}
\item{path}{path to downloaded files for merging}
\item{data.type}{one of \code{'RNAseq'} and \code{'miRNAs'}}
\item{organized}{logical, whether the raw counts data have already
been organized into a single folder (eg., data downloaded by the
'GenomicDataCommons' method are already organized).
Default is \code{FALSE}.}
}
\value{
A dataframe or numeric matrix of raw counts data with rows
are genes or miRNAs and columns are samples
}
\description{
Merge raw counts data that is downloaded from GDC to a
single expression matrix
}
\examples{
####### Merge RNA expression data #######
metaMatrix <- gdcParseMetadata(project.id='TARGET-RT',
data.type='RNAseq')
\dontrun{rnaExpr <- gdcRNAMerge(metadata=metaMatrix, path='RNAseq/',
data.type='RNAseq')}
}
\author{
Ruidong Li and Han Qu
}
|
# Server for the KronaR Shiny app: reads a value from the page URL's query
# string and renders it as a Krona chart.
library(KronaR)
shinyServer(function(input, output, session) {
  # Reactive store; starts as the placeholder "tmp" and is replaced with the
  # parsed query-string list on first render. (Original note: "parameter"
  # posts the temporary SHAMAN file of Krona data.)
  values <- reactiveValues(query ="tmp")
  output$KronaR <- renderKronaR({
    # Parse e.g. ?parameter=... from the URL; the "parameter" entry is
    # presumably a path to the temporary Krona data file — TODO confirm.
    values$query <- parseQueryString(session$clientData$url_search)
    KronaR(values$query[["parameter"]])
  })
})
| /server.R | no_license | aghozlane/KronaRShy | R | false | false | 320 | r | library(KronaR)
# Server for the KronaR Shiny app: reads a value from the page URL's query
# string and renders it as a Krona chart.
shinyServer(function(input, output, session) {
  # Reactive store; starts as the placeholder "tmp" and is replaced with the
  # parsed query-string list on first render. (Original note: "parameter"
  # posts the temporary SHAMAN file of Krona data.)
  values <- reactiveValues(query ="tmp")
  output$KronaR <- renderKronaR({
    # Parse e.g. ?parameter=... from the URL; the "parameter" entry is
    # presumably a path to the temporary Krona data file — TODO confirm.
    values$query <- parseQueryString(session$clientData$url_search)
    KronaR(values$query[["parameter"]])
  })
})
|
library(MCMCpack)
#library(rstan)
#rstan_options(auto_write=T)
#options(mc.cores = parallel::detectCores())
# Generate a random n x n symmetric positive-definite matrix with the given
# eigenvalues `ev` (default: n draws from Uniform(0, 10)).
#
# A random Gaussian matrix is QR-factorised; the Q factor (sign-corrected by
# the diagonal of R so it is a uniformly random orthogonal matrix) is then
# used to rotate diag(ev). Note `ev` is evaluated lazily, on the last line,
# so the RNG draw order matches the original (rnorm first, then runif).
Posdef <- function(n, ev = runif(n, 0, 10)) {
  raw <- matrix(rnorm(n^2), ncol = n)
  qr_fac <- qr(raw)
  q_mat <- qr.Q(qr_fac)
  r_diag <- diag(qr.R(qr_fac))
  ortho <- q_mat %*% diag(r_diag / abs(r_diag))
  t(ortho) %*% diag(ev) %*% ortho
}
# Logistic (inverse-logit) function, vectorized over x: maps reals to (0, 1).
# The 1 / (1 + exp(-x)) form is kept for exact numerical equivalence.
sigmoid <- function(x) {
  denom <- 1 + exp(-x)
  1 / denom
}
# Mean squared error between predictions `yhat` and observations `y`.
#
# Args:
#   yhat: numeric vector of predictions.
#   y:    numeric vector of observed values, same length as `yhat`.
#   root: if TRUE (default), return the root mean squared error (RMSE).
#
# Returns a single numeric value. Default changed from `T` to `TRUE`:
# T/F are ordinary variables that can be reassigned, so relying on them
# for defaults is fragile (value is unchanged).
mse <- function(yhat, y, root = TRUE) {
  err <- mean((yhat - y)^2)
  if (root) sqrt(err) else err
}
# ---- Simulation dimensions ------------------------------------------------
# g = groups, w = waves, k = items, m = latent dimensions, ng = replicates
# per cell, r = respondent records, n = total observations (g*w*k*ng).
# NOTE(review): r = 500 is assumed equal to ng*g*w — keep in sync if the
# dimensions above change.
g = 10
w = 5
k = 10
m = 3
ng = 10
r = 500
n = g * w * k * ng
# Binary loading mask: each row uses each latent dimension with prob 0.75.
M = matrix(NA, k, m)
for(j in 1:k){
  M[j, ] = sample(0:1, m, replace=TRUE, prob = c(0.25, 0.75))
}
# Index vectors: W and G (length ng*g*w = r) index records by wave/group;
# K and R (length n) index each observation's item and record.
W = rep(1:w, each=ng*g)
K = rep(1:k, ng*g*w)
R = rep(rep(1:(ng*g*w), each=k))
G = rep(1:g, each=ng*w)
# ---- True parameter values drawn from their priors ------------------------
true_kappa = rinvgamma(1, 5, 5)
true_tau = rinvgamma(1, 5, 5)
true_phi = rinvgamma(1, 5, 5)
true_Sigma = Posdef(m)
true_Omega = Posdef(k)
true_beta = rnorm(m, 0, true_tau)
true_nu = rnorm(g, 0, 10)
# Group-by-wave effects centred on each group's intercept true_nu[j].
true_T = matrix(NA, g, w)
for(j in 1:g)
  true_T[j, ] = rnorm(w, true_nu[j], true_phi)
true_a = mvrnorm(1, rep(0,k), true_Omega)
true_b = rinvgamma(k, 5, 5)
true_lambda = rnorm(m, 0, true_kappa)
# Latent traits per record, zeroed where the mask is 0.
# NOTE(review): M was built with k rows but is indexed by group G[i] here
# (k == g == 10, so it runs) — confirm which indexing was intended.
true_theta = matrix(NA, r, m)
for(i in 1:r){
  true_theta[i, ] = mvrnorm(1, true_T[G[i], W[i]] %*% true_beta, true_Sigma)
  true_theta[i, !M[G[i], ]] = 0
}
# ---- Simulate Bernoulli responses from the 2PL-style IRT model ------------
Y = rep(NA, n)
for(i in 1:n){
  mu = sigmoid((true_b[K[i]]) * (t(true_theta[R[i], ]) %*% true_lambda - true_a[K[i]]))
  Y[i] = rbinom(1, 1, mu)
}
# NOTE(review): hard-coded absolute output paths — consider parameterising.
write.csv(data.frame(Y, W, K, R, G), '~/Desktop/School/Bayesian/vectors.csv')
write.csv(data.frame(M), '~/Desktop/School/Bayesian/matrix.csv')
#model = stan_model(file = "irt_factor_2.0.stan")
#fit = sampling(model, data=list(N=n, G=g, W=w, K=k, R=r, M=m, IG=G, IW=W, IK=K, IR=R, IM=M, Y=Y), chains=4,
#iter=2000, verbose=TRUE)
#save.image(test_2.0.RData)
| /test_irt_2.0.R | no_license | ecmargo/GibbsSampler | R | false | false | 1,840 | r | library(MCMCpack)
#library(rstan)
#rstan_options(auto_write=T)
#options(mc.cores = parallel::detectCores())
# Generate a random n x n symmetric positive-definite matrix.
#
# A Gaussian matrix is QR-factorized to obtain an orthogonal matrix; the
# column signs are corrected with the signs of R's diagonal so the rotation
# is unbiased. The requested eigenvalues `ev` are then embedded by
# conjugating diag(ev) with that rotation.
#
# n  : dimension of the matrix.
# ev : eigenvalues to embed; defaults to n draws from Uniform(0, 10)
#      (evaluated lazily, i.e. after the Gaussian draws).
# Returns a symmetric matrix whose eigenvalues are exactly `ev`.
Posdef <- function (n, ev = runif(n, 0, 10))
{
  # Random Gaussian matrix; its QR factorization yields an orthogonal factor.
  gauss <- matrix(ncol = n, rnorm(n^2))
  qr_fac <- qr(gauss)
  ortho <- qr.Q(qr_fac)
  upper <- qr.R(qr_fac)
  # Sign correction: divide R's diagonal by its absolute value (+1/-1).
  signs <- diag(upper) / abs(diag(upper))
  rotation <- ortho %*% diag(signs)
  # Conjugate the diagonal eigenvalue matrix by the rotation.
  t(rotation) %*% diag(ev) %*% rotation
}
# Logistic (inverse-logit) transform: maps the real line into (0, 1).
# Vectorized; equivalent to stats::plogis(x). The 1/(1 + exp(-x)) form is
# kept deliberately so very large |x| saturates to exactly 1 or 0.
sigmoid <- function(x){
  denom <- 1 + exp(-x)
  1 / denom
}
# Mean squared error between predictions and observations.
#
# yhat : numeric vector of predicted values.
# y    : numeric vector of observed values (same length as yhat).
# root : if TRUE (default), return the root mean squared error (RMSE);
#        otherwise the plain MSE.
# Returns a single numeric value.
#
# Fix: the default was the reassignable shorthand `T`; use `TRUE`.
mse <- function(yhat, y, root = TRUE){
  err <- mean((yhat - y)^2)
  if (root) sqrt(err) else err
}
## ---- Data generation for a Bayesian IRT factor model ----------------------
## NOTE(review): rinvgamma() comes from MCMCpack and mvrnorm() from MASS
## (attached via MCMCpack's Depends) -- confirm MASS is on the search path.
# Design sizes: g groups, w waves, k items, m latent factors,
# ng respondents per group/wave cell, r = g*w*ng respondents,
# n = total number of item responses.
g = 10
w = 5
k = 10
m = 3
ng = 10
r = 500
n = g * w * k * ng
# Item-by-factor loading mask: each entry is 1 w.p. 0.75, else 0.
M = matrix(NA, k, m)
for(j in 1:k){
M[j, ] = sample(0:1, m, replace=TRUE, prob = c(0.25, 0.75))
}
# Index vectors. W and G have length g*w*ng (one entry per respondent);
# K and R have length n (one entry per response).
W = rep(1:w, each=ng*g)
K = rep(1:k, ng*g*w)
R = rep(rep(1:(ng*g*w), each=k))
G = rep(1:g, each=ng*w)
# True hyper-parameters: inverse-gamma variances and random SPD covariances.
true_kappa = rinvgamma(1, 5, 5)
true_tau = rinvgamma(1, 5, 5)
true_phi = rinvgamma(1, 5, 5)
true_Sigma = Posdef(m)
true_Omega = Posdef(k)
# Regression weights, group means, and the group-by-wave trend matrix T.
true_beta = rnorm(m, 0, true_tau)
true_nu = rnorm(g, 0, 10)
true_T = matrix(NA, g, w)
for(j in 1:g)
true_T[j, ] = rnorm(w, true_nu[j], true_phi)
# Item difficulties (a), discriminations (b), and factor loadings (lambda).
true_a = mvrnorm(1, rep(0,k), true_Omega)
true_b = rinvgamma(k, 5, 5)
true_lambda = rnorm(m, 0, true_kappa)
# Per-respondent latent traits; masked factors are zeroed out.
# NOTE(review): M is indexed by group G[i] although it has k (item) rows --
# this only works because g == k here; confirm the intended index.
true_theta = matrix(NA, r, m)
for(i in 1:r){
true_theta[i, ] = mvrnorm(1, true_T[G[i], W[i]] %*% true_beta, true_Sigma)
true_theta[i, !M[G[i], ]] = 0
}
# Bernoulli responses via a 2PL-style logistic link.
Y = rep(NA, n)
for(i in 1:n){
mu = sigmoid((true_b[K[i]]) * (t(true_theta[R[i], ]) %*% true_lambda - true_a[K[i]]))
Y[i] = rbinom(1, 1, mu)
}
# Persist the generated data. W and G (length g*w*ng) are recycled by
# data.frame() to length n -- presumably intentional; verify the alignment.
write.csv(data.frame(Y, W, K, R, G), '~/Desktop/School/Bayesian/vectors.csv')
write.csv(data.frame(M), '~/Desktop/School/Bayesian/matrix.csv')
# Stan fitting code kept for reference (disabled).
#model = stan_model(file = "irt_factor_2.0.stan")
#fit = sampling(model, data=list(N=n, G=g, W=w, K=k, R=r, M=m, IG=G, IW=W, IK=K, IR=R, IM=M, Y=Y), chains=4,
#iter=2000, verbose=TRUE)
#save.image(test_2.0.RData)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSimulation.R
\name{runSimulation}
\alias{runSimulation}
\alias{print.SimDesign}
\alias{head.SimDesign}
\alias{tail.SimDesign}
\alias{summary.SimDesign}
\alias{as.data.frame.SimDesign}
\title{Run a Monte Carlo simulation given a data.frame of conditions and simulation functions}
\usage{
runSimulation(design, replications, generate, analyse, summarise,
fixed_objects = NULL, packages = NULL, filename = "SimDesign-results",
warnings_as_errors = FALSE, save = FALSE, save_results = FALSE,
save_seeds = FALSE, load_seed = NULL, seed = NULL, parallel = FALSE,
ncores = parallel::detectCores(), cl = NULL, MPI = FALSE,
max_errors = 50, as.factor = TRUE, save_generate_data = FALSE,
save_details = list(), edit = "none", progress = FALSE,
verbose = TRUE)
\method{print}{SimDesign}(x, drop.extras = FALSE, drop.design = FALSE,
format.time = TRUE, ...)
\method{head}{SimDesign}(x, ...)
\method{tail}{SimDesign}(x, ...)
\method{summary}{SimDesign}(object, ...)
\method{as.data.frame}{SimDesign}(x, ...)
}
\arguments{
\item{design}{a \code{data.frame} object containing the Monte Carlo simulation conditions to
be studied, where each row represents a unique condition and each column a factor to be varied}
\item{replications}{number of replication to perform per condition (i.e., each row in \code{design}).
Must be greater than 0}
\item{generate}{user-defined data and parameter generating function.
See \code{\link{Generate}} for details}
\item{analyse}{user-defined computation function which acts on the data generated from
\code{\link{Generate}}. See \code{\link{Analyse}} for details}
\item{summarise}{optional (but recommended) user-defined summary function to be used
after all the replications have completed within each \code{design} condition. Omitting this function
will return a list of matrices (or a single matrix, if only one row in \code{design} is supplied)
or, for more general objects (such as lists), a list containing the results returned from \code{\link{Analyse}}.
Omitting this function is only recommended for didactic purposes because it leaves out a large amount of
information (e.g., try-errors, warning messages, etc), can witness memory related issues,
and generally is not as flexible internally. See
the \code{save_results} option for a better alternative to storing the Generate-Analyse results}
\item{fixed_objects}{(optional) an object (usually a named \code{list})
containing additional user-defined objects
that should remain fixed across conditions. This is useful when including
long fixed vectors/matrices of population parameters, data
that should be used across all conditions and replications (e.g., including a fixed design matrix
for linear regression), or simply control constant global elements such as sample size}
\item{packages}{a character vector of external packages to be used during the simulation (e.g.,
\code{c('MASS', 'mvtnorm', 'simsem')} ). Use this input when \code{parallel = TRUE} or
\code{MPI = TRUE} to use non-standard functions from additional packages,
otherwise the functions must be made available by using explicit
\code{\link{library}} or \code{\link{require}} calls within the provided simulation functions.
Alternatively, functions can be called explicitly without attaching the package with the \code{::} operator
(e.g., \code{mvtnorm::rmvnorm()})}
\item{filename}{(optional) the name of the \code{.rds} file to save the final simulation results to
when \code{save = TRUE}. If the same file name already exists in the working
directly at the time of saving then a new
file will be generated instead and a warning will be thrown. This helps to avoid accidentally overwriting
existing files. Default is \code{'SimDesign-results'}}
\item{warnings_as_errors}{logical; treat warning messages as errors during the simulation? Default is FALSE,
therefore warnings are only collected and not used to restart the data generation step}
\item{save}{logical; save the simulation state and final results to the hard-drive? This is useful
for simulations which require an extended amount of time. When \code{TRUE}, a temp file
will be created in the working directory which allows the simulation state to be saved
and recovered (in case of power outages, crashes, etc). To recover your simulation at the last known
location simply re-run the code you used to initially define the simulation and the external file
will automatically be detected and read-in. Upon completion, the final results will
be saved to the working directory, and the temp file will be removed. Default is \code{FALSE}}
\item{save_results}{logical; save the results returned from \code{\link{Analyse}} to external
\code{.rds} files located in the defined \code{save_results_dirname} directory/folder?
Use this if you would like to keep track of the individual parameters returned from the analyses.
Each saved object will contain a list of three elements containing the condition (row from \code{design}),
results (as a \code{list} or \code{matrix}), and try-errors. When \code{TRUE}, a temp file will be used to track the simulation
state (in case of power outages, crashes, etc). When \code{TRUE}, temporary files will also be saved
to the working directory (in the same way as when \code{save = TRUE}).
See \code{\link{SimResults}} for an example of how to read these \code{.rds} files back into R
after the simulation is complete. Default is \code{FALSE}.
WARNING: saving results to your hard-drive can fill up space very quickly for larger simulations. Be sure to
test this option using a smaller number of replications before the full Monte Carlo simulation is performed.}
\item{save_seeds}{logical; save the \code{.Random.seed} states prior to performing each replication into
plain text files located in the defined \code{save_seeds_dirname} directory/folder?
Use this if you would like to keep track of the simulation state within each replication and design
condition. Primarily, this is useful for completely replicating any cell in the simulation if need be,
especially when tracking down hard-to-find errors and bugs. As well, see the \code{load_seed} input
to load a given \code{.Random.seed} to exactly replicate the generated data and analysis state (handy
for debugging). When \code{TRUE}, temporary files will also be saved
to the working directory (in the same way as when \code{save = TRUE}).
Default is \code{FALSE}}
\item{load_seed}{a character object indicating which file to load from when the \code{.Random.seed}s have
be saved (after a call with \code{save_seeds = TRUE}). E.g., \code{load_seed = 'design-row-2/seed-1'}
will load the first seed in the second row of the \code{design} input. Note that it is important NOT
to modify the \code{design} input object, otherwise the path may not point to the correct saved location.
Default is \code{NULL}}
\item{seed}{a vector of integers to be used for reproducibility.
The length of the vector must be equal the number of rows in \code{design}.
This argument calls \code{\link{set.seed}} or
\code{\link{clusterSetRNGStream}} for each condition, respectively,
but will not be run when \code{MPI = TRUE}.
Default is \code{NULL}, indicating that no seed is set for each condition}
\item{parallel}{logical; use parallel processing from the \code{parallel} package over each
unique condition?}
\item{ncores}{number of cores to be used in parallel execution. Default uses all available}
\item{cl}{cluster object defined by \code{\link{makeCluster}} used to run code in parallel.
If \code{NULL} and \code{parallel = TRUE}, a local cluster object will be defined which
selects the maximum number of cores available
and will stop the cluster when the simulation is complete. Note that supplying a \code{cl}
object will automatically set the \code{parallel} argument to \code{TRUE}}
\item{MPI}{logical; use the \code{foreach} package in a form usable by MPI to run simulation
in parallel on a cluster? Default is \code{FALSE}}
\item{max_errors}{the simulation will terminate when more than this number of consecutive errors are thrown in any
given condition. The purpose of this is to indicate that something fatally problematic is likely going
wrong in the generate-analyse phases and should be inspected. Default is 50}
\item{as.factor}{logical; coerce the input \code{design} elements into \code{factor}s when the
simulation is complete? If the columns inputs are numeric then these will be treated
as \code{ordered}. Default is \code{TRUE}}
\item{save_generate_data}{logical; save the data returned from \code{\link{Generate}} to external \code{.rds} files
located in the defined \code{save_generate_data_dirname} directory/folder?
When \code{TRUE}, temporary files will also be saved to the working directory
(in the same way as when \code{save = TRUE}). Default is \code{FALSE}
WARNING: saving data to your hard-drive can fill up space very quickly for larger simulations. Be sure to
test this option using a smaller number of replications before the full Monte Carlo simulation is performed.
It is generally recommended to leave this argument as \code{FALSE} because saving datasets will often consume
a needless amount of disk space, and by-and-large saving data is not required for simulations.}
\item{save_details}{a list pertaining to information regarding how and where files should be saved
when the \code{save}, \code{save_results}, or \code{save_generate_data} flags are triggered.
\describe{
\item{\code{safe}}{logical; trigger whether safe-saving should be performed. When \code{TRUE} files
will never be overwritten accidentally, and where appropriate the program will either stop or generate
new files with unique names. Default is \code{TRUE}}
\item{\code{compname}}{name of the computer running the simulation. Normally this doesn't need
to be modified, but in the event that a manual node breaks down while running a simulation the
results from the temp files may be resumed on another computer by changing the name of the
node to match the broken computer. Default is the result of evaluating \code{unname(Sys.info()['nodename'])}}
\item{\code{tmpfilename}}{the name of the temporary \code{.rds} file when any of the \code{save} flags are used.
This file will be read-in if it is in the working directory and the simulation will continue
at the last point this file was saved
(useful in case of power outages or broken nodes). Finally, this file will be deleted when the
simulation is complete. Default is the system name (\code{compname}) appended
to \code{'SIMDESIGN-TEMPFILE_'}}
\item{\code{save_results_dirname}}{a string indicating the name of the folder to save
result objects to when \code{save_results = TRUE}. If a directory/folder does not exist
in the current working directory then a unique one will be created automatically. Default is
\code{'SimDesign-results_'} with the associated \code{compname} appended}
\item{\code{save_seeds_dirname}}{a string indicating the name of the folder to save
\code{.Random.seed} objects to when \code{save_seeds = TRUE}. If a directory/folder does not exist
in the current working directory then one will be created automatically. Default is
\code{'SimDesign-seeds_'} with the associated \code{compname} appended}
\item{\code{save_generate_data_dirname}}{a string indicating the name of the folder to save
data objects to when \code{save_generate_data = TRUE}. If a directory/folder does not exist
in the current working directory then one will be created automatically.
Within this folder nested directories will be created associated with each row in \code{design}.
Default is \code{'SimDesign-generate-data_'} with the \code{compname} appended}
}}
\item{edit}{a string indicating where to initiate a \code{browser()} call for editing and debugging.
General options are \code{'none'} (default) and \code{'all'}, which are used
to disable debugging and to debug all the user defined functions, respectively.
Specific options include: \code{'generate'}
to edit the data simulation function, \code{'analyse'} to edit the computational function, and
\code{'summarise'} to edit the aggregation function.
Alternatively, users may place \code{\link{browser}} calls within the respective functions for
debugging at specific lines (note: parallel computation flags will automatically be disabled
when a \code{browser()} is detected)}
\item{progress}{logical; display a progress bar for each simulation condition?
This is useful when simulations conditions take a long time to run.
Uses the \code{pbapply} package to display the progress. Default is \code{FALSE}}
\item{verbose}{logical; print messages to the R console? Default is \code{TRUE}}
\item{x}{SimDesign object returned from \code{\link{runSimulation}}}
\item{drop.extras}{logical; don't print information about warnings, errors, simulation time, and replications?
Default is \code{FALSE}}
\item{drop.design}{logical; don't include information about the (potentially factorized) simulation design?
This may be useful if you wish to \code{cbind()} the original design \code{data.frame} to the simulation
results instead of using the auto-factorized version. Default is \code{FALSE}}
\item{format.time}{logical; format \code{SIM_TIME} into a day/hour/min/sec character vector? Default is
\code{TRUE}}
\item{...}{additional arguments}
\item{object}{SimDesign object returned from \code{\link{runSimulation}}}
}
\value{
a \code{data.frame} (also of class \code{'SimDesign'})
with the original \code{design} conditions in the left-most columns,
simulation results and ERROR/WARNING's (if applicable) in the middle columns,
and additional information (such as REPLICATIONS, SIM_TIME, COMPLETED, and SEED) in the right-most
columns.
}
\description{
This function runs a Monte Carlo simulation study given a set of predefined simulation functions,
design conditions, and number of replications. Results can be saved as temporary files in case of interruptions
and may be restored by re-running \code{runSimulation}, provided that the respective temp
file can be found in the working directory. \code{runSimulation} supports parallel
and cluster computing, global and local debugging, error handling (including fail-safe
stopping when functions fail too often, even across nodes), and tracking of error and warning messages.
For convenience, all functions available in the R workspace are exported across all computational nodes
so that they are more easily accessible (however, other R objects are not, and therefore
must be passed to the \code{fixed_objects} input to become available across nodes).
For a didactic presentation of the package refer to Sigal and Chalmers
(2016; \url{http://www.tandfonline.com/doi/full/10.1080/10691898.2016.1246953}), and see the associated
wiki on Github (\url{https://github.com/philchalmers/SimDesign/wiki})
for other tutorial material, examples, and applications of \code{SimDesign} to real-world simulations.
}
\details{
The strategy for organizing the Monte Carlo simulation work-flow is to
\describe{
\item{1)}{Define a suitable \code{design} data.frame object containing fixed conditional
information about the Monte Carlo simulations. This is often expedited by using the
\code{\link{expand.grid}} function, and if necessary using the \code{\link{subset}}
function to remove redundant or non-applicable rows}
\item{2)}{Define the three step functions to generate the data (\code{\link{Generate}}; see also
\url{https://CRAN.R-project.org/view=Distributions} for a list of distributions in R),
analyse the generated data by computing the respective parameter estimates, detection rates,
etc (\code{\link{Analyse}}), and finally summarise the results across the total
number of replications (\code{\link{Summarise}}). Note that these functions can be
automatically generated by using the \code{\link{SimFunctions}} function.
}
\item{3)}{Pass the above objects to the \code{runSimulation} function, and declare the
number of replications to perform with the \code{replications} input. This function will accept
a \code{design} data.frame object and will return a suitable data.frame object with the
simulation results}
\item{4)}{Analyze the output from \code{runSimulation}, possibly using ANOVA techniques
(\code{\link{SimAnova}}) and generating suitable plots and tables}
}
For a skeleton version of the work-flow, which is often useful when initially defining a simulation,
see \code{\link{SimFunctions}}. This function will write template simulation code
to one/two files so that modifying the required functions and objects can begin immediately
with minimal error. This means that you can focus on your Monte Carlo simulation immediately rather
than worrying about the administrative code-work required to organize the simulation work-flow.
Additional information for each condition are also contained in the \code{data.frame} object returned by
\code{runSimulation}: \code{REPLICATIONS} to indicate the number of Monte Carlo replications,
\code{SIM_TIME} to indicate how long (in seconds) it took to complete
all the Monte Carlo replications for each respective design condition,
\code{COMPLETED} to indicate the date in which the given simulation condition completed,
\code{SEED} if the \code{seed} argument
was used, columns containing the number of replications which had to be re-run due to errors (where the error messages
represent the names of the columns prefixed with a \code{ERROR:} string), and
columns containing the number of warnings prefixed with a \code{WARNING:} string.
Additional examples, presentation files, and tutorials can be found on the package wiki located at
\url{https://github.com/philchalmers/SimDesign/wiki}.
}
\section{Saving data, results, seeds, and the simulation state}{
To conserve RAM, temporary objects (such as data generated across conditions and replications)
are discarded; however, these can be saved to the hard-disk by passing the appropriate flags.
For longer simulations it is recommended to use \code{save = TRUE} to temporarily save the
simulation state, and to use the \code{save_results} flag to write the analysis results
the to hard-disc.
The generated data can be saved by passing
\code{save_generate_data = TRUE}, however it is often more memory efficient to use the
\code{save_seeds} option instead to only save R's \code{.Random.seed} state instead (still
allowing for complete reproducibility); individual \code{.Random.seed} terms may also be read in with the
\code{load_seed} input to reproduce the exact simulation state at any given replication. Finally,
providing a vector of \code{seeds} is also possible to ensure
that each simulation condition is completely reproducible under the single/multi-core method selected.
Finally, when the Monte Carlo simulation is complete
it is recommended to write the results to a hard-drive for safe keeping, particularly with the
\code{save} and \code{filename} arguments provided (for reasons that are more obvious in the parallel computation
descriptions below). Using the \code{filename} argument (along with \code{save = TRUE})
supplied is much safer than using something
like \code{\link{saveRDS}} directly because files will never accidentally be overwritten,
and instead a new file name will be created when a conflict arises; this type of safety
is prevalent in many aspects of the package and helps to avoid many unrecoverable (yet surprisingly common)
mistakes.
}
\section{Resuming temporary results}{
In the event of a computer crash, power outage, etc, if \code{save = TRUE} was used
then the original code used to execute \code{runSimulation()} need only be re-run to resume the simulation.
The saved temp file will be read into the function automatically, and the simulation will continue
at the condition where it left off before the simulation state was terminated.
}
\section{A note on parallel computing}{
When running simulations in parallel (either with \code{parallel = TRUE} or \code{MPI = TRUE})
R objects defined in the global environment will generally \emph{not} be visible across nodes.
Hence, you may see errors such as \code{Error: object 'something' not found} if you try to use an object
that is defined in the workspace but is not passed to \code{runSimulation}.
To avoid this type or error, simply pass additional objects to the
\code{fixed_objects} input (usually it's convenient to supply a named list of these objects).
Fortunately, however, \emph{custom functions defined in the global environment are exported across
nodes automatically}. This makes it convenient when writing code because custom functions will
always be available across nodes if they are visible in the R workspace. As well, note the
\code{packages} input to declare packages which must be loaded via \code{library()} in order to make
specific non-standard R functions available across nodes.
}
\section{Cluster computing}{
SimDesign code may be released to a computing system which supports parallel cluster computations using
the industry standard Message Passing Interface (MPI) form. This simply
requires that the computers be setup using the usual MPI requirements (typically, running some flavor
of Linux, have password-less open-SSH access, IP addresses have been added to the \code{/etc/hosts} file
or \code{~/.ssh/config}, etc).
More generally though, these resources are widely available through professional
organizations dedicated to super-computing.
To setup the R code for an MPI cluster one need only add the argument \code{MPI = TRUE},
wrap the appropriate MPI directives around \code{runSimulation}, and submit the
files using the suitable BASH commands to execute the \code{mpirun} tool. For example,
\describe{
\item{\code{library(doMPI)}}{}
\item{\code{cl <- startMPIcluster()}}{}
\item{\code{registerDoMPI(cl)}}{}
\item{\code{runSimulation(design=Design, replications=1000, save=TRUE, filename='mysimulation',
generate=Generate, analyse=Analyse, summarise=Summarise, MPI=TRUE)}}{}
\item{\code{closeCluster(cl)}}{}
\item{\code{mpi.quit()}}{}
}
The necessary \code{SimDesign} files must be uploaded to the dedicated master node
so that a BASH call to \code{mpirun} can be used to distribute the work across slaves.
For instance, if the following BASH command is run on the master node then 16 processes
will be summoned (1 master, 15 slaves) across the computers named \code{localhost}, \code{slave1},
and \code{slave2} in the ssh \code{config} file.
\code{mpirun -np 16 -H localhost,slave1,slave2 R --slave -f simulation.R}
}
\section{Network computing}{
If you have access to a set of computers which can be linked via secure-shell (ssh) on the same LAN network then
Network computing (a.k.a., a Beowulf cluster) may be a viable and useful option.
This approach is similar to MPI computing approach
except that it offers more localized control and requires more hands-on administrative access to the master
and slave nodes. The setup generally requires that the master node
has \code{SimDesign} installed and the slave/master nodes have all the required R packages pre-installed
(Unix utilities such as \code{dsh} are very useful for this purpose). Finally,
the master node must have ssh access to the slave nodes, each slave node must have ssh access
with the master node, and a cluster object (\code{cl}) from the \code{parallel} package must be defined on the
master node.
Setup for network computing is generally more straightforward and controlled
than the setup for MPI jobs in that it only requires the specification of a) the respective
IP addresses within a defined R script, and b) the user name
(if different from the master node's user name. Otherwise, only a) is required).
However, on Linux I have found it is also important to include relevant information about the host names
and IP addresses in the \code{/etc/hosts} file on the master and slave nodes, and to ensure that
the selected port (passed to \code{\link{makeCluster}}) on the master node is not hindered by a firewall.
As an example, using the following code the master node (primary) will spawn 7 slaves and 1 master,
while a separate computer on the network with the associated IP address will spawn an additional 6 slaves.
Information will be collected on the master node, which is also where the files
and objects will be saved using the \code{save} inputs (if requested).
\describe{
\item{\code{library(parallel)}}{}
\item{\code{primary <- '192.168.2.1'}}{}
\item{\code{IPs <- list(list(host=primary, user='myname', ncore=8), list(host='192.168.2.2', user='myname', ncore=6))}}{}
\item{\code{spec <- lapply(IPs, function(IP) rep(list(list(host=IP$host, user=IP$user)), IP$ncore))}}{}
\item{\code{spec <- unlist(spec, recursive=FALSE)}}{}
\item{\code{cl <- makeCluster(master=primary, spec=spec)}}{}
\item{\code{Final <- runSimulation(..., cl=cl)}}{}
\item{\code{stopCluster(cl)}}{}
}
The object \code{cl} is passed to \code{runSimulation} on the master node
and the computations are distributed across the respective
IP addresses. Finally, it's usually good practice to use \code{stopCluster(cl)}
when all the simulations are said and done to release the communication between the computers,
which is what the above code shows.
Alternatively, if you have provided suitable names for each respective slave node, as well as the master,
then you can define the \code{cl} object using these instead (rather than supplying the IP addresses in
your R script). This requires that the master node has itself and all the slave nodes defined in the
\code{/etc/hosts} and \code{~/.ssh/config} files, while the slave nodes require themselves and the
master node in the same files (only 2 IP addresses required on each slave).
Following this setup, and assuming the user name is the same across all nodes,
the \code{cl} object could instead be defined with
\describe{
\item{\code{library(parallel)}}{}
\item{\code{primary <- 'master'}}{}
\item{\code{IPs <- list(list(host=primary, ncore=8), list(host='slave', ncore=6))}}{}
\item{\code{spec <- lapply(IPs, function(IP) rep(list(list(host=IP$host)), IP$ncore))}}{}
\item{\code{spec <- unlist(spec, recursive=FALSE)}}{}
\item{\code{cl <- makeCluster(master=primary, spec=spec)}}{}
\item{\code{Final <- runSimulation(..., cl=cl)}}{}
\item{\code{stopCluster(cl)}}{}
}
Or, even more succinctly if all communication elements required are identical to the master node,
\describe{
\item{\code{library(parallel)}}{}
\item{\code{primary <- 'master'}}{}
\item{\code{spec <- c(rep(primary, 8), rep('slave', 6))}}{}
\item{\code{cl <- makeCluster(master=primary, spec=spec)}}{}
\item{\code{Final <- runSimulation(..., cl=cl)}}{}
\item{\code{stopCluster(cl)}}{}
}
}
\section{Poor man's cluster computing for independent nodes}{
In the event that you do not have access to a Beowulf-type cluster (described in the section on
"Network Computing") but have multiple personal
computers then the simulation code can be manually distributed across each independent computer instead.
This simply requires passing a smaller value to the \code{replications} argument on each computer and later
aggregating the results using the \code{\link{aggregate_simulations}} function.
For instance, if you have two computers available on different networks and wanted a total of 500 replications you
could pass \code{replications = 300} to one computer and \code{replications = 200} to the other along
with a \code{filename} argument (or simply saving the final objects as \code{.rds} files manually after
\code{runSimulation()} has finished). This will create two distinct \code{.rds} files which can be
combined later with the \code{\link{aggregate_simulations}} function. The benefit of this approach over
MPI or setting up a Beowulf cluster is that computers need not be linked on the same network,
and, should the need arise, the temporary
simulation results can be migrated to another computer in case of a complete hardware failure by moving the
saved temp files to another node, modifying
the suitable \code{compname} input to \code{save_details} (or, if the \code{filename} and \code{tmpfilename}
were modified, matching those files accordingly), and resuming the simulation as normal.
Note that this is also a useful tactic if the MPI or Network computing options require you to
submit smaller jobs due to time and resource constraint-related reasons,
where fewer replications/nodes should be requested. After all the jobs are completed and saved to their
respective files, \code{\link{aggregate_simulations}}
can then collapse the files as if the simulations were run all at once. Hence, SimDesign makes submitting
smaller jobs to super-computing resources considerably less error prone than managing a number of smaller
jobs manually.
}
\examples{
#-------------------------------------------------------------------------------
# Example 1: Sampling distribution of mean
# This example demonstrate some of the simpler uses of SimDesign,
# particularly for classroom settings. The only factor varied in this simulation
# is sample size.
# skeleton functions to be saved and edited
SimFunctions()
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- data.frame(N = c(10, 20, 30))
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
# help(Generate)
Generate <- function(condition, fixed_objects = NULL){
dat <- with(condition, rnorm(N, 10, 5)) # distributed N(10, 5)
dat
}
# help(Analyse)
Analyse <- function(condition, dat, fixed_objects = NULL){
ret <- mean(dat) # mean of the sample data vector
ret
}
# help(Summarise)
Summarise <- function(condition, results, fixed_objects = NULL){
ret <- c(mu=mean(results), SE=sd(results)) # mean and SD summary of the sample means
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# run the simulation
Final <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise)
Final
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Extras
# compare SEs estimates to the true SEs from the formula sigma/sqrt(N)
5 / sqrt(Design$N)
# To store the results from the analyse function either
# a) omit a definition of of summarise(), or
# b) pass save_results = TRUE to runSimulation() and read the results in with SimResults()
# e.g., the a) approach
results <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse)
str(results)
head(results[[1]])
# or b) approach
Final <- runSimulation(design=Design, replications=1000, save_results=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
results <- SimResults(Final)
str(results)
head(results[[1]]$results)
# remove the saved results from the hard-drive if you no longer want them
SimClean(results = TRUE)
#-------------------------------------------------------------------------------
# Example 2: t-test and Welch test when varying sample size, group sizes, and SDs
# skeleton functions to be saved and edited
SimFunctions()
\dontrun{
# in real-world simulations it's often better/easier to save
# these functions directly to your hard-drive with
SimFunctions('my-simulation')
}
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- expand.grid(sample_size = c(30, 60, 90, 120),
group_size_ratio = c(1, 4, 8),
standard_deviation_ratio = c(.5, 1, 2))
dim(Design)
head(Design)
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
Generate <- function(condition, fixed_objects = NULL){
N <- condition$sample_size # alternatively, could use Attach() to make objects available
grs <- condition$group_size_ratio
sd <- condition$standard_deviation_ratio
if(grs < 1){
N2 <- N / (1/grs + 1)
N1 <- N - N2
} else {
N1 <- N / (grs + 1)
N2 <- N - N1
}
group1 <- rnorm(N1)
group2 <- rnorm(N2, sd=sd)
dat <- data.frame(group = c(rep('g1', N1), rep('g2', N2)), DV = c(group1, group2))
dat
}
Analyse <- function(condition, dat, fixed_objects = NULL){
welch <- t.test(DV ~ group, dat)
ind <- t.test(DV ~ group, dat, var.equal=TRUE)
# In this function the p values for the t-tests are returned,
# and make sure to name each element, for future reference
ret <- c(welch = welch$p.value, independent = ind$p.value)
ret
}
# Aggregate the replication-by-test p-value results into empirical
# detection rates (Type I error / power) at alpha = .05.
Summarise <- function(condition, results, fixed_objects = NULL){
  # find results of interest here (e.g., alpha < .1, .05, .01)
  EDR(results, alpha = .05)
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# first, test to see if it works
# (a handful of replications catches coding errors cheaply before the full run)
Final <- runSimulation(design=Design, replications=5,
generate=Generate, analyse=Analyse, summarise=Summarise)
head(Final)
\dontrun{
# complete run with 1000 replications per condition
Final <- runSimulation(design=Design, replications=1000, parallel=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
head(Final, digits = 3)
View(Final)
## save final results to a file upon completion (not run)
runSimulation(design=Design, replications=1000, parallel=TRUE, save=TRUE, filename = 'mysim',
generate=Generate, analyse=Analyse, summarise=Summarise)
## Debug the generate function. See ?browser for help on debugging
## Type help to see available commands (e.g., n, c, where, ...),
## ls() to see what has been defined, and type Q to quit the debugger
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE, edit='generate')
## Alternatively, place a browser() within the desired function line to
## jump to a specific location
## Same Summarise as above, with browser() inserted so the interactive
## debugger opens at that line during the simulation run
Summarise <- function(condition, results, fixed_objects = NULL){
  # find results of interest here (e.g., alpha < .1, .05, .01)
  # NOTE(review): this originally read EDR(results[,nms], ...), but 'nms'
  # is never defined in the example, so the call errored with
  # "object 'nms' not found" before browser() was ever reached
  ret <- EDR(results, alpha = .05)
  browser()
  ret
}
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE)
## EXTRA: To run the simulation on a MPI cluster, use the following setup on each node (not run)
# library(doMPI)
# cl <- startMPIcluster()
# registerDoMPI(cl)
# Final <- runSimulation(design=Design, replications=1000, MPI=TRUE, save=TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise)
# saveRDS(Final, 'mysim.rds')
# closeCluster(cl)
# mpi.quit()
## Similarly, run simulation on a network linked via ssh
## (two way ssh key-paired connection must be possible between master and slave nodes)
##
## define IP addresses, including primary IP
# primary <- '192.168.2.20'
# IPs <- list(
# list(host=primary, user='phil', ncore=8),
# list(host='192.168.2.17', user='phil', ncore=8)
# )
# spec <- lapply(IPs, function(IP)
# rep(list(list(host=IP$host, user=IP$user)), IP$ncore))
# spec <- unlist(spec, recursive=FALSE)
#
# cl <- parallel::makeCluster(type='PSOCK', master=primary, spec=spec)
# Final <- runSimulation(design=Design, replications=1000, parallel = TRUE, save=TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise, cl=cl)
#~~~~~~~~~~~~~~~~~~~~~~~~
###### Post-analysis: Analyze the results via functions like lm() or SimAnova(), and create
###### tables(dplyr) or plots (ggplot2) to help visualize the results.
###### This is where you get to be a data analyst!
library(dplyr)
# NOTE(review): tbl_df() is deprecated in current dplyr; as_tibble() is the
# modern equivalent
Final2 <- tbl_df(Final)
Final2 \%>\% summarise(mean(welch), mean(independent))
Final2 \%>\% group_by(standard_deviation_ratio, group_size_ratio) \%>\%
summarise(mean(welch), mean(independent))
# quick ANOVA analysis method with all two-way interactions
SimAnova( ~ (sample_size + group_size_ratio + standard_deviation_ratio)^2, Final)
# or more specific anovas
SimAnova(independent ~ (group_size_ratio + standard_deviation_ratio)^2,
Final)
# make some plots
library(ggplot2)
library(reshape2)
welch_ind <- Final[,c('group_size_ratio', "standard_deviation_ratio",
"welch", "independent")]
# reshape to long format so both tests can share one plotting call
dd <- melt(welch_ind, id.vars = names(welch_ind)[1:2])
# boxplots of empirical rates; solid line marks the nominal .05 level and
# the dotted lines give a rough tolerance band around it
ggplot(dd, aes(factor(group_size_ratio), value)) + geom_boxplot() +
geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_wrap(~variable)
ggplot(dd, aes(factor(group_size_ratio), value, fill = factor(standard_deviation_ratio))) +
geom_boxplot() + geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_grid(variable~standard_deviation_ratio) +
theme(legend.position = 'none')
}
}
\references{
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \emph{Journal of Statistics Education, 24}(3), 136-156.
\url{http://www.tandfonline.com/doi/full/10.1080/10691898.2016.1246953}
}
\seealso{
\code{\link{Generate}}, \code{\link{Analyse}}, \code{\link{Summarise}},
\code{\link{SimFunctions}}, \code{\link{SimClean}}, \code{\link{SimAnova}}, \code{\link{SimResults}},
\code{\link{aggregate_simulations}}, \code{\link{Attach}}, \code{\link{SimShiny}}
}
| /man/runSimulation.Rd | no_license | pedroliman/SimDesign | R | false | true | 37,974 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSimulation.R
\name{runSimulation}
\alias{runSimulation}
\alias{print.SimDesign}
\alias{head.SimDesign}
\alias{tail.SimDesign}
\alias{summary.SimDesign}
\alias{as.data.frame.SimDesign}
\title{Run a Monte Carlo simulation given a data.frame of conditions and simulation functions}
\usage{
runSimulation(design, replications, generate, analyse, summarise,
fixed_objects = NULL, packages = NULL, filename = "SimDesign-results",
warnings_as_errors = FALSE, save = FALSE, save_results = FALSE,
save_seeds = FALSE, load_seed = NULL, seed = NULL, parallel = FALSE,
ncores = parallel::detectCores(), cl = NULL, MPI = FALSE,
max_errors = 50, as.factor = TRUE, save_generate_data = FALSE,
save_details = list(), edit = "none", progress = FALSE,
verbose = TRUE)
\method{print}{SimDesign}(x, drop.extras = FALSE, drop.design = FALSE,
format.time = TRUE, ...)
\method{head}{SimDesign}(x, ...)
\method{tail}{SimDesign}(x, ...)
\method{summary}{SimDesign}(object, ...)
\method{as.data.frame}{SimDesign}(x, ...)
}
\arguments{
\item{design}{a \code{data.frame} object containing the Monte Carlo simulation conditions to
be studied, where each row represents a unique condition and each column a factor to be varied}
\item{replications}{number of replication to perform per condition (i.e., each row in \code{design}).
Must be greater than 0}
\item{generate}{user-defined data and parameter generating function.
See \code{\link{Generate}} for details}
\item{analyse}{user-defined computation function which acts on the data generated from
\code{\link{Generate}}. See \code{\link{Analyse}} for details}
\item{summarise}{optional (but recommended) user-defined summary function to be used
after all the replications have completed within each \code{design} condition. Omitting this function
will return a list of matrices (or a single matrix, if only one row in \code{design} is supplied)
or, for more general objects (such as lists), a list containing the results returned form \code{\link{Analyse}}.
Omitting this function is only recommended for didactic purposes because it leaves out a large amount of
information (e.g., try-errors, warning messages, etc), can witness memory related issues,
and generally is not as flexible internally. See
the \code{save_results} option for a better alternative to storing the Generate-Analyse results}
\item{fixed_objects}{(optional) an object (usually a named \code{list})
containing additional user-defined objects
that should remain fixed across conditions. This is useful when including
long fixed vectors/matrices of population parameters, data
that should be used across all conditions and replications (e.g., including a fixed design matrix
for linear regression), or simply control constant global elements such as sample size}
\item{packages}{a character vector of external packages to be used during the simulation (e.g.,
\code{c('MASS', 'mvtnorm', 'simsem')} ). Use this input when \code{parallel = TRUE} or
\code{MPI = TRUE} to use non-standard functions from additional packages,
otherwise the functions must be made available by using explicit
\code{\link{library}} or \code{\link{require}} calls within the provided simulation functions.
Alternatively, functions can be called explicitly without attaching the package with the \code{::} operator
(e.g., \code{mvtnorm::rmvnorm()})}
\item{filename}{(optional) the name of the \code{.rds} file to save the final simulation results to
when \code{save = TRUE}. If the same file name already exists in the working
directly at the time of saving then a new
file will be generated instead and a warning will be thrown. This helps to avoid accidentally overwriting
existing files. Default is \code{'SimDesign-results'}}
\item{warnings_as_errors}{logical; treat warning messages as errors during the simulation? Default is FALSE,
therefore warnings are only collected and not used to restart the data generation step}
\item{save}{logical; save the simulation state and final results to the hard-drive? This is useful
for simulations which require an extended amount of time. When \code{TRUE}, a temp file
will be created in the working directory which allows the simulation state to be saved
and recovered (in case of power outages, crashes, etc). To recover your simulation at the last known
location simply re-run the code you used to initially define the simulation and the external file
will automatically be detected and read-in. Upon completion, the final results will
be saved to the working directory, and the temp file will be removed. Default is \code{FALSE}}
\item{save_results}{logical; save the results returned from \code{\link{Analyse}} to external
\code{.rds} files located in the defined \code{save_results_dirname} directory/folder?
Use this if you would like to keep track of the individual parameters returned from the analyses.
Each saved object will contain a list of three elements containing the condition (row from \code{design}),
results (as a \code{list} or \code{matrix}), and try-errors. When \code{TRUE}, a temp file will be used to track the simulation
state (in case of power outages, crashes, etc). When \code{TRUE}, temporary files will also be saved
to the working directory (in the same way as when \code{save = TRUE}).
See \code{\link{SimResults}} for an example of how to read these \code{.rds} files back into R
after the simulation is complete. Default is \code{FALSE}.
WARNING: saving results to your hard-drive can fill up space very quickly for larger simulations. Be sure to
test this option using a smaller number of replications before the full Monte Carlo simulation is performed.}
\item{save_seeds}{logical; save the \code{.Random.seed} states prior to performing each replication into
plain text files located in the defined \code{save_seeds_dirname} directory/folder?
Use this if you would like to keep track of the simulation state within each replication and design
condition. Primarily, this is useful for completely replicating any cell in the simulation if need be,
especially when tracking down hard-to-find errors and bugs. As well, see the \code{load_seed} input
to load a given \code{.Random.seed} to exactly replicate the generated data and analysis state (handy
for debugging). When \code{TRUE}, temporary files will also be saved
to the working directory (in the same way as when \code{save = TRUE}).
Default is \code{FALSE}}
\item{load_seed}{a character object indicating which file to load from when the \code{.Random.seed}s have
been saved (after a call with \code{save_seeds = TRUE}). E.g., \code{load_seed = 'design-row-2/seed-1'}
will load the first seed in the second row of the \code{design} input. Note that it is important NOT
to modify the \code{design} input object, otherwise the path may not point to the correct saved location.
Default is \code{NULL}}
\item{seed}{a vector of integers to be used for reproducibility.
The length of the vector must be equal the number of rows in \code{design}.
This argument calls \code{\link{set.seed}} or
\code{\link{clusterSetRNGStream}} for each condition, respectively,
but will not be run when \code{MPI = TRUE}.
Default is \code{NULL}, indicating that no seed is set for each condition}
\item{parallel}{logical; use parallel processing from the \code{parallel} package over each
unique condition?}
\item{ncores}{number of cores to be used in parallel execution. Default uses all available}
\item{cl}{cluster object defined by \code{\link{makeCluster}} used to run code in parallel.
If \code{NULL} and \code{parallel = TRUE}, a local cluster object will be defined which
selects the maximum number cores available
and will be stop the cluster when the simulation is complete. Note that supplying a \code{cl}
object will automatically set the \code{parallel} argument to \code{TRUE}}
\item{MPI}{logical; use the \code{foreach} package in a form usable by MPI to run simulation
in parallel on a cluster? Default is \code{FALSE}}
\item{max_errors}{the simulation will terminate when more than this number of consecutive errors are thrown in any
given condition. The purpose of this is to indicate that something fatally problematic is likely going
wrong in the generate-analyse phases and should be inspected. Default is 50}
\item{as.factor}{logical; coerce the input \code{design} elements into \code{factor}s when the
simulation is complete? If the columns inputs are numeric then these will be treated
as \code{ordered}. Default is \code{TRUE}}
\item{save_generate_data}{logical; save the data returned from \code{\link{Generate}} to external \code{.rds} files
located in the defined \code{save_generate_data_dirname} directory/folder?
When \code{TRUE}, temporary files will also be saved to the working directory
(in the same way as when \code{save = TRUE}). Default is \code{FALSE}.
WARNING: saving data to your hard-drive can fill up space very quickly for larger simulations. Be sure to
test this option using a smaller number of replications before the full Monte Carlo simulation is performed.
It is generally recommended to leave this argument as \code{FALSE} because saving datasets will often consume
a needless amount of disk space, and by-and-large saving data is not required for simulations.}
\item{save_details}{a list pertaining to information regarding how and where files should be saved
when the \code{save}, \code{save_results}, or \code{save_generate_data} flags are triggered.
\describe{
\item{\code{safe}}{logical; trigger whether safe-saving should be performed. When \code{TRUE} files
will never be overwritten accidentally, and where appropriate the program will either stop or generate
new files with unique names. Default is \code{TRUE}}
\item{\code{compname}}{name of the computer running the simulation. Normally this doesn't need
to be modified, but in the event that a manual node breaks down while running a simulation the
results from the temp files may be resumed on another computer by changing the name of the
node to match the broken computer. Default is the result of evaluating \code{unname(Sys.info()['nodename'])}}
\item{\code{tmpfilename}}{the name of the temporary \code{.rds} file when any of the \code{save} flags are used.
This file will be read-in if it is in the working directory and the simulation will continue
at the last point this file was saved
(useful in case of power outages or broken nodes). Finally, this file will be deleted when the
simulation is complete. Default is the system name (\code{compname}) appended
to \code{'SIMDESIGN-TEMPFILE_'}}
\item{\code{save_results_dirname}}{a string indicating the name of the folder to save
result objects to when \code{save_results = TRUE}. If a directory/folder does not exist
in the current working directory then a unique one will be created automatically. Default is
\code{'SimDesign-results_'} with the associated \code{compname} appended}
\item{\code{save_seeds_dirname}}{a string indicating the name of the folder to save
\code{.Random.seed} objects to when \code{save_seeds = TRUE}. If a directory/folder does not exist
in the current working directory then one will be created automatically. Default is
\code{'SimDesign-seeds_'} with the associated \code{compname} appended}
\item{\code{save_generate_data_dirname}}{a string indicating the name of the folder to save
data objects to when \code{save_generate_data = TRUE}. If a directory/folder does not exist
in the current working directory then one will be created automatically.
Within this folder nested directories will be created associated with each row in \code{design}.
Default is \code{'SimDesign-generate-data_'} with the \code{compname} appended}
}}
\item{edit}{a string indicating where to initiate a \code{browser()} call for editing and debugging.
General options are \code{'none'} (default) and \code{'all'}, which are used
to disable debugging and to debug all the user defined functions, respectively.
Specific options include: \code{'generate'}
to edit the data simulation function, \code{'analyse'} to edit the computational function, and
\code{'summarise'} to edit the aggregation function.
Alternatively, users may place \code{\link{browser}} calls within the respective functions for
debugging at specific lines (note: parallel computation flags will automatically be disabled
when a \code{browser()} is detected)}
\item{progress}{logical; display a progress bar for each simulation condition?
This is useful when simulations conditions take a long time to run.
Uses the \code{pbapply} package to display the progress. Default is \code{FALSE}}
\item{verbose}{logical; print messages to the R console? Default is \code{TRUE}}
\item{x}{SimDesign object returned from \code{\link{runSimulation}}}
\item{drop.extras}{logical; don't print information about warnings, errors, simulation time, and replications?
Default is \code{FALSE}}
\item{drop.design}{logical; don't include information about the (potentially factorized) simulation design?
This may be useful if you wish to \code{cbind()} the original design \code{data.frame} to the simulation
results instead of using the auto-factorized version. Default is \code{FALSE}}
\item{format.time}{logical; format \code{SIM_TIME} into a day/hour/min/sec character vector? Default is
\code{TRUE}}
\item{...}{additional arguments}
\item{object}{SimDesign object returned from \code{\link{runSimulation}}}
}
\value{
a \code{data.frame} (also of class \code{'SimDesign'})
with the original \code{design} conditions in the left-most columns,
simulation results and ERROR/WARNING's (if applicable) in the middle columns,
and additional information (such as REPLICATIONS, SIM_TIME, COMPLETED, and SEED) in the right-most
columns.
}
\description{
This function runs a Monte Carlo simulation study given a set of predefined simulation functions,
design conditions, and number of replications. Results can be saved as temporary files in case of interruptions
and may be restored by re-running \code{runSimulation}, provided that the respective temp
file can be found in the working directory. \code{runSimulation} supports parallel
and cluster computing, global and local debugging, error handling (including fail-safe
stopping when functions fail too often, even across nodes), and tracking of error and warning messages.
For convenience, all functions available in the R workspace are exported across all computational nodes
so that they are more easily accessible (however, other R objects are not, and therefore
must be passed to the \code{fixed_objects} input to become available across nodes).
For a didactic presentation of the package refer to Sigal and Chalmers
(2016; \url{http://www.tandfonline.com/doi/full/10.1080/10691898.2016.1246953}), and see the associated
wiki on Github (\url{https://github.com/philchalmers/SimDesign/wiki})
for other tutorial material, examples, and applications of \code{SimDesign} to real-world simulations.
}
\details{
The strategy for organizing the Monte Carlo simulation work-flow is to
\describe{
\item{1)}{Define a suitable \code{design} data.frame object containing fixed conditional
information about the Monte Carlo simulations. This is often expedited by using the
\code{\link{expand.grid}} function, and if necessary using the \code{\link{subset}}
function to remove redundant or non-applicable rows}
\item{2)}{Define the three step functions to generate the data (\code{\link{Generate}}; see also
\url{https://CRAN.R-project.org/view=Distributions} for a list of distributions in R),
analyse the generated data by computing the respective parameter estimates, detection rates,
etc (\code{\link{Analyse}}), and finally summarise the results across the total
number of replications (\code{\link{Summarise}}). Note that these functions can be
automatically generated by using the \code{\link{SimFunctions}} function.
}
\item{3)}{Pass the above objects to the \code{runSimulation} function, and declare the
number of replications to perform with the \code{replications} input. This function will accept
a \code{design} data.frame object and will return a suitable data.frame object with the
simulation results}
\item{4)}{Analyze the output from \code{runSimulation}, possibly using ANOVA techniques
(\code{\link{SimAnova}}) and generating suitable plots and tables}
}
For a skeleton version of the work-flow, which is often useful when initially defining a simulation,
see \code{\link{SimFunctions}}. This function will write template simulation code
to one/two files so that modifying the required functions and objects can begin immediately
with minimal error. This means that you can focus on your Monte Carlo simulation immediately rather
than worrying about the administrative code-work required to organize the simulation work-flow.
Additional information for each condition are also contained in the \code{data.frame} object returned by
\code{runSimulation}: \code{REPLICATIONS} to indicate the number of Monte Carlo replications,
\code{SIM_TIME} to indicate how long (in seconds) it took to complete
all the Monte Carlo replications for each respective design condition,
\code{COMPLETED} to indicate the date in which the given simulation condition completed,
\code{SEED} if the \code{seed} argument
was used, columns containing the number of replications which had to be re-run due to errors (where the error messages
represent the names of the columns prefixed with a \code{ERROR:} string), and
columns containing the number of warnings prefixed with a \code{WARNING:} string.
Additional examples, presentation files, and tutorials can be found on the package wiki located at
\url{https://github.com/philchalmers/SimDesign/wiki}.
}
\section{Saving data, results, seeds, and the simulation state}{
To conserve RAM, temporary objects (such as data generated across conditions and replications)
are discarded; however, these can be saved to the hard-disk by passing the appropriate flags.
For longer simulations it is recommended to use \code{save = TRUE} to temporarily save the
simulation state, and to use the \code{save_results} flag to write the analysis results
the to hard-disc.
The generated data can be saved by passing
\code{save_generate_data = TRUE}, however it is often more memory efficient to use the
\code{save_seeds} option instead to only save R's \code{.Random.seed} state instead (still
allowing for complete reproducibility); individual \code{.Random.seed} terms may also be read in with the
\code{load_seed} input to reproduce the exact simulation state at any given replication. Finally,
providing a vector of \code{seeds} is also possible to ensure
that each simulation condition is completely reproducible under the single/multi-core method selected.
Finally, when the Monte Carlo simulation is complete
it is recommended to write the results to a hard-drive for safe keeping, particularly with the
\code{save} and \code{filename} arguments provided (for reasons that are more obvious in the parallel computation
descriptions below). Using the \code{filename} argument (along with \code{save = TRUE})
supplied is much safer than using something
like \code{\link{saveRDS}} directly because files will never accidentally be overwritten,
and instead a new file name will be created when a conflict arises; this type of safety
is prevalent in many aspects of the package and helps to avoid many unrecoverable (yet surprisingly common)
mistakes.
}
\section{Resuming temporary results}{
In the event of a computer crash, power outage, etc, if \code{save = TRUE} was used
then the original code used to execute \code{runSimulation()} need only be re-run to resume the simulation.
The saved temp file will be read into the function automatically, and the simulation will continue
at the condition where it left off before the simulation state was terminated.
}
\section{A note on parallel computing}{
When running simulations in parallel (either with \code{parallel = TRUE} or \code{MPI = TRUE})
R objects defined in the global environment will generally \emph{not} be visible across nodes.
Hence, you may see errors such as \code{Error: object 'something' not found} if you try to use an object
that is defined in the workspace but is not passed to \code{runSimulation}.
To avoid this type or error, simply pass additional objects to the
\code{fixed_objects} input (usually it's convenient to supply a named list of these objects).
Fortunately, however, \emph{custom functions defined in the global environment are exported across
nodes automatically}. This makes it convenient when writing code because custom functions will
always be available across nodes if they are visible in the R workspace. As well, note the
\code{packages} input to declare packages which must be loaded via \code{library()} in order to make
specific non-standard R functions available across nodes.
}
\section{Cluster computing}{
SimDesign code may be released to a computing system which supports parallel cluster computations using
the industry standard Message Passing Interface (MPI) form. This simply
requires that the computers be setup using the usual MPI requirements (typically, running some flavor
of Linux, have password-less open-SSH access, IP addresses have been added to the \code{/etc/hosts} file
or \code{~/.ssh/config}, etc).
More generally though, these resources are widely available through professional
organizations dedicated to super-computing.
To setup the R code for an MPI cluster one need only add the argument \code{MPI = TRUE},
wrap the appropriate MPI directives around \code{runSimulation}, and submit the
files using the suitable BASH commands to execute the \code{mpirun} tool. For example,
\describe{
\item{\code{library(doMPI)}}{}
\item{\code{cl <- startMPIcluster()}}{}
\item{\code{registerDoMPI(cl)}}{}
\item{\code{runSimulation(design=Design, replications=1000, save=TRUE, filename='mysimulation',
generate=Generate, analyse=Analyse, summarise=Summarise, MPI=TRUE)}}{}
\item{\code{closeCluster(cl)}}{}
\item{\code{mpi.quit()}}{}
}
The necessary \code{SimDesign} files must be uploaded to the dedicated master node
so that a BASH call to \code{mpirun} can be used to distribute the work across slaves.
For instance, if the following BASH command is run on the master node then 16 processes
will be summoned (1 master, 15 slaves) across the computers named \code{localhost}, \code{slave1},
and \code{slave2} in the ssh \code{config} file.
\code{mpirun -np 16 -H localhost,slave1,slave2 R --slave -f simulation.R}
}
\section{Network computing}{
If you have access to a set of computers which can be linked via secure-shell (ssh) on the same LAN network then
Network computing (a.k.a., a Beowulf cluster) may be a viable and useful option.
This approach is similar to MPI computing approach
except that it offers more localized control and requires more hands-on administrative access to the master
and slave nodes. The setup generally requires that the master node
has \code{SimDesign} installed and the slave/master nodes have all the required R packages pre-installed
(Unix utilities such as \code{dsh} are very useful for this purpose). Finally,
the master node must have ssh access to the slave nodes, each slave node must have ssh access
with the master node, and a cluster object (\code{cl}) from the \code{parallel} package must be defined on the
master node.
Setup for network computing is generally more straightforward and controlled
than the setup for MPI jobs in that it only requires the specification of a) the respective
IP addresses within a defined R script, and b) the user name
(if different from the master node's user name. Otherwise, only a) is required).
However, on Linux I have found it is also important to include relevant information about the host names
and IP addresses in the \code{/etc/hosts} file on the master and slave nodes, and to ensure that
the selected port (passed to \code{\link{makeCluster}}) on the master node is not hindered by a firewall.
As an example, using the following code the master node (primary) will spawn 7 slaves and 1 master,
while a separate computer on the network with the associated IP address will spawn an additional 6 slaves.
Information will be collected on the master node, which is also where the files
and objects will be saved using the \code{save} inputs (if requested).
\describe{
\item{\code{library(parallel)}}{}
\item{\code{primary <- '192.168.2.1'}}{}
\item{\code{IPs <- list(list(host=primary, user='myname', ncore=8), list(host='192.168.2.2', user='myname', ncore=6))}}{}
\item{\code{spec <- lapply(IPs, function(IP) rep(list(list(host=IP$host, user=IP$user)), IP$ncore))}}{}
\item{\code{spec <- unlist(spec, recursive=FALSE)}}{}
\item{\code{cl <- makeCluster(master=primary, spec=spec)}}{}
\item{\code{Final <- runSimulation(..., cl=cl)}}{}
\item{\code{stopCluster(cl)}}{}
}
The object \code{cl} is passed to \code{runSimulation} on the master node
and the computations are distributed across the respective
IP addresses. Finally, it's usually good practice to use \code{stopCluster(cl)}
when all the simulations are said and done to release the communication between the computers,
which is what the above code shows.
Alternatively, if you have provided suitable names for each respective slave node, as well as the master,
then you can define the \code{cl} object using these instead (rather than supplying the IP addresses in
your R script). This requires that the master node has itself and all the slave nodes defined in the
\code{/etc/hosts} and \code{~/.ssh/config} files, while the slave nodes require themselves and the
master node in the same files (only 2 IP addresses required on each slave).
Following this setup, and assuming the user name is the same across all nodes,
the \code{cl} object could instead be defined with
\describe{
\item{\code{library(parallel)}}{}
\item{\code{primary <- 'master'}}{}
\item{\code{IPs <- list(list(host=primary, ncore=8), list(host='slave', ncore=6))}}{}
\item{\code{spec <- lapply(IPs, function(IP) rep(list(list(host=IP$host)), IP$ncore))}}{}
\item{\code{spec <- unlist(spec, recursive=FALSE)}}{}
\item{\code{cl <- makeCluster(master=primary, spec=spec)}}{}
\item{\code{Final <- runSimulation(..., cl=cl)}}{}
\item{\code{stopCluster(cl)}}{}
}
Or, even more succinctly if all communication elements required are identical to the master node,
\describe{
\item{\code{library(parallel)}}{}
\item{\code{primary <- 'master'}}{}
\item{\code{spec <- c(rep(primary, 8), rep('slave', 6))}}{}
\item{\code{cl <- makeCluster(master=primary, spec=spec)}}{}
\item{\code{Final <- runSimulation(..., cl=cl)}}{}
\item{\code{stopCluster(cl)}}{}
}
}
\section{Poor man's cluster computing for independent nodes}{
In the event that you do not have access to a Beowulf-type cluster (described in the section on
"Network Computing") but have multiple personal
computers then the simulation code can be manually distributed across each independent computer instead.
This simply requires passing a smaller value to the \code{replications} argument on each computer and later
aggregating the results using the \code{\link{aggregate_simulations}} function.
For instance, if you have two computers available on different networks and wanted a total of 500 replications you
could pass \code{replications = 300} to one computer and \code{replications = 200} to the other along
with a \code{filename} argument (or simply saving the final objects as \code{.rds} files manually after
\code{runSimulation()} has finished). This will create two distinct \code{.rds} files which can be
combined later with the \code{\link{aggregate_simulations}} function. The benefit of this approach over
MPI or setting up a Beowulf cluster is that computers need not be linked on the same network,
and, should the need arise, the temporary
simulation results can be migrated to another computer in case of a complete hardware failure by moving the
saved temp files to another node, modifying
the suitable \code{compname} input to \code{save_details} (or, if the \code{filename} and \code{tmpfilename}
were modified, matching those files accordingly), and resuming the simulation as normal.
Note that this is also a useful tactic if the MPI or Network computing options require you to
submit smaller jobs due to time and resource constraint-related reasons,
where fewer replications/nodes should be requested. After all the jobs are completed and saved to their
respective files, \code{\link{aggregate_simulations}}
can then collapse the files as if the simulations were run all at once. Hence, SimDesign makes submitting
smaller jobs to super-computing resources considerably less error prone than managing a number of smaller
jobs manually.
}
\examples{
#-------------------------------------------------------------------------------
# Example 1: Sampling distribution of mean
# This example demonstrate some of the simpler uses of SimDesign,
# particularly for classroom settings. The only factor varied in this simulation
# is sample size.
# skeleton functions to be saved and edited
SimFunctions()
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- data.frame(N = c(10, 20, 30))
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
# help(Generate)
Generate <- function(condition, fixed_objects = NULL){
dat <- with(condition, rnorm(N, 10, 5)) # distributed N(10, 5)
dat
}
# help(Analyse)
Analyse <- function(condition, dat, fixed_objects = NULL){
ret <- mean(dat) # mean of the sample data vector
ret
}
# help(Summarise)
Summarise <- function(condition, results, fixed_objects = NULL){
ret <- c(mu=mean(results), SE=sd(results)) # mean and SD summary of the sample means
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# run the simulation
Final <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise)
Final
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Extras
# compare SEs estimates to the true SEs from the formula sigma/sqrt(N)
5 / sqrt(Design$N)
# To store the results from the analyse function either
# a) omit a definition of of summarise(), or
# b) pass save_results = TRUE to runSimulation() and read the results in with SimResults()
# e.g., the a) approach
results <- runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse)
str(results)
head(results[[1]])
# or b) approach
Final <- runSimulation(design=Design, replications=1000, save_results=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
results <- SimResults(Final)
str(results)
head(results[[1]]$results)
# remove the saved results from the hard-drive if you no longer want them
SimClean(results = TRUE)
#-------------------------------------------------------------------------------
# Example 2: t-test and Welch test when varying sample size, group sizes, and SDs
# skeleton functions to be saved and edited
SimFunctions()
\dontrun{
# in real-world simulations it's often better/easier to save
# these functions directly to your hard-drive with
SimFunctions('my-simulation')
}
#### Step 1 --- Define your conditions under study and create design data.frame
Design <- expand.grid(sample_size = c(30, 60, 90, 120),
group_size_ratio = c(1, 4, 8),
standard_deviation_ratio = c(.5, 1, 2))
dim(Design)
head(Design)
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 2 --- Define generate, analyse, and summarise functions
Generate <- function(condition, fixed_objects = NULL){
N <- condition$sample_size # alternatively, could use Attach() to make objects available
grs <- condition$group_size_ratio
sd <- condition$standard_deviation_ratio
if(grs < 1){
N2 <- N / (1/grs + 1)
N1 <- N - N2
} else {
N1 <- N / (grs + 1)
N2 <- N - N1
}
group1 <- rnorm(N1)
group2 <- rnorm(N2, sd=sd)
dat <- data.frame(group = c(rep('g1', N1), rep('g2', N2)), DV = c(group1, group2))
dat
}
Analyse <- function(condition, dat, fixed_objects = NULL){
welch <- t.test(DV ~ group, dat)
ind <- t.test(DV ~ group, dat, var.equal=TRUE)
# In this function the p values for the t-tests are returned,
# and make sure to name each element, for future reference
ret <- c(welch = welch$p.value, independent = ind$p.value)
ret
}
Summarise <- function(condition, results, fixed_objects = NULL){
#find results of interest here (e.g., alpha < .1, .05, .01)
ret <- EDR(results, alpha = .05)
ret
}
#~~~~~~~~~~~~~~~~~~~~~~~~
#### Step 3 --- Collect results by looping over the rows in design
# first, test to see if it works
Final <- runSimulation(design=Design, replications=5,
generate=Generate, analyse=Analyse, summarise=Summarise)
head(Final)
\dontrun{
# complete run with 1000 replications per condition
Final <- runSimulation(design=Design, replications=1000, parallel=TRUE,
generate=Generate, analyse=Analyse, summarise=Summarise)
head(Final, digits = 3)
View(Final)
## save final results to a file upon completion (not run)
runSimulation(design=Design, replications=1000, parallel=TRUE, save=TRUE, filename = 'mysim',
generate=Generate, analyse=Analyse, summarise=Summarise)
## Debug the generate function. See ?browser for help on debugging
## Type help to see available commands (e.g., n, c, where, ...),
## ls() to see what has been defined, and type Q to quit the debugger
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE, edit='generate')
## Alternatively, place a browser() within the desired function line to
## jump to a specific location
Summarise <- function(condition, results, fixed_objects = NULL){
#find results of interest here (e.g., alpha < .1, .05, .01)
ret <- EDR(results[,nms], alpha = .05)
browser()
ret
}
runSimulation(design=Design, replications=1000,
generate=Generate, analyse=Analyse, summarise=Summarise,
parallel=TRUE)
## EXTRA: To run the simulation on a MPI cluster, use the following setup on each node (not run)
# library(doMPI)
# cl <- startMPIcluster()
# registerDoMPI(cl)
# Final <- runSimulation(design=Design, replications=1000, MPI=TRUE, save=TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise)
# saveRDS(Final, 'mysim.rds')
# closeCluster(cl)
# mpi.quit()
## Similarly, run simulation on a network linked via ssh
## (two way ssh key-paired connection must be possible between master and slave nodes)
##
## define IP addresses, including primary IP
# primary <- '192.168.2.20'
# IPs <- list(
# list(host=primary, user='phil', ncore=8),
# list(host='192.168.2.17', user='phil', ncore=8)
# )
# spec <- lapply(IPs, function(IP)
# rep(list(list(host=IP$host, user=IP$user)), IP$ncore))
# spec <- unlist(spec, recursive=FALSE)
#
# cl <- parallel::makeCluster(type='PSOCK', master=primary, spec=spec)
# Final <- runSimulation(design=Design, replications=1000, parallel = TRUE, save=TRUE,
# generate=Generate, analyse=Analyse, summarise=Summarise, cl=cl)
#~~~~~~~~~~~~~~~~~~~~~~~~
###### Post-analysis: Analyze the results via functions like lm() or SimAnova(), and create
###### tables(dplyr) or plots (ggplot2) to help visualize the results.
###### This is where you get to be a data analyst!
library(dplyr)
Final2 <- tbl_df(Final)
Final2 \%>\% summarise(mean(welch), mean(independent))
Final2 \%>\% group_by(standard_deviation_ratio, group_size_ratio) \%>\%
summarise(mean(welch), mean(independent))
# quick ANOVA analysis method with all two-way interactions
SimAnova( ~ (sample_size + group_size_ratio + standard_deviation_ratio)^2, Final)
# or more specific anovas
SimAnova(independent ~ (group_size_ratio + standard_deviation_ratio)^2,
Final)
# make some plots
library(ggplot2)
library(reshape2)
welch_ind <- Final[,c('group_size_ratio', "standard_deviation_ratio",
"welch", "independent")]
dd <- melt(welch_ind, id.vars = names(welch_ind)[1:2])
ggplot(dd, aes(factor(group_size_ratio), value)) + geom_boxplot() +
geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_wrap(~variable)
ggplot(dd, aes(factor(group_size_ratio), value, fill = factor(standard_deviation_ratio))) +
geom_boxplot() + geom_abline(intercept=0.05, slope=0, col = 'red') +
geom_abline(intercept=0.075, slope=0, col = 'red', linetype='dotted') +
geom_abline(intercept=0.025, slope=0, col = 'red', linetype='dotted') +
facet_grid(variable~standard_deviation_ratio) +
theme(legend.position = 'none')
}
}
\references{
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \emph{Journal of Statistics Education, 24}(3), 136-156.
\url{http://www.tandfonline.com/doi/full/10.1080/10691898.2016.1246953}
}
\seealso{
\code{\link{Generate}}, \code{\link{Analyse}}, \code{\link{Summarise}},
\code{\link{SimFunctions}}, \code{\link{SimClean}}, \code{\link{SimAnova}}, \code{\link{SimResults}},
\code{\link{aggregate_simulations}}, \code{\link{Attach}}, \code{\link{SimShiny}}
}
|
# word one hot encode example
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Build a word -> integer index map. Words are split on spaces only, so
# punctuation stays attached ("mat." is one token). Indices start at 2,
# leaving slot 1 unused -- presumably reserved, mirroring the source
# example; TODO confirm against the original text.
token_index <- list()
for (sample in samples)
for (word in strsplit(sample, " ")[[1]])
if (!word %in% names(token_index))
token_index[[word]] <- length(token_index) + 2
max_length <- 10  # keep at most the first 10 words of each sample
# results[i, j, k] == 1 when word j of sample i has vocabulary index k;
# third dimension sized by the largest assigned index.
results <- array(0, dim = c(length(samples),
max_length,
max(as.integer(token_index))))
for (i in 1:length(samples)) {
sample <- samples[[i]]
words <- head(strsplit(sample, " ")[[1]], n = max_length)
for (j in 1:length(words)) {
index <- token_index[[words[[j]]]]
results[[i, j, index]] <- 1
}
}
# Character one hot encode example
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Token set: the empty string (index 1) plus printable ASCII 32..126,
# giving a fixed character vocabulary.
ascii_tokens <- c("", sapply(as.raw(c(32:126)), rawToChar))
token_index <- c(1:(length(ascii_tokens)))
names(token_index) <- ascii_tokens
max_length <- 50  # keep at most the first 50 characters of each sample
results <- array(0, dim = c(length(samples), max_length, length(token_index)))
for (i in 1:length(samples)) {
sample <- samples[[i]]
characters <- strsplit(sample, "")[[1]]
for (j in 1:length(characters)) {
character <- characters[[j]]
results[i, j, token_index[[character]]] <- 1
}
}
# keras built-in tokenizer: equivalent of the manual word encoding above,
# limited to the 1000 most frequent words.
library(keras)
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
tokenizer <- text_tokenizer(num_words = 1000) %>%
fit_text_tokenizer(samples)
sequences <- texts_to_sequences(tokenizer, samples)
one_hot_results <- texts_to_matrix(tokenizer, samples, mode = "binary")
word_index <- tokenizer$word_index
cat("Found", length(word_index), "unique tokens.\n")
| /practice/Keras_example/Keras_onehot_word.R | no_license | shaomin4/shaomin_research | R | false | false | 1,601 | r | # word one hot encode example
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Build a word -> integer index map. Words are split on spaces only, so
# punctuation stays attached ("mat." is one token). Indices start at 2,
# leaving slot 1 unused -- presumably reserved, mirroring the source
# example; TODO confirm against the original text.
token_index <- list()
for (sample in samples)
for (word in strsplit(sample, " ")[[1]])
if (!word %in% names(token_index))
token_index[[word]] <- length(token_index) + 2
max_length <- 10  # keep at most the first 10 words of each sample
# results[i, j, k] == 1 when word j of sample i has vocabulary index k;
# third dimension sized by the largest assigned index.
results <- array(0, dim = c(length(samples),
max_length,
max(as.integer(token_index))))
for (i in 1:length(samples)) {
sample <- samples[[i]]
words <- head(strsplit(sample, " ")[[1]], n = max_length)
for (j in 1:length(words)) {
index <- token_index[[words[[j]]]]
results[[i, j, index]] <- 1
}
}
# Character one hot encode example
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
# Token set: the empty string (index 1) plus printable ASCII 32..126,
# giving a fixed character vocabulary.
ascii_tokens <- c("", sapply(as.raw(c(32:126)), rawToChar))
token_index <- c(1:(length(ascii_tokens)))
names(token_index) <- ascii_tokens
max_length <- 50  # keep at most the first 50 characters of each sample
results <- array(0, dim = c(length(samples), max_length, length(token_index)))
for (i in 1:length(samples)) {
sample <- samples[[i]]
characters <- strsplit(sample, "")[[1]]
for (j in 1:length(characters)) {
character <- characters[[j]]
results[i, j, token_index[[character]]] <- 1
}
}
# keras built-in tokenizer: equivalent of the manual word encoding above,
# limited to the 1000 most frequent words.
library(keras)
samples <- c("The cat sat on the mat.", "The dog ate my homework.")
tokenizer <- text_tokenizer(num_words = 1000) %>%
fit_text_tokenizer(samples)
sequences <- texts_to_sequences(tokenizer, samples)
one_hot_results <- texts_to_matrix(tokenizer, samples, mode = "binary")
word_index <- tokenizer$word_index
cat("Found", length(word_index), "unique tokens.\n")
# Build a PCRE pattern that matches a radiology keyword only when it is not
# preceded by the negation token "No"/"no" within the same phrase.
#
# word: the finding keyword (e.g. "pneumothorax"); matching of its first
#       letter is made case-insensitive via a character class.
# Returns the pattern string, e.g. "^\\w((?![Nn]o)[\\w\\s])*[Pp]neumothorax".
# For an empty word the negation-free prefix alone is returned; the original
# code emitted an invalid empty character class "[]" in that case.
rad.regex = function(word = "") {
  # Anchor at a word character and refuse "No"/"no" anywhere before the
  # keyword (negative lookahead applied at every step).
  .m = "^\\w((?![Nn]o)[\\w\\s])*"
  word = tolower(word)
  if (nchar(word) == 0) return(.m)
  # Case-insensitive first letter: "pneumothorax" -> "[Pp]neumothorax".
  .first = substr(word, 1, 1)
  .rest = substr(word, 2, nchar(word))
  paste0(.m, "[", toupper(.first), .first, "]", .rest)
}
# Ad-hoc console snippets (commented out) showing how patterns like the one
# above were applied to a data.table of radiology reports.
# x[year == 2014][
# str_detect(report, "[Pp]neumothorax")][
# !str_detect(report, "[Nn]o\\s[\\w\\s]*[Pp]neumothorax")][
# ,str_extract(.{0,40}neumothorax.{0,40})]
# i="pneumothorax";
# e=paste0("\\s([Nn]o|or|without)\\s[\\w\\s,]*",i);
# x[str_detect(report, i)][!str_detect(report, e)]
# [1:40,str_extract(report, paste0(".{40}",a,".{40}"))]
# NOTE(review): the two statements below reference 'report', 'x' and 'XTYPE',
# none of which are defined in this file -- they appear to assume a data.table
# already loaded in the calling session and will error if sourced standalone.
# Confirm whether they should be commented out like the lines above.
str_extract(report, "(\\w*\\s*){5}([Tt]racheal*\\s*deviat|[Dd]eviat[\\w\\s]*trachea)(\\w*\\s*){5}")
x[XTYPE == "CXR"][str_detect(report, "([Tt]racheal*\\s*deviat|[Dd]eviat[\\w\\s]*trachea)")][,m := str_extract(report, ".{25}([Tt]racheal*\\s*deviat|[Dd]eviat[\\w\\s]*trachea).{10}")]
# ^(?<=[A-Za-z0-9])([A-Za-z0-9\s]*)(?=[A-Za-z0-9])$
| /FEMH/regex.R | no_license | gnayyc/cyy.utils | R | false | false | 1,017 | r | rad.regex = function(word = "") {
# Anchor at a word character and refuse the negation token "No"/"no"
# anywhere before the keyword (PCRE negative lookahead).
.m = "^\\w((?![Nn]o)[\\w\\s])*"
#regex(paste0(.m, word), ignore_case=T)
# NOTE(review): .e is assigned but never used in this function.
.e = "[\\w\\s]*[\\.]"
word = tolower(word)
# Make the first letter case-insensitive via a character class,
# e.g. "pneumothorax" -> "[Pp]neumothorax".
.w0 = substr(word, 1, 1)
.w1 = paste0("[",toupper(.w0), .w0, "]")
.w2 = substr(word, 2, nchar(word))
.w = paste0(.w1, .w2)
paste0(.m, .w)
}
# Ad-hoc console snippets (commented out) showing how patterns like the one
# above were applied to a data.table of radiology reports.
# x[year == 2014][
# str_detect(report, "[Pp]neumothorax")][
# !str_detect(report, "[Nn]o\\s[\\w\\s]*[Pp]neumothorax")][
# ,str_extract(.{0,40}neumothorax.{0,40})]
# i="pneumothorax";
# e=paste0("\\s([Nn]o|or|without)\\s[\\w\\s,]*",i);
# x[str_detect(report, i)][!str_detect(report, e)]
# [1:40,str_extract(report, paste0(".{40}",a,".{40}"))]
# NOTE(review): the two statements below reference 'report', 'x' and 'XTYPE',
# none of which are defined in this file -- they appear to assume a data.table
# already loaded in the calling session and will error if sourced standalone.
# Confirm whether they should be commented out like the lines above.
str_extract(report, "(\\w*\\s*){5}([Tt]racheal*\\s*deviat|[Dd]eviat[\\w\\s]*trachea)(\\w*\\s*){5}")
x[XTYPE == "CXR"][str_detect(report, "([Tt]racheal*\\s*deviat|[Dd]eviat[\\w\\s]*trachea)")][,m := str_extract(report, ".{25}([Tt]racheal*\\s*deviat|[Dd]eviat[\\w\\s]*trachea).{10}")]
# ^(?<=[A-Za-z0-9])([A-Za-z0-9\s]*)(?=[A-Za-z0-9])$
|
#' Create your conda virtual env with DALEX
#'
#' Python objects may be loaded into R. However, it requires versions of the Python and libraries to match between both machines.
#' This functions allow user to create conda virtual environment based on provided .yml file.
#'
#' @usage create_env(yml, condaenv)
#' @param yml a path to the .yml file. If OS is Windows conda has to be added to the PATH first
#' @param condaenv path to main conda folder. If OS is Unix You may want to specify it. When passed with windows, param will be omitted.
#'
#' @author Szymon Maksymiuk
#'
#' @return Name of created virtual env.
#'
#'
#' @examples
#' \dontrun{
#' create_env(system.file("extdata", "testing_environment.yml", package = "DALEXtra"))
#' }
#' @rdname create_env
#' @export
create_env <- function(yml, condaenv = NULL) {
# On unix the conda installation path is needed to invoke conda directly;
# when not supplied, derive it from reticulate's conda binary location
# (strip the binary file name, then go one directory up).
if (.Platform$OS.type == "unix" & is.null(condaenv)) {
if (is_conda()) {
condaenv = paste(sub('[/][^/]+$', '', reticulate::conda_binary()[1]),
"/..",
sep = "")
message(paste(
"Path to conda not specified while on unix. Default used.",
condaenv
))
} else {
stop("Conda not found")
}
}
# Extract name of the environment that is stored in .yml header
con <- file(yml, "r")
first_line <- readLines(con, n = 1)
close(con)
# Name is stored in the pattern : "name: name_of_env" so we have to split the string
name <- strsplit(first_line, split = " ")[[1]][2]
# Check if specified env already exists
# If it does, reuse it instead of creating a duplicate.
if (name %in% reticulate::conda_list()$name) {
message(sprintf("There already exists environment named the same as it is specified in .yml file - %s - loading", name))
return(name)
}
# Virtual env creation
# Windows and linux has different shells
if (.Platform$OS.type == "windows") {
message(
paste(
"Virtual environment \"" ,
name,
"\" is being created. It may take few minutes.",
sep = ""
)
)
# NOTE(review): shell(..., intern = TRUE) signals a warning on failure;
# the handler below then re-runs "conda env create" a second time to
# capture its output (the output captured in expr is lost when the
# warning unwinds). This means the conda command executes twice on
# failure -- confirm that is intended.
tryCatch(
expr = {
mes <-
shell(paste("conda env create -f", yml, sep = " "), intern = TRUE)
},
warning = function(w) {
mes <-
shell(paste("conda env create -f", yml, sep = " "), intern = TRUE)
# Classify the failure by pattern-matching the captured shell output.
if (any(grepl("not recognized", mes))) {
cat(mes)
stop(
"conda is not recognised by your shell. Please set system variables for conda in order to use that function",
call. = FALSE
)
}
else if (any(grepl("ResolvePackageNotFound", mes))) {
cat(mes)
stop("Conda cannot find specified packages at channels you have provided.\n",
"Try to add more channels (conda repositories) to your .yml file.",
"Additionally, packages included in your .yml file may not be available for current Python version or OS. Try to remove exact versions specifications of particular libraries.\n",
"If nothing above works, try to use 'pip:' statement",
call. = FALSE)
}
else{
cat(mes)
stop(
"Unrecognized error occured when creating anaconda virtual env. Try to configure you environment manually using Anaconda prompt. For usefull commands see ?explain_scikitlearn",
call. = FALSE
)
}
}
)
}
if (.Platform$OS.type == "unix") {
message(
paste(
"Virtual environment \"" ,
name,
"\" is being created. It may take few minutes.",
sep = ""
)
)
tryCatch(
expr = {
mes <-
system(paste(condaenv, "/bin/conda ", "env create -f ", yml, sep = ""),
intern = TRUE)
},
# Unix erros not partitioned since it is impossbile to capture whole shell output
warning = function(w) {
stop(w, "\n",
"Conda cannot find specified packages at channels you have provided. Try to add more channels (conda repositories) to your .yml file.\n",
"Packages included in your .yml file may not be available for current Python version or OS. Try to remove exact versions o libraries.\n",
"If nothing above works, some of the packages are not avialable for your conda, try to use 'pip:' statement.\n",
"Error has occured, check warnings() for possible problems",
call. = FALSE)
},
error = function(e) {
# "running command" in the error text means the conda binary itself
# could not be invoked (i.e. the condaenv path is wrong).
if (any(grepl("running command", e))) {
stop(
"conda is not recognised by your shell. Please check you conda path is correct in order to use that function",
call. = FALSE
)
} else{
stop(
e, "\n",
"Unrecognized error occured when creating anaconda virtual env, use warnings() to see it. Try to configure you environment manually using Anaconda prompt. For usefull commands see ?explain_scikitlearn",
call. = FALSE
)
}
}
)
}
# Return the created environment's name (last expression).
name
}
| /R/create_env.R | no_license | ModelOriented/DALEXtra | R | false | false | 5,014 | r | #' Create your conda virtual env with DALEX
#'
#' Python objects may be loaded into R. However, it requires versions of the Python and libraries to match between both machines.
#' This functions allow user to create conda virtual environment based on provided .yml file.
#'
#' @usage create_env(yml, condaenv)
#' @param yml a path to the .yml file. If OS is Windows conda has to be added to the PATH first
#' @param condaenv path to main conda folder. If OS is Unix You may want to specify it. When passed with windows, param will be omitted.
#'
#' @author Szymon Maksymiuk
#'
#' @return Name of created virtual env.
#'
#'
#' @examples
#' \dontrun{
#' create_env(system.file("extdata", "testing_environment.yml", package = "DALEXtra"))
#' }
#' @rdname create_env
#' @export
create_env <- function(yml, condaenv = NULL) {
# On unix the conda installation path is needed to invoke conda directly;
# when not supplied, derive it from reticulate's conda binary location
# (strip the binary file name, then go one directory up).
if (.Platform$OS.type == "unix" & is.null(condaenv)) {
if (is_conda()) {
condaenv = paste(sub('[/][^/]+$', '', reticulate::conda_binary()[1]),
"/..",
sep = "")
message(paste(
"Path to conda not specified while on unix. Default used.",
condaenv
))
} else {
stop("Conda not found")
}
}
# Extract name of the environment that is stored in .yml header
con <- file(yml, "r")
first_line <- readLines(con, n = 1)
close(con)
# Name is stored in the pattern : "name: name_of_env" so we have to split the string
name <- strsplit(first_line, split = " ")[[1]][2]
# Check if specified env already exists
# If it does, reuse it instead of creating a duplicate.
if (name %in% reticulate::conda_list()$name) {
message(sprintf("There already exists environment named the same as it is specified in .yml file - %s - loading", name))
return(name)
}
# Virtual env creation
# Windows and linux has different shells
if (.Platform$OS.type == "windows") {
message(
paste(
"Virtual environment \"" ,
name,
"\" is being created. It may take few minutes.",
sep = ""
)
)
# NOTE(review): shell(..., intern = TRUE) signals a warning on failure;
# the handler below then re-runs "conda env create" a second time to
# capture its output (the output captured in expr is lost when the
# warning unwinds). This means the conda command executes twice on
# failure -- confirm that is intended.
tryCatch(
expr = {
mes <-
shell(paste("conda env create -f", yml, sep = " "), intern = TRUE)
},
warning = function(w) {
mes <-
shell(paste("conda env create -f", yml, sep = " "), intern = TRUE)
# Classify the failure by pattern-matching the captured shell output.
if (any(grepl("not recognized", mes))) {
cat(mes)
stop(
"conda is not recognised by your shell. Please set system variables for conda in order to use that function",
call. = FALSE
)
}
else if (any(grepl("ResolvePackageNotFound", mes))) {
cat(mes)
stop("Conda cannot find specified packages at channels you have provided.\n",
"Try to add more channels (conda repositories) to your .yml file.",
"Additionally, packages included in your .yml file may not be available for current Python version or OS. Try to remove exact versions specifications of particular libraries.\n",
"If nothing above works, try to use 'pip:' statement",
call. = FALSE)
}
else{
cat(mes)
stop(
"Unrecognized error occured when creating anaconda virtual env. Try to configure you environment manually using Anaconda prompt. For usefull commands see ?explain_scikitlearn",
call. = FALSE
)
}
}
)
}
if (.Platform$OS.type == "unix") {
message(
paste(
"Virtual environment \"" ,
name,
"\" is being created. It may take few minutes.",
sep = ""
)
)
tryCatch(
expr = {
mes <-
system(paste(condaenv, "/bin/conda ", "env create -f ", yml, sep = ""),
intern = TRUE)
},
# Unix erros not partitioned since it is impossbile to capture whole shell output
warning = function(w) {
stop(w, "\n",
"Conda cannot find specified packages at channels you have provided. Try to add more channels (conda repositories) to your .yml file.\n",
"Packages included in your .yml file may not be available for current Python version or OS. Try to remove exact versions o libraries.\n",
"If nothing above works, some of the packages are not avialable for your conda, try to use 'pip:' statement.\n",
"Error has occured, check warnings() for possible problems",
call. = FALSE)
},
error = function(e) {
# "running command" in the error text means the conda binary itself
# could not be invoked (i.e. the condaenv path is wrong).
if (any(grepl("running command", e))) {
stop(
"conda is not recognised by your shell. Please check you conda path is correct in order to use that function",
call. = FALSE
)
} else{
stop(
e, "\n",
"Unrecognized error occured when creating anaconda virtual env, use warnings() to see it. Try to configure you environment manually using Anaconda prompt. For usefull commands see ?explain_scikitlearn",
call. = FALSE
)
}
}
)
}
# Return the created environment's name (last expression).
name
}
|
#########################################################################
#
# Package:
#
# File: proportion.R
# Contains: proportion
#
# Written by Samuel Beazley and Rodrigo Amadeu
#
# First version: March-2021
# Last update: 5-Aug-2021
#
#########################################################################
#'
#' Test parentage of individual
#'
#' Given individual and a vectors of possible parents, function returns dataframe of proportion of pedigree conflict with each possible trio
#'
#' @param parents a vector with strings related to the name of the suspected parents
#' @param individual a string value with the individual name you are testing
#' @param data the dataframe from which the data is from
#'
#' @return A dataframe of different combinations of parents and individual with the proportion of pedigree conflicts in each trio
#'
#' @examples
#' data(potato.data)
#' proportion(parents = c("W6511.1R","VillettaRose","W9914.1R"),
#' individual = "W15268.1R",
#' data = potato.data)
#'
#' @export
proportion <- function(parents, individual, data)
{
DF <- data.frame()
# Loop over each tested individual; a single name (as documented) or a
# vector of names both work.
# BUG FIX: the original iterated over an undefined global 'individuals'
# instead of the 'individual' argument, so the function only worked when a
# variable of that name happened to exist in the calling environment.
for(indiv in individual)
{
# All unique unordered pairs of candidate parents.
table <- gtools::combinations(n = length(parents), r = 2, repeats.allowed = FALSE, v = parents)
table <- cbind(table, rep(indiv, dim(table)[1]) ) #creating table of trios to test
vec <- c()  # conflict statistic per trio
vec2 <- c() # number of informative markers per trio
for(i in seq_len(dim(table)[1]))
{
# paternity() expects marker columns ordered parent1, individual, parent2.
tmp <- paternity(cbind(data[[ table[i,1] ]], data[[ table[i,3] ]], data[[ table[i,2] ]]))
vec <- cbind(vec,tmp[1])
vec2 <- cbind(vec2,tmp[2])
}
table <- cbind(table, t(vec2), t(vec)) #adding N and statistic columns
colnames(table) <- c("Parent1", "Parent2", "Individual", "N", "Statistic") #labelling columns
DF <- rbind(DF, as.data.frame(subset(table, select = c("Parent1", "Parent2", "Individual", "N", "Statistic")))) #final dataframe
}
return(DF)
}
| /R/proportion.R | no_license | sbeazley/Paternity-Functions | R | false | false | 1,942 | r | #########################################################################
#
# Package:
#
# File: proportion.R
# Contains: proportion
#
# Written by Samuel Beazley and Rodrigo Amadeu
#
# First version: March-2021
# Last update: 5-Aug-2021
#
#########################################################################
#'
#' Test parentage of individual
#'
#' Given individual and a vectors of possible parents, function returns dataframe of proportion of pedigree conflict with each possible trio
#'
#' @param parents a vector with strings related to the name of the suspected parents
#' @param individual a string value with the individual name you are testing
#' @param data the dataframe from which the data is from
#'
#' @return A dataframe of different combinations of parents and individual with the proportion of pedigree conflicts in each trio
#'
#' @examples
#' data(potato.data)
#' proportion(parents = c("W6511.1R","VillettaRose","W9914.1R"),
#' individual = "W15268.1R",
#' data = potato.data)
#'
#' @export
proportion <- function(parents, individual, data)
{
DF <- data.frame()
# Loop over each tested individual; a single name (as documented) or a
# vector of names both work.
# BUG FIX: the original iterated over an undefined global 'individuals'
# instead of the 'individual' argument, so the function only worked when a
# variable of that name happened to exist in the calling environment.
for(indiv in individual)
{
# All unique unordered pairs of candidate parents.
table <- gtools::combinations(n = length(parents), r = 2, repeats.allowed = FALSE, v = parents)
table <- cbind(table, rep(indiv, dim(table)[1]) ) #creating table of trios to test
vec <- c()  # conflict statistic per trio
vec2 <- c() # number of informative markers per trio
for(i in seq_len(dim(table)[1]))
{
# paternity() expects marker columns ordered parent1, individual, parent2.
tmp <- paternity(cbind(data[[ table[i,1] ]], data[[ table[i,3] ]], data[[ table[i,2] ]]))
vec <- cbind(vec,tmp[1])
vec2 <- cbind(vec2,tmp[2])
}
table <- cbind(table, t(vec2), t(vec)) #adding N and statistic columns
colnames(table) <- c("Parent1", "Parent2", "Individual", "N", "Statistic") #labelling columns
DF <- rbind(DF, as.data.frame(subset(table, select = c("Parent1", "Parent2", "Individual", "N", "Statistic")))) #final dataframe
}
return(DF)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGMM.R
\name{IGMM}
\alias{IGMM}
\title{Iterative Generalized Method of Moments -- IGMM}
\usage{
IGMM(
y,
type = c("h", "hh", "s"),
skewness.x = 0,
kurtosis.x = 3,
tau.init = get_initial_tau(y, type),
robust = FALSE,
tol = .Machine$double.eps^0.25,
location.family = TRUE,
not.negative = NULL,
max.iter = 100,
delta.lower = -1,
delta.upper = 3
)
}
\arguments{
\item{y}{a numeric vector of real values.}
\item{type}{type of Lambert W \eqn{\times} F distribution: skewed \code{"s"};
heavy-tail \code{"h"}; or skewed heavy-tail \code{"hh"}.}
\item{skewness.x}{theoretical skewness of input X; default \code{0}
(symmetric distribution).}
\item{kurtosis.x}{theoretical kurtosis of input X; default \code{3} (Normal
distribution reference).}
\item{tau.init}{starting values for IGMM algorithm; default:
\code{\link{get_initial_tau}}. See also \code{\link{gamma_Taylor}} and
\code{\link{delta_Taylor}}.}
\item{robust}{logical; only used for \code{type = "s"}. If \code{TRUE} a
robust estimate of asymmetry is used (see
\code{\link{medcouple_estimator}}); default: \code{FALSE}.}
\item{tol}{a positive scalar specifying the tolerance level for terminating
the iterative algorithm. Default: \code{.Machine$double.eps^0.25}}
\item{location.family}{logical; tell the algorithm whether the underlying
input should have a location family distribution (for example, Gaussian
input); default: \code{TRUE}. If \code{FALSE} (e.g., for
\code{"exp"}onential input), then \code{tau['mu_x'] = 0} throughout the
optimization.}
\item{not.negative}{logical; if \code{TRUE}, the estimate for \eqn{\gamma} or
\eqn{\delta} is restricted to non-negative reals. If it is set to
\code{NULL} (default) then it will be set internally to \code{TRUE} for
heavy-tail(s) Lambert W\eqn{ \times} F distributions (\code{type = "h"}
or \code{"hh"}). For skewed Lambert W\eqn{ \times} F (\code{type = "s"})
it will be set to \code{FALSE}, unless it is not a location-scale family
(see \code{\link{get_distname_family}}).}
\item{max.iter}{maximum number of iterations; default: \code{100}.}
\item{delta.lower, delta.upper}{lower and upper bound for
\code{\link{delta_GMM}} optimization. By default: \code{-1} and \code{3}
which covers most real-world heavy-tail scenarios.}
}
\value{
A list of class \code{LambertW_fit}:
\item{tol}{see Arguments}
\item{data}{ data \code{y}}
\item{n}{ number of observations}
\item{type}{see Arguments}
\item{tau.init}{ starting values for \eqn{\tau} }
\item{tau}{ IGMM estimate for \eqn{\tau} }
\item{tau.trace}{entire iteration trace of \eqn{\tau^{(k)}}, \eqn{k = 0, ..., K}, where
\code{K <= max.iter}.}
\item{sub.iterations}{number of iterations only performed in GMM algorithm to find optimal \eqn{\gamma} (or \eqn{\delta})}
\item{iterations}{number of iterations to update \eqn{\mu_x} and
\eqn{\sigma_x}. See References for details.}
\item{hessian}{ Hessian matrix (obtained from simulations; see References)}
\item{call}{function call}
\item{skewness.x, kurtosis.x}{ see Arguments}
\item{distname}{ a character string describing distribution characteristics given
the target theoretical skewness/kurtosis for the input. Same information as \code{skewness.x} and \code{kurtosis.x} but human-readable.}
\item{location.family}{see Arguments}
\item{message}{message from the optimization method. What kind of convergence?}
\item{method}{estimation method; here: \code{"IGMM"}}
}
\description{
An iterative method of moments estimator to find this \eqn{\tau = (\mu_x,
\sigma_x, \gamma)} for \code{type = 's'} (\eqn{\tau = (\mu_x, \sigma_x,
\delta)} for \code{type = 'h'} or \eqn{\tau = (\mu_x, \sigma_x, \delta_l,
\delta_r)} for \code{type = "hh"}) which minimizes the distance between
the sample and theoretical skewness (or kurtosis) of \eqn{\boldsymbol x}
and X.
This algorithm is only well-defined for data with finite mean and variance
input X. See \code{\link{analyze_convergence}} and references therein
for details.
}
\details{
For algorithm details see the References.
}
\examples{
# estimate tau for the skewed version of a Normal
y <- rLambertW(n = 1000, theta = list(beta = c(2, 1), gamma = 0.2),
distname = "normal")
fity <- IGMM(y, type = "s")
fity
summary(fity)
plot(fity)
# estimate tau for the skewed version of an exponential
y <- rLambertW(n = 1000, theta = list(beta = 1, gamma = 0.5),
distname = "exp")
fity <- IGMM(y, type = "s", skewness.x = 2, location.family = FALSE)
fity
summary(fity)
plot(fity)
# estimate theta for the heavy-tailed version of a Normal = Tukey's h
y <- rLambertW(n = 500, theta = list(beta = c(2, 1), delta = 0.2),
distname = "normal")
system.time(
fity <- IGMM(y, type = "h")
)
fity
summary(fity)
plot(fity)
}
\seealso{
\code{\link{delta_GMM}}, \code{\link{gamma_GMM}}, \code{\link{analyze_convergence}}
}
\author{
Georg M. Goerg
}
\keyword{iteration}
\keyword{optimize}
| /fuzzedpackages/LambertW/man/IGMM.Rd | no_license | akhikolla/testpackages | R | false | true | 5,019 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGMM.R
\name{IGMM}
\alias{IGMM}
\title{Iterative Generalized Method of Moments -- IGMM}
\usage{
IGMM(
y,
type = c("h", "hh", "s"),
skewness.x = 0,
kurtosis.x = 3,
tau.init = get_initial_tau(y, type),
robust = FALSE,
tol = .Machine$double.eps^0.25,
location.family = TRUE,
not.negative = NULL,
max.iter = 100,
delta.lower = -1,
delta.upper = 3
)
}
\arguments{
\item{y}{a numeric vector of real values.}
\item{type}{type of Lambert W \eqn{\times} F distribution: skewed \code{"s"};
heavy-tail \code{"h"}; or skewed heavy-tail \code{"hh"}.}
\item{skewness.x}{theoretical skewness of input X; default \code{0}
(symmetric distribution).}
\item{kurtosis.x}{theoretical kurtosis of input X; default \code{3} (Normal
distribution reference).}
\item{tau.init}{starting values for IGMM algorithm; default:
\code{\link{get_initial_tau}}. See also \code{\link{gamma_Taylor}} and
\code{\link{delta_Taylor}}.}
\item{robust}{logical; only used for \code{type = "s"}. If \code{TRUE} a
robust estimate of asymmetry is used (see
\code{\link{medcouple_estimator}}); default: \code{FALSE}.}
\item{tol}{a positive scalar specifying the tolerance level for terminating
the iterative algorithm. Default: \code{.Machine$double.eps^0.25}}
\item{location.family}{logical; tell the algorithm whether the underlying
input should have a location family distribution (for example, Gaussian
input); default: \code{TRUE}. If \code{FALSE} (e.g., for
\code{"exp"}onential input), then \code{tau['mu_x'] = 0} throughout the
optimization.}
\item{not.negative}{logical; if \code{TRUE}, the estimate for \eqn{\gamma} or
\eqn{\delta} is restricted to non-negative reals. If it is set to
\code{NULL} (default) then it will be set internally to \code{TRUE} for
heavy-tail(s) Lambert W\eqn{ \times} F distributions (\code{type = "h"}
or \code{"hh"}). For skewed Lambert W\eqn{ \times} F (\code{type = "s"})
it will be set to \code{FALSE}, unless it is not a location-scale family
(see \code{\link{get_distname_family}}).}
\item{max.iter}{maximum number of iterations; default: \code{100}.}
\item{delta.lower, delta.upper}{lower and upper bound for
\code{\link{delta_GMM}} optimization. By default: \code{-1} and \code{3}
which covers most real-world heavy-tail scenarios.}
}
\value{
A list of class \code{LambertW_fit}:
\item{tol}{see Arguments}
\item{data}{ data \code{y}}
\item{n}{ number of observations}
\item{type}{see Arguments}
\item{tau.init}{ starting values for \eqn{\tau} }
\item{tau}{ IGMM estimate for \eqn{\tau} }
\item{tau.trace}{entire iteration trace of \eqn{\tau^{(k)}}, \eqn{k = 0, ..., K}, where
\code{K <= max.iter}.}
\item{sub.iterations}{number of iterations only performed in GMM algorithm to find optimal \eqn{\gamma} (or \eqn{\delta})}
\item{iterations}{number of iterations to update \eqn{\mu_x} and
\eqn{\sigma_x}. See References for details.}
\item{hessian}{ Hessian matrix (obtained from simulations; see References)}
\item{call}{function call}
\item{skewness.x, kurtosis.x}{ see Arguments}
\item{distname}{ a character string describing distribution characteristics given
the target theoretical skewness/kurtosis for the input. Same information as \code{skewness.x} and \code{kurtosis.x} but human-readable.}
\item{location.family}{see Arguments}
\item{message}{message from the optimization method. What kind of convergence?}
\item{method}{estimation method; here: \code{"IGMM"}}
}
\description{
An iterative method of moments estimator to find this \eqn{\tau = (\mu_x,
\sigma_x, \gamma)} for \code{type = 's'} (\eqn{\tau = (\mu_x, \sigma_x,
\delta)} for \code{type = 'h'} or \eqn{\tau = (\mu_x, \sigma_x, \delta_l,
\delta_r)} for \code{type = "hh"}) which minimizes the distance between
the sample and theoretical skewness (or kurtosis) of \eqn{\boldsymbol x}
and X.
This algorithm is only well-defined for data with finite mean and variance
input X. See \code{\link{analyze_convergence}} and references therein
for details.
}
\details{
For algorithm details see the References.
}
\examples{
# estimate tau for the skewed version of a Normal
y <- rLambertW(n = 1000, theta = list(beta = c(2, 1), gamma = 0.2),
distname = "normal")
fity <- IGMM(y, type = "s")
fity
summary(fity)
plot(fity)
# estimate tau for the skewed version of an exponential
y <- rLambertW(n = 1000, theta = list(beta = 1, gamma = 0.5),
distname = "exp")
fity <- IGMM(y, type = "s", skewness.x = 2, location.family = FALSE)
fity
summary(fity)
plot(fity)
# estimate theta for the heavy-tailed version of a Normal = Tukey's h
y <- rLambertW(n = 500, theta = list(beta = c(2, 1), delta = 0.2),
distname = "normal")
system.time(
fity <- IGMM(y, type = "h")
)
fity
summary(fity)
plot(fity)
}
\seealso{
\code{\link{delta_GMM}}, \code{\link{gamma_GMM}}, \code{\link{analyze_convergence}}
}
\author{
Georg M. Goerg
}
\keyword{iteration}
\keyword{optimize}
|
## Author: Eduardo César
## Created: 15/11/2014
## Last modified: 15/11/2014
## This file contains two functions:
## makeCacheMatrix, which creates a special matrix object able to cache its
## inverse
## cacheSolve, which returns the cached inverse of a special matrix if it has
## been previously calculated and the matrix has not changed, or
## calculates, caches and returns the inverse of the matrix if not
## Function: makeCacheMatrix
## Author: Eduardo César
## Created: 15/11/2014
## Last modified: 15/12/2014
## Parameters: x data contents (matrix) of the special matrix object
## Short description: this function creates a special matrix object able to
## cache its inverse
## Detailed Description: this function creates an object (in the OO sense of
## the word), which includes a matrix (the data) and
## several methods for managing this data (set and get)
## and for managing and caching the inverse of the
## matrix (setinv, getinv). All these methods are packed
## in a list for them to be accessed.
## Returns: a special matrix object
## Build a matrix wrapper that memoises its inverse.
##
## Returns a list of four accessor closures that share one environment
## holding the matrix and its cached inverse:
##   set(y)    -- replace the stored matrix and drop any cached inverse
##   get()     -- return the stored matrix
##   setinv(i) -- store a computed inverse in the cache
##   getinv()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix invalidates the cache
    },
    get = function() x,
    setinv = function(inv) cached_inverse <<- inv,
    getinv = function() cached_inverse
  )
}
## Function: cacheSolve
## Author: Eduardo César
## Created: 15/11/2014
## Last modified: 15/12/2014
## Parameters: x special matrix object
## Short description: this function returns the cached inverse of a special
## matrix if it has been previously calculated and the matrix
## has not changed, or calculates, caches and returns the
## inverse of the matrix if not
## Detailed Description: this function gets the cached value of the special
## matrix object x, if this value is NULL it means that
## either the matrix has changed or its inverse has
## never been calculated. In this case the inverse of the
## matrix stored in the special matrix object is calculated
## using the solve function (the matrix must be invertible),
## the inverse is cached in the special matrix object and
## returned to the user.
## If not then the inverse has been calculated in the past
## and the function returns the cached value.
## Returns: the inverse of the matrix of the special matrix object.
## Return the inverse of the special matrix object `x`.
##
## On a cache hit the stored inverse is returned (with a message);
## otherwise the inverse is computed with solve() -- extra arguments are
## forwarded -- cached via x$setinv(), and returned.  The matrix held by
## `x` must be invertible.
cacheSolve <- function(x, ...) {
  hit <- x$getinv()
  if (is.null(hit)) {
    # cache miss: compute, then store for later calls
    hit <- solve(x$get(), ...)
    x$setinv(hit)
  } else {
    message("getting cached data")
  }
  hit
}
| /cachematrix.R | no_license | ecesar/ProgrammingAssignment2 | R | false | false | 3,211 | r | ## Author: Eduardo César
## Created: 15/11/2014
## Last modified: 15/11/2014
## This file contains two functions:
## makeCacheMatrix, which creates a special matrix object able to cache its
## inverse
## cacheSolve, which returns the cached inverse of a special matrix if it has
## been previously calculated and the matrix has not changed, or
## calculates, caches and returns the inverse of the matrix if not
## Function: makeCacheMatrix
## Author: Eduardo César
## Created: 15/11/2014
## Last modified: 15/12/2014
## Parameters: x data contents (matrix) of the special matrix object
## Short description: this function creates a special matrix object able to
## cache its inverse
## Detailed Description: this function creates an object (in the OO sense of
## the word), which includes a matrix (the data) and
## several methods for managing this data (set and get)
## and for managing and caching the inverse of the
## matrix (setinv, getinv). All these methods are packed
## in a list for them to be accessed.
## Returns: a special matrix object
# Constructor for a "cache matrix": a list of closures sharing one
# environment that holds the matrix `x` and its lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
# cached inverse; NULL until cacheSolve() stores a value via setinv()
inverse <- NULL
# replace the stored matrix and invalidate the cached inverse
set <- function(y) {
x <<- y
inverse <<- NULL
}
# return the stored matrix
get <- function() x
# store a freshly computed inverse in the shared environment
setinv <- function(inv) inverse <<- inv
# return the cached inverse (NULL if none has been stored yet)
getinv <- function() inverse
# expose the four accessors as a named list
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Function: cacheSolve
## Author: Eduardo César
## Created: 15/11/2014
## Last modified: 15/12/2014
## Parameters: x special matrix object
## Short description: this function returns the cached inverse of a special
## matrix if it has been previously calculated and the matrix
## has not changed, or calculates, caches and returns the
## inverse of the matrix if not
## Detailed Description: this function gets the cached value of the special
## matrix object x, if this value is NULL it means that
## either the matrix has changed or its inverse has
## never been calculated. In this case the inverse of the
## matrix stored in the special matrix object is calculated
## using the solve function (the matrix must be invertible),
## the inverse is cached in the special matrix object and
## returned to the user.
## If not then the inverse has been calculated in the past
## and the function returns the cached value.
## Returns: the inverse of the matrix of the special matrix object.
# Return the inverse of the special matrix object `x`, computing and
# caching it on first use; extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getinv()
# cache hit: inverse was stored earlier and the matrix has not changed
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
# cache miss: compute the inverse (the stored matrix must be invertible)
data <- x$get()
inverse <- solve(data, ...)
# store the result so later calls can reuse it
x$setinv(inverse)
inverse
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rca.R
\name{rca}
\alias{rca}
\title{Relevant Component Analysis}
\usage{
rca(x, chunks, useD = NULL)
}
\arguments{
\item{x}{matrix or data frame of original data.
Each row is a feature vector of a data instance.}
\item{chunks}{list of \code{k} numerical vectors.
Each vector represents a chunklet, the elements
in the vectors indicate where the samples locate
in \code{x}. See examples for more information.}
\item{useD}{optional. When not given, RCA is done in the
original dimension and \code{B} is full rank. When \code{useD} is given,
RCA is preceded by constraints based LDA which reduces
the dimension to \code{useD}. \code{B} in this case is of rank \code{useD}.}
}
\value{
list of the RCA results:
\item{B}{The RCA suggested Mahalanobis matrix.
Distances between data points x1, x2 should be
computed by (x2 - x1)' * B * (x2 - x1)}
\item{A}{The RCA suggested transformation of the data.
The data should be transformed by A * data}
\item{newX}{The data after the RCA transformation (A).
newData = A * data}
The three returned argument are just different forms of the same output.
If one is interested in a Mahalanobis metric over the original data space,
the first argument is all she/he needs. If a transformation into another
space (where one can use the Euclidean metric) is preferred, the second
returned argument is sufficient. Using A and B is equivalent in the
following sense:
if y1 = A * x1, y2 = A * y2 then
(x2 - x1)' * B * (x2 - x1) = (y2 - y1)' * (y2 - y1)
}
\description{
Performs relevant component analysis on the given data.
}
\details{
The RCA function takes a data set and a set of positive constraints
as arguments and returns a linear transformation of the data space
into better representation, alternatively, a Mahalanobis metric
over the data space.
Relevant component analysis consists of three steps:
\enumerate{\item locate the test point
\item compute the distances between the test points
\item find \eqn{k} shortest distances and the bla}
The new representation is known to be optimal in an information
theoretic sense under a constraint of keeping equivalent data
points close to each other.
}
\note{
Note that any different sets of instances (chunklets),
e.g. {1, 3, 7} and {4, 6}, might belong to the
same class and might belong to different classes.
}
\examples{
\dontrun{
library("MASS") # generate synthetic multivariate normal data
set.seed(42)
k <- 100L # sample size of each class
n <- 3L # specify how many classes
N <- k * n # total sample size
x1 <- mvrnorm(k, mu = c(-16, 8), matrix(c(15, 1, 2, 10), ncol = 2))
x2 <- mvrnorm(k, mu = c(0, 0), matrix(c(15, 1, 2, 10), ncol = 2))
x3 <- mvrnorm(k, mu = c(16, -8), matrix(c(15, 1, 2, 10), ncol = 2))
x <- as.data.frame(rbind(x1, x2, x3)) # predictors
y <- gl(n, k) # response
# fully labeled data set with 3 classes
# need to use a line in 2D to classify
plot(x[, 1L], x[, 2L],
bg = c("#E41A1C", "#377EB8", "#4DAF4A")[y],
pch = rep(c(22, 21, 25), each = k)
)
abline(a = -10, b = 1, lty = 2)
abline(a = 12, b = 1, lty = 2)
# generate synthetic chunklets
chunks <- vector("list", 300)
for (i in 1:100) chunks[[i]] <- sample(1L:100L, 10L)
for (i in 101:200) chunks[[i]] <- sample(101L:200L, 10L)
for (i in 201:300) chunks[[i]] <- sample(201L:300L, 10L)
chks <- x[unlist(chunks), ]
# make "chunklet" vector to feed the chunks argument
chunksvec <- rep(-1L, nrow(x))
for (i in 1L:length(chunks)) {
for (j in 1L:length(chunks[[i]])) {
chunksvec[chunks[[i]][j]] <- i
}
}
# relevant component analysis
rcs <- rca(x, chunksvec)
# learned transformation of the data
rcs$A
# learned Mahalanobis distance metric
rcs$B
# whitening transformation applied to the chunklets
chkTransformed <- as.matrix(chks) \%*\% rcs$A
# original data after applying RCA transformation
# easier to classify - using only horizontal lines
xnew <- rcs$newX
plot(xnew[, 1L], xnew[, 2L],
bg = c("#E41A1C", "#377EB8", "#4DAF4A")[gl(n, k)],
pch = c(rep(22, k), rep(21, k), rep(25, k))
)
abline(a = -15, b = 0, lty = 2)
abline(a = 16, b = 0, lty = 2)
}
}
\references{
Aharon Bar-Hillel, Tomer Hertz, Noam Shental, and Daphna Weinshall (2003).
Learning Distance Functions using Equivalence Relations.
\emph{Proceedings of 20th International Conference on
Machine Learning (ICML2003)}.
}
\seealso{
See \code{\link{dca}} for exploiting negative constraints.
}
\author{
Nan Xiao <\url{https://nanx.me}>
}
\keyword{mahalanobis}
\keyword{metric}
\keyword{rca}
\keyword{transformation}
| /man/rca.Rd | permissive | vishalbelsare/dml | R | false | true | 4,593 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rca.R
\name{rca}
\alias{rca}
\title{Relevant Component Analysis}
\usage{
rca(x, chunks, useD = NULL)
}
\arguments{
\item{x}{matrix or data frame of original data.
Each row is a feature vector of a data instance.}
\item{chunks}{list of \code{k} numerical vectors.
Each vector represents a chunklet, the elements
in the vectors indicate where the samples locate
in \code{x}. See examples for more information.}
\item{useD}{optional. When not given, RCA is done in the
original dimension and \code{B} is full rank. When \code{useD} is given,
RCA is preceded by constraints based LDA which reduces
the dimension to \code{useD}. \code{B} in this case is of rank \code{useD}.}
}
\value{
list of the RCA results:
\item{B}{The RCA suggested Mahalanobis matrix.
Distances between data points x1, x2 should be
computed by (x2 - x1)' * B * (x2 - x1)}
\item{A}{The RCA suggested transformation of the data.
The data should be transformed by A * data}
\item{newX}{The data after the RCA transformation (A).
newData = A * data}
The three returned argument are just different forms of the same output.
If one is interested in a Mahalanobis metric over the original data space,
the first argument is all she/he needs. If a transformation into another
space (where one can use the Euclidean metric) is preferred, the second
returned argument is sufficient. Using A and B is equivalent in the
following sense:
if y1 = A * x1, y2 = A * y2 then
(x2 - x1)' * B * (x2 - x1) = (y2 - y1)' * (y2 - y1)
}
\description{
Performs relevant component analysis on the given data.
}
\details{
The RCA function takes a data set and a set of positive constraints
as arguments and returns a linear transformation of the data space
into better representation, alternatively, a Mahalanobis metric
over the data space.
Relevant component analysis consists of three steps:
\enumerate{\item locate the test point
\item compute the distances between the test points
\item find \eqn{k} shortest distances and the bla}
The new representation is known to be optimal in an information
theoretic sense under a constraint of keeping equivalent data
points close to each other.
}
\note{
Note that any different sets of instances (chunklets),
e.g. {1, 3, 7} and {4, 6}, might belong to the
same class and might belong to different classes.
}
\examples{
\dontrun{
library("MASS") # generate synthetic multivariate normal data
set.seed(42)
k <- 100L # sample size of each class
n <- 3L # specify how many classes
N <- k * n # total sample size
x1 <- mvrnorm(k, mu = c(-16, 8), matrix(c(15, 1, 2, 10), ncol = 2))
x2 <- mvrnorm(k, mu = c(0, 0), matrix(c(15, 1, 2, 10), ncol = 2))
x3 <- mvrnorm(k, mu = c(16, -8), matrix(c(15, 1, 2, 10), ncol = 2))
x <- as.data.frame(rbind(x1, x2, x3)) # predictors
y <- gl(n, k) # response
# fully labeled data set with 3 classes
# need to use a line in 2D to classify
plot(x[, 1L], x[, 2L],
bg = c("#E41A1C", "#377EB8", "#4DAF4A")[y],
pch = rep(c(22, 21, 25), each = k)
)
abline(a = -10, b = 1, lty = 2)
abline(a = 12, b = 1, lty = 2)
# generate synthetic chunklets
chunks <- vector("list", 300)
for (i in 1:100) chunks[[i]] <- sample(1L:100L, 10L)
for (i in 101:200) chunks[[i]] <- sample(101L:200L, 10L)
for (i in 201:300) chunks[[i]] <- sample(201L:300L, 10L)
chks <- x[unlist(chunks), ]
# make "chunklet" vector to feed the chunks argument
chunksvec <- rep(-1L, nrow(x))
for (i in 1L:length(chunks)) {
for (j in 1L:length(chunks[[i]])) {
chunksvec[chunks[[i]][j]] <- i
}
}
# relevant component analysis
rcs <- rca(x, chunksvec)
# learned transformation of the data
rcs$A
# learned Mahalanobis distance metric
rcs$B
# whitening transformation applied to the chunklets
chkTransformed <- as.matrix(chks) \%*\% rcs$A
# original data after applying RCA transformation
# easier to classify - using only horizontal lines
xnew <- rcs$newX
plot(xnew[, 1L], xnew[, 2L],
bg = c("#E41A1C", "#377EB8", "#4DAF4A")[gl(n, k)],
pch = c(rep(22, k), rep(21, k), rep(25, k))
)
abline(a = -15, b = 0, lty = 2)
abline(a = 16, b = 0, lty = 2)
}
}
\references{
Aharon Bar-Hillel, Tomer Hertz, Noam Shental, and Daphna Weinshall (2003).
Learning Distance Functions using Equivalence Relations.
\emph{Proceedings of 20th International Conference on
Machine Learning (ICML2003)}.
}
\seealso{
See \code{\link{dca}} for exploiting negative constraints.
}
\author{
Nan Xiao <\url{https://nanx.me}>
}
\keyword{mahalanobis}
\keyword{metric}
\keyword{rca}
\keyword{transformation}
|
##################################################################
#### IFI Project: Bar Chart of Outcomes for Cases ################
##################################################################
# REVISED BY SARAH LU
# July 11, 2017
# NOTE(review): setwd() hard-codes a machine-specific path and attach()
# masks data-frame column names in the search path -- confirm before reuse;
# referencing columns as dat$col would be safer than attach().
setwd("/Users/sarah/Google Drive/IFIs_and_HR/Data")
library(foreign)  # read.dta() for Stata files
library(ggplot2)  # bar charts below
# case-level data: one row per complaint case
dat <- read.dta("data/dta files/Cleaned_Complete_WBHR_Data_07052017.dta")
attach(dat)
ip_vec <- c(0)
###############################################
######IP GRAPH#################################
###############################################
# Tally Inspection Panel cases (cao == 0) by project type (codes 1-9) and
# outcome.  For each project type four counts are appended to ip_vec in the
# fixed order: no positive outcome, harm acknowledged, project change,
# compensation (4 outcomes x 9 types = 36 entries).
#initialize counters for outcome types
no_pos_outcome <- 0
harm_ack <- 0
proj_change <- 0
compens <- 0
#loop through project types
for (i in 1:9) {
  #loop through all rows in dataframe
  for (j in seq_len(nrow(dat))) {
    # NA guards (consistent with the CAO loop later in this script): an NA
    # code would make the scalar `if` conditions NA and abort the script.
    # Rows with missing codes are simply not counted.
    if (is.na(dat[j, "cao"]) || is.na(dat[j, "project_type"])) next
    # keep only Inspection Panel cases (cao = 0) of the current project type
    if (dat[j, "cao"] != 0 || dat[j, "project_type"] != i) next
    #look for no positive outcome (no harm acknowledged, project change, or compensation)
    if (!is.na(dat[j, "harm_acknowledged"]) && dat[j, "harm_acknowledged"] == 0 &&
        !is.na(dat[j, "project_change"]) && dat[j, "project_change"] == 0 &&
        !is.na(dat[j, "compensation"]) && dat[j, "compensation"] == 0) {
      no_pos_outcome <- no_pos_outcome + 1
    }
    #look for harm acknowledged
    if (!is.na(dat[j, "harm_acknowledged"]) && dat[j, "harm_acknowledged"] == 1) {
      harm_ack <- harm_ack + 1
    }
    #look for project change
    if (!is.na(dat[j, "project_change"]) && dat[j, "project_change"] == 1) {
      proj_change <- proj_change + 1
    }
    #look for compensation
    if (!is.na(dat[j, "compensation"]) && dat[j, "compensation"] == 1) {
      compens <- compens + 1
    }
  }
  #add this project type's counts to the vector, then reset the counters
  ip_vec <- append(ip_vec, no_pos_outcome)
  ip_vec <- append(ip_vec, harm_ack)
  ip_vec <- append(ip_vec, proj_change)
  ip_vec <- append(ip_vec, compens)
  no_pos_outcome <- 0
  harm_ack <- 0
  proj_change <- 0
  compens <- 0
}
ip_vec <- ip_vec[2:37] #get rid of first placeholder value
###################################
#####create data frame for IP plot
# Build the long-format data frame ggplot needs: one row per
# (project type, outcome) pair, with the matching count from ip_vec.
#create vector of outcome labels (repeated once per project type)
outcomes1 <- c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")
outcomes2 <- rep(outcomes1, 9)
#create vector for project type (4 outcome rows per type, in ip_vec order)
projecttype <- c("1", "1", "1", "1", "2", "2", "2", "2", "3", "3", "3", "3", "4", "4", "4", "4", "5", "5", "5", "5", "6", "6", "6", "6", "7", "7", "7", "7", "8", "8", "8", "8", "9", "9", "9", "9")
IP_df <- data.frame(
Outcome_type = factor(outcomes2,
levels = c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")),
Project = factor(projecttype,
levels = c("1","2","3","4","5","6","7","8","9")),
Num_cases = ip_vec
)
#make IP bar chart (grouped bars: one group of four outcome bars per project type)
p= ggplot(data = IP_df, aes(x = Project, y = Num_cases, fill = Outcome_type))+
geom_bar(stat = "identity", position = position_dodge(), width = 0.75)+
#change y axis to go from 0 - 45, brings the y axis down to 0
scale_y_continuous(limits = c(0,45), expand = c(0, 0))+
#modify legend, change bar colors (greyscale, light to dark across outcomes)
scale_fill_manual(values=c( "#b2b2b2", "#777777", "#444444", "#000000"),
name="", #legend title
breaks=c("No Positive Outcome", "Harm Acknowledged", "Project Change", "Compensation"))+
#replace numeric project-type codes with readable axis labels
scale_x_discrete(breaks = c("1", "2", "3", "4", "5", "6", "7", "8", "9"),
labels = c("Infrastructure",
"Mining/Resource Extraction", "Land Management/Administration",
"Environmental Sustainability", "Agriculture and Forestry",
"Manufacturing and Services",
"Other (General Poverty Reduction)",
"Government Capacity Building", "Private Sector Capacity Building"))+
theme_classic() + theme(panel.grid=element_blank(), panel.border=element_blank())+
theme(text = element_text(size=14),
legend.position = c(0.85, 0.9),
axis.title.x = element_blank(),
axis.text.x = element_text(hjust=1, angle=45))+ #no x axis title
#y axis title
ylab("Number of Cases")+
#xlab("Issue")+
#plot title
labs(title = "Inspection Panel")
p
###############################################
######CAO GRAPH#################################
###############################################
# Same tally as the IP section above, but for Compliance Advisor/Ombudsman
# cases (cao == 1).  Four counts per project type are appended to cao_vec
# in the order: no positive outcome, harm acknowledged, project change,
# compensation.
#initialize vector for CAO outcome counts
cao_vec <- c(0)
#initialize counters for outcome types
no_pos_outcome1 <- 0
harm_ack1 <- 0
proj_change1 <- 0
compens1 <- 0
#loop through project types
for(i in 1:9){
#loop through all rows in dataframe
for(j in 1:nrow(dat)){
#look for no positive outcome (no harm acknowledged, project change, or compensation)
#also sorts through CAO data only (cao = 1)
# NOTE(review): dat[j, "cao"] itself is not NA-guarded here -- an NA in
# that column would make the condition NA and abort; confirm the column
# has no missing values.
if( !is.na(dat[j, "project_type"])
&& !is.na(dat[j, "harm_acknowledged"])
&& !is.na( dat[j, "project_change"])
&& !is.na(dat[j, "compensation"])
&& dat[j, "cao"] == 1 && dat[j, "project_type"] == i && dat[j, "harm_acknowledged"] == 0
&& dat[j, "project_change"] == 0 && dat[j, "compensation"] == 0 ){
no_pos_outcome1 <- no_pos_outcome1 + 1
}
#look for harm acknowledged
#also sorts through CAO data only (cao = 1)
if(!is.na(dat[j, "project_type"]) && !is.na(dat[j, "harm_acknowledged"]) &&
dat[j, "cao"] == 1 && dat[j, "project_type"] == i && dat[j, "harm_acknowledged"] == 1){
harm_ack1 <- harm_ack1 + 1
}
#look for project change
#also sorts through CAO data only (cao = 1)
if(!is.na(dat[j, "project_type"]) && !is.na(dat[j, "project_change"]) &&
dat[j, "cao"] == 1 && dat[j, "project_type"] == i && dat[j, "project_change"] == 1){
proj_change1 <- proj_change1 + 1
}
#look for compensation
#also sorts through CAO data only (cao = 1)
if(!is.na(dat[j, "project_type"]) && !is.na(dat[j, "compensation"]) &&
dat[j, "cao"] == 1 && dat[j, "project_type"] == i && dat[j, "compensation"] == 1){
compens1 <- compens1 + 1
}
}
#add this project type's counts to the vector, then reset the counters
cao_vec <- append(cao_vec, no_pos_outcome1)
cao_vec <- append(cao_vec, harm_ack1)
cao_vec <- append(cao_vec, proj_change1)
cao_vec <- append(cao_vec, compens1)
no_pos_outcome1 <- 0
harm_ack1 <- 0
proj_change1 <- 0
compens1 <- 0
}
cao_vec <- cao_vec[2:37] #get rid of first placeholder value
###################################
#####create data frame for CAO plot
# Long-format data frame mirroring IP_df above, filled from cao_vec.
#create vector for outcome
outcomes1 <- c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")
outcomes2 <- rep(outcomes1, 9)
#create vector for project type (4 outcome rows per type, in cao_vec order)
projecttype <- c("1", "1", "1", "1", "2", "2", "2", "2", "3", "3", "3", "3", "4", "4", "4", "4", "5", "5", "5", "5", "6", "6", "6", "6", "7", "7", "7", "7", "8", "8", "8", "8", "9", "9", "9", "9")
CAO_df <- data.frame(
Outcome_type = factor(outcomes2,
levels = c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")),
Project = factor(projecttype,
levels = c("1","2","3","4","5","6","7","8","9")),
Num_cases = cao_vec
)
#make CAO bar chart (same styling as the IP chart above)
q= ggplot(data = CAO_df, aes(x = Project, y = Num_cases, fill = Outcome_type))+
geom_bar(stat = "identity", position = position_dodge(), width = 0.75)+
#change y axis to go from 0 - 45, brings the y axis down to 0
scale_y_continuous(limits = c(0,45), expand = c(0, 0))+
#modify legend, change bar colors
scale_fill_manual(values=c( "#b2b2b2", "#777777", "#444444", "#000000"),
name="", #legend title
breaks=c("No Positive Outcome", "Harm Acknowledged", "Project Change", "Compensation"))+
scale_x_discrete(breaks = c("1", "2", "3", "4", "5", "6", "7", "8", "9"),
labels = c("Infrastructure",
"Mining/Resource Extraction", "Land Management/Administration",
"Environmental Sustainability", "Agriculture and Forestry",
"Manufacturing and Services",
"Other (General Poverty Reduction)",
"Government Capacity Building", "Private Sector Capacity Building"))+
theme_classic() + theme(panel.grid=element_blank(), panel.border=element_blank())+
theme(text = element_text(size=14),
legend.position = c(0.85, 0.9),
axis.title.x = element_blank(),
axis.text.x = element_text(hjust=1, angle=45))+ #no x axis title
#y axis title
ylab("Number of Cases")+
#xlab("Issue")+
#plot title
labs(title = "Compliance Advisor/Ombudsman")
q
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange any number of plot objects on one page using a grid layout.
#
# Plots come in via `...` or as a list in `plotlist`.  `cols` sets the
# number of layout columns unless an explicit `layout` matrix is given,
# in which case `cols` is ignored; cell value k in `layout` marks where
# plot k is drawn (a value spanning several cells spans those cells).
# NOTE(review): `file` is accepted but never used; kept so existing
# callers are not broken.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make one list from the ... arguments and plotlist.
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # Nothing to draw: return instead of erroring in the counting loop below
  # (the original `1:numPlots` would yield c(1, 0) for zero plots).
  if (numPlots == 0) {
    return(invisible(NULL))
  }
  # If layout is NULL, derive an n-row x `cols` grid; cell k holds plot k.
  if (is.null(layout)) {
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot in the viewport cell(s) whose layout value matches i.
    for (i in seq_len(numPlots)) {
      # Get the i,j matrix positions of the regions that contain this subplot.
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Stack the IP chart (p) above the CAO chart (q) in a single column.
multiplot(p,q, cols=1)
| /Figure 10/10_Favorable_Outcomes_SL_07_11.R | no_license | sarah-lu102/world-bank-data-viz | R | false | false | 10,791 | r | ##################################################################
#### IFI Project: Bar Chart of Outcomes for Cases ################
##################################################################
# REVISED BY SARAH LU
# July 11, 2017
setwd("/Users/sarah/Google Drive/IFIs_and_HR/Data")
library(foreign)
library(ggplot2)
dat <- read.dta("data/dta files/Cleaned_Complete_WBHR_Data_07052017.dta")
attach(dat)
ip_vec <- c(0)
###############################################
######IP GRAPH#################################
###############################################
#initialize counters for outcome types
no_pos_outcome <- 0
harm_ack <- 0
proj_change <- 0
compens <- 0
#loop through project types
for(i in 1:9){
#loop through all rows in dataframe
for(j in 1:nrow(dat)){
#look for no positive outcome (no harm acknowledged, project change, or compensation)
#also sorts through IP data only (cao = 0)
if(dat[j, "cao"] == 0 && dat[j, "project_type"] == i && dat[j, "harm_acknowledged"] == 0
&& dat[j, "project_change"] == 0 && dat[j, "compensation"] == 0){
no_pos_outcome <- no_pos_outcome + 1
}
#look for harm acknowledged
#also sorts through IP data only (cao = 0)
if(dat[j, "cao"] == 0 && dat[j, "project_type"] == i && dat[j, "harm_acknowledged"] == 1){
harm_ack <- harm_ack + 1
}
#look for project change
#also sorts through IP data only (cao = 0)
if(dat[j, "cao"] == 0 && dat[j, "project_type"] == i && dat[j, "project_change"] == 1){
proj_change <- proj_change + 1
}
#look for compensation
#also sorts through IP data only (cao = 0)
if(dat[j, "cao"] == 0 && dat[j, "project_type"] == i && dat[j, "compensation"] == 1){
compens <- compens + 1
}
}
#add counts to vector
ip_vec <- append(ip_vec, no_pos_outcome)
ip_vec <- append(ip_vec, harm_ack)
ip_vec <- append(ip_vec, proj_change)
ip_vec <- append(ip_vec, compens)
no_pos_outcome <- 0
harm_ack <- 0
proj_change <- 0
compens <- 0
}
# Drop the placeholder 0 that seeded the vector: 9 project types x 4
# outcome categories leaves the 36 real counts.
ip_vec <- ip_vec[2:37] #get rid of first placeholder value
###################################
#####create data frame for IP plot
#create vector for NGO type
outcomes1 <- c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")
outcomes2 <- rep(outcomes1, 9)
#create vector for project type
# One project-type label per count, in the same order ip_vec was filled
# (four outcome counts per project type).
projecttype <- c("1", "1", "1", "1", "2", "2", "2", "2", "3", "3", "3", "3", "4", "4", "4", "4", "5", "5", "5", "5", "6", "6", "6", "6", "7", "7", "7", "7", "8", "8", "8", "8", "9", "9", "9", "9")
IP_df <- data.frame(
Outcome_type = factor(outcomes2,
levels = c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")),
Project = factor(projecttype,
levels = c("1","2","3","4","5","6","7","8","9")),
Num_cases = ip_vec
)
#make IP bar chart
# Grouped (dodged) bar chart of Inspection Panel case outcomes by project type.
p= ggplot(data = IP_df, aes(x = Project, y = Num_cases, fill = Outcome_type))+
geom_bar(stat = "identity", position = position_dodge(), width = 0.75)+
#change y axis to go from 0 - 45, brings the y axis down to 0
scale_y_continuous(limits = c(0,45), expand = c(0, 0))+
#modify legend, change bar colors
scale_fill_manual(values=c( "#b2b2b2", "#777777", "#444444", "#000000"),
name="", #legend title
breaks=c("No Positive Outcome", "Harm Acknowledged", "Project Change", "Compensation"))+
# Replace the numeric project-type codes with descriptive labels.
scale_x_discrete(breaks = c("1", "2", "3", "4", "5", "6", "7", "8", "9"),
labels = c("Infrastructure",
"Mining/Resource Extraction", "Land Management/Administration",
"Environmental Sustainability", "Agriculture and Forestry",
"Manufacturing and Services",
"Other (General Poverty Reduction)",
"Government Capacity Building", "Private Sector Capacity Building"))+
theme_classic() + theme(panel.grid=element_blank(), panel.border=element_blank())+
theme(text = element_text(size=14),
legend.position = c(0.85, 0.9),
axis.title.x = element_blank(),
axis.text.x = element_text(hjust=1, angle=45))+ #no x axis title
#x and y axis title
ylab("Number of Cases")+
#xlab("Issue")+
#plot title
labs(title = "Inspection Panel")
p
###############################################
######CAO GRAPH#################################
###############################################
# Count CAO case outcomes (cao == 1) for each of the 9 project types.
# For each type we record, in order: no positive outcome, harm acknowledged,
# project change, compensation -- matching the layout expected by CAO_df below.
#
# Vectorised rewrite of the original row-by-row double loop: each count is a
# sum over a logical mask, with explicit NA handling on every column used.
# (The row-wise version would error with `if (NA)` whenever `cao` was NA.)
cao_vec <- integer(0)
for (i in 1:9) {
  # rows belonging to this project type that went through the CAO
  base <- !is.na(dat$cao) & dat$cao == 1 &
    !is.na(dat$project_type) & dat$project_type == i
  harm <- !is.na(dat$harm_acknowledged) & dat$harm_acknowledged == 1
  chg  <- !is.na(dat$project_change) & dat$project_change == 1
  comp <- !is.na(dat$compensation) & dat$compensation == 1
  # "no positive outcome" requires all three outcome columns observed and zero
  none <- !is.na(dat$harm_acknowledged) & dat$harm_acknowledged == 0 &
    !is.na(dat$project_change) & dat$project_change == 0 &
    !is.na(dat$compensation) & dat$compensation == 0
  cao_vec <- c(cao_vec, sum(base & none), sum(base & harm),
               sum(base & chg), sum(base & comp))
}
# cao_vec now holds 9 x 4 = 36 counts, no placeholder to trim.
###################################
#####create data frame for CAO plot
#create vector for outcome
outcomes1 <- c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")
outcomes2 <- rep(outcomes1, 9)
#create vector for project type
# One label per count, matching the fill order of cao_vec above.
projecttype <- c("1", "1", "1", "1", "2", "2", "2", "2", "3", "3", "3", "3", "4", "4", "4", "4", "5", "5", "5", "5", "6", "6", "6", "6", "7", "7", "7", "7", "8", "8", "8", "8", "9", "9", "9", "9")
CAO_df <- data.frame(
Outcome_type = factor(outcomes2,
levels = c("No Positive Outcome", "Harm Acknowledged","Project Change", "Compensation")),
Project = factor(projecttype,
levels = c("1","2","3","4","5","6","7","8","9")),
Num_cases = cao_vec
)
#make CAO bar chart
# Same layout/styling as the Inspection Panel chart `p`, so the two can be
# stacked for comparison with multiplot() below.
q= ggplot(data = CAO_df, aes(x = Project, y = Num_cases, fill = Outcome_type))+
geom_bar(stat = "identity", position = position_dodge(), width = 0.75)+
#change y axis to go from 0 - 45, brings the y axis down to 0
scale_y_continuous(limits = c(0,45), expand = c(0, 0))+
#modify legend, change bar colors
scale_fill_manual(values=c( "#b2b2b2", "#777777", "#444444", "#000000"),
name="", #legend title
breaks=c("No Positive Outcome", "Harm Acknowledged", "Project Change", "Compensation"))+
scale_x_discrete(breaks = c("1", "2", "3", "4", "5", "6", "7", "8", "9"),
labels = c("Infrastructure",
"Mining/Resource Extraction", "Land Management/Administration",
"Environmental Sustainability", "Agriculture and Forestry",
"Manufacturing and Services",
"Other (General Poverty Reduction)",
"Government Capacity Building", "Private Sector Capacity Building"))+
theme_classic() + theme(panel.grid=element_blank(), panel.border=element_blank())+
theme(text = element_text(size=14),
legend.position = c(0.85, 0.9),
axis.title.x = element_blank(),
axis.text.x = element_text(hjust=1, angle=45))+ #no x axis title
#x and y axis title
ylab("Number of Cases")+
#xlab("Issue")+
#plot title
labs(title = "Compliance Advisor/Ombudsman")
q
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# NOTE(review): the `file` argument is accepted but never used in the body.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
# (matrix() fills column-wise, so plot numbers run down each column)
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
# Render the Inspection Panel and CAO charts stacked in one column.
multiplot(p,q, cols=1)
|
#' Show corresponding (or not) factor levels
#'
#' Given two datasets, show which factor levels coincide, and which can
#' only be found in one of the two datasets.
#'
#' @param x first dataset (data.frame or data.table)
#' @param y second dataset (data.frame or data.table)
#' @param by.x name of the factor column in the first dataset
#' @param by.y name of the factor column in the second dataset
#'   (defaults to \code{by.x})
#' @return \code{NULL}, invisibly; called for its printed report.
mergehelp <- function(x, y, by.x, by.y = by.x)
{
    # `[[` extracts a column by name from data.frames and data.tables alike,
    # so no class-specific branching is required.  (The old data.table branch
    # sorted the column *name* rather than its values -- with `with = FALSE`
    # the j expression is evaluated in the calling scope -- and therefore
    # returned the full, unsorted column.)
    xby <- sort(unique(x[[by.x]]))
    yby <- sort(unique(y[[by.y]]))
    cat("\n\nIn x, but NOT in y:\n")
    cat(setdiff(xby, yby), sep = "; ")
    cat("\n\nIn y, but NOT in x:\n")
    cat(setdiff(yby, xby), sep = "; ")
    cat("\n")
    invisible(NULL)
}
| /R/mergehelp.R | no_license | zauster/ormisc | R | false | false | 815 | r | #' Show corresponding (or not) factor levels
#'
#' Given two datasets, show what factor levels coincide, and which can
#' only be found in one of the two datasets
#' @param x first dataset
#' @param y second dataset
#' @param by.x factor in first dataset
#' @param by.y factor in second dataset (defaults to \code{by.x})
mergehelp <- function(x, y, by.x, by.y = by.x)
{
# NOTE(review): with `with = FALSE` the j expression is evaluated in the
# calling scope, so `sort(unique(by.x))` sorts the column *name* (a length-1
# character) -- the data.table branch therefore returns the full column,
# unsorted and with duplicates.  Presumably `sort(unique(x[[by.x]]))` was
# intended; confirm before relying on the printed order.
if(is.data.table(x)) {
xby <- x[, sort(unique(by.x)), with = FALSE][[by.x]]
}
else {
# data.frame branch: sorted unique values of the column
xby <- sort(unique(x[, by.x]))
}
if(is.data.table(y)) {
yby <- y[, sort(unique(by.y)), with = FALSE][[by.y]]
}
else {
yby <- sort(unique(y[, by.y]))
}
# Report both directions of the set difference between the level sets.
cat("\n\nIn x, but NOT in y:\n")
cat(setdiff(xby, yby), sep = "; ")
cat("\n\nIn y, but NOT in x:\n")
cat(setdiff(yby, xby), sep = "; ")
cat("\n")
}
|
library(shiny)
library(ggplot2)
library(tools)
load(url("http://s3.amazonaws.com/assets.datacamp.com/production/course_4850/datasets/movies.Rdata"))
# UI
# Scatterplot explorer for the `movies` dataset: the user picks the x/y
# variables, a colouring variable, point alpha/size, and a plot title that
# is only applied once the action button is pressed.
ui <- fluidPage(
sidebarLayout(
# Input
sidebarPanel(
# Select variable for y-axis
selectInput(inputId = "y",
label = "Y-axis:",
choices = c("IMDB rating" = "imdb_rating",
"IMDB number of votes" = "imdb_num_votes",
"Critics Score" = "critics_score",
"Audience Score" = "audience_score",
"Runtime" = "runtime"),
selected = "audience_score"),
# Select variable for x-axis
selectInput(inputId = "x",
label = "X-axis:",
choices = c("IMDB rating" = "imdb_rating",
"IMDB number of votes" = "imdb_num_votes",
"Critics Score" = "critics_score",
"Audience Score" = "audience_score",
"Runtime" = "runtime"),
selected = "critics_score"),
# Select variable for color
selectInput(inputId = "z",
label = "Color by:",
choices = c("Title Type" = "title_type",
"Genre" = "genre",
"MPAA Rating" = "mpaa_rating",
"Critics Rating" = "critics_rating",
"Audience Rating" = "audience_rating"),
selected = "mpaa_rating"),
# Set alpha level
sliderInput(inputId = "alpha",
label = "Alpha:",
min = 0, max = 1,
value = 0.5),
# Set point size
sliderInput(inputId = "size",
label = "Size:",
min = 0, max = 5,
value = 2),
# Enter text for plot title
textInput(inputId = "plot_title",
label = "Plot title",
placeholder = "Enter text to be used as plot title"),
# Action button for plot title
actionButton(inputId = "update_plot_title",
label = "Update plot title")
),
# Output:
mainPanel(
plotOutput(outputId = "scatterplot")
)
)
)
# Define server function required to create the scatterplot-
server <- function(input, output, session) {
# New plot title
# eventReactive() delays evaluation: the (title-cased) title only updates
# when the action button fires, not on every keystroke in the text box.
new_plot_title <- eventReactive(input$update_plot_title,
{ toTitleCase(input$plot_title) }
)
# Create scatterplot object the plotOutput function is expecting
output$scatterplot <- renderPlot({
ggplot(data = movies, aes_string(x = input$x, y = input$y, color = input$z)) +
geom_point(alpha = input$alpha, size = input$size) +
labs(title = new_plot_title())
})
}
# Create a Shiny app object
shinyApp(ui = ui, server = server) | /R/DataCamp - Building Web Applications in R with Shiny/19 Delay with eventReactive().R | no_license | SemenkinaOlga/code-examples | R | false | false | 3,068 | r | library(shiny)
library(ggplot2)
library(tools)
load(url("http://s3.amazonaws.com/assets.datacamp.com/production/course_4850/datasets/movies.Rdata"))
# UI
# Duplicate of the scatterplot-explorer app: inputs for axis variables,
# colour variable, alpha, size, and a button-gated plot title.
ui <- fluidPage(
sidebarLayout(
# Input
sidebarPanel(
# Select variable for y-axis
selectInput(inputId = "y",
label = "Y-axis:",
choices = c("IMDB rating" = "imdb_rating",
"IMDB number of votes" = "imdb_num_votes",
"Critics Score" = "critics_score",
"Audience Score" = "audience_score",
"Runtime" = "runtime"),
selected = "audience_score"),
# Select variable for x-axis
selectInput(inputId = "x",
label = "X-axis:",
choices = c("IMDB rating" = "imdb_rating",
"IMDB number of votes" = "imdb_num_votes",
"Critics Score" = "critics_score",
"Audience Score" = "audience_score",
"Runtime" = "runtime"),
selected = "critics_score"),
# Select variable for color
selectInput(inputId = "z",
label = "Color by:",
choices = c("Title Type" = "title_type",
"Genre" = "genre",
"MPAA Rating" = "mpaa_rating",
"Critics Rating" = "critics_rating",
"Audience Rating" = "audience_rating"),
selected = "mpaa_rating"),
# Set alpha level
sliderInput(inputId = "alpha",
label = "Alpha:",
min = 0, max = 1,
value = 0.5),
# Set point size
sliderInput(inputId = "size",
label = "Size:",
min = 0, max = 5,
value = 2),
# Enter text for plot title
textInput(inputId = "plot_title",
label = "Plot title",
placeholder = "Enter text to be used as plot title"),
# Action button for plot title
actionButton(inputId = "update_plot_title",
label = "Update plot title")
),
# Output:
mainPanel(
plotOutput(outputId = "scatterplot")
)
)
)
# Define server function required to create the scatterplot-
server <- function(input, output, session) {
# New plot title
# Only recomputed when the action button is clicked (eventReactive).
new_plot_title <- eventReactive(input$update_plot_title,
{ toTitleCase(input$plot_title) }
)
# Create scatterplot object the plotOutput function is expecting
output$scatterplot <- renderPlot({
ggplot(data = movies, aes_string(x = input$x, y = input$y, color = input$z)) +
geom_point(alpha = input$alpha, size = input$size) +
labs(title = new_plot_title())
})
}
# Create a Shiny app object
shinyApp(ui = ui, server = server) |
source("warmup.R")
best <- function(state, outcome){
# NOTE(review): `outcomes` is not defined in this file -- presumably it is
# loaded by warmup.R (sourced above); confirm before relying on this.
# The `state` and `outcome` arguments are currently unused: the body only
# builds the cache and lists the available states (work in progress).
ds <- cache(outcomes)
ds$states()
# print(colNames[2])
# colNames[1]
# colNames[0]
}
# Closure-based, memoising accessor over the hospital outcomes data frame.
# Returns a list of functions that share `data` plus three cached values:
# the unique state codes and the most recently queried state's row subset.
cache <- function(data)
{
  uniqueStates <- NULL   # lazily computed set of state codes
  stateData <- NULL      # rows of `data` for the last queried state
  queryState <- NULL     # the state those cached rows belong to

  # Refresh the cached subset only when a different state is requested.
  setStateData <- function(state){
    # NULL-safe: on the first call queryState is NULL, and `state != NULL`
    # would yield logical(0) and crash the `if`.
    if(is.null(queryState) || state != queryState){
      queryState <<- state
      # BUG FIX: the original read `data$State=state`, which is a parse
      # error inside `[` (`=` is argument assignment, not comparison).
      stateData <<- data[data$State == state, ]
    }
    return(stateData)
  }
  # Lazily compute and cache the distinct state codes.
  states <- function(){
    if(is.null(uniqueStates)){
      # NOTE(review): column 7 is presumably the State column -- confirm
      # against the outcomes file layout before changing.
      uniqueStates <<- unique(data[, 7])
    }
    return(uniqueStates)
  }
  # Minimum value of the named column among the state's rows; values are
  # stored as text, hence the as.double() conversion.
  lowestHeartAttack <- function(state){
    colName = 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack'
    min(as.double(setStateData(state)[, colName]))
  }
  lowestPneumonia <- function(state){
    colName = 'Hospital.30.Day.Readmission.Rates.from.Pneumonia'
    min(as.double(setStateData(state)[, colName]))
  }
  lowestHeartFailure <- function(state){
    colName = 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure'
    min(as.double(setStateData(state)[, colName]))
  }
  # Public interface returned to the caller.
  list(
    lowestHeartAttackRate = lowestHeartAttack,
    lowestHeartFailureRate = lowestHeartFailure,
    lowestPneumoniaRate = lowestPneumonia,
    states = states
  )
}
best('CA', 'jflkds') | /R/Code/Week 4/Assignment/best-cached.R | no_license | zeyab/DataScience | R | false | false | 1,298 | r | source("warmup.R")
best <- function(state, outcome){
# NOTE(review): `outcomes` is not defined in this file -- presumably loaded
# by warmup.R; `state` and `outcome` are currently unused (work in progress).
ds <- cache(outcomes)
ds$states()
# print(colNames[2])
# colNames[1]
# colNames[0]
}
# Closure-based memoising accessor over the outcomes data frame.
cache <- function(data)
{
uniqueStates <- NULL
stateData <- NULL
queryState <- NULL
# Cache the subset of rows for the most recently queried state.
# BUG: `data$State=state` is invalid syntax inside `[` (`=` is argument
# assignment, not comparison) -- it should read `data$State == state`.
# Also, on the first call `queryState` is NULL, so `state != queryState`
# yields logical(0) and the `if` fails.
setStateData <- function(state){
if(state != queryState){
queryState <<- state
stateData <<- data[data$State=state,]
}
return(stateData)
}
# Lazily compute and cache the unique state codes.
# NOTE(review): column 7 is presumably the State column -- confirm.
states <- function(){
if(is.null(uniqueStates)){
uniqueStates <<- unique(data[, 7])
}
return(uniqueStates)
}
# Minimum of the named (text-valued) column for the given state.
lowestHeartAttack <- function(state){
colName ='Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack'
setStateData(state)
min(as.double(stateData[,colName]));
}
lowestPneumonia <- function(state){
colName = 'Hospital.30.Day.Readmission.Rates.from.Pneumonia'
setStateData(state)
min(as.double(stateData[,colName]));
}
lowestHeartFailure <- function(state){
colName = 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure'
setStateData(state)
min(as.double(stateData[,colName]));
}
# Public interface returned to the caller.
list(
lowestHeartAttackRate = lowestHeartAttack,
lowestHeartFailureRate = lowestHeartFailure,
lowestPneumoniaRate = lowestPneumonia,
states = states
)
}
best('CA', 'jflkds') |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{table_by}
\alias{table_by}
\title{take two factors and arrange in a proportion table}
\usage{
table_by(fac1, fac2, dat, yes = "Yes")
}
\description{
take two factors and arrange in a proportion table
}
| /Zmisc/man/table_by.Rd | no_license | Zus/zmisc | R | false | true | 296 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{table_by}
\alias{table_by}
\title{take two factors and arrange in a proportion table}
\usage{
table_by(fac1, fac2, dat, yes = "Yes")
}
\description{
take two factors and arrange in a proportion table
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetch-player-details.R
\name{fetch_player_details}
\alias{fetch_player_details}
\alias{fetch_player_details_afl}
\alias{fetch_player_details_afltables}
\alias{fetch_player_details_footywire}
\title{Fetch Player Details}
\usage{
fetch_player_details(
team = NULL,
current = TRUE,
comp = "AFLM",
source = "AFL",
...
)
fetch_player_details_afl(season, team = NULL, comp = "AFLM")
fetch_player_details_afltables(team = NULL)
fetch_player_details_footywire(team, current = TRUE)
}
\arguments{
\item{team}{team the player played for in the season for, defaults to NULL which returns all teams}
\item{current}{logical, return the current team list for the current calendar year or all historical data}
\item{comp}{One of "AFLM" (default) or "AFLW"}
\item{source}{One of "AFL" (default), "footywire", "afltables"}
\item{...}{Optional parameters passed onto various functions depending on source.}
\item{season}{Season in YYYY format}
}
\value{
A Tibble with the details of the relevant players.
}
\description{
\code{fetch_player_details} returns player details such as date of birth, debut
and other details. The exact details that are returned will depend on which
source is provided.
By default the source used will be the official AFL website.
\code{\link[=fetch_player_details_afl]{fetch_player_details_afl()}}, \code{\link[=fetch_player_details_afltables]{fetch_player_details_afltables()}} and \code{\link[=fetch_player_details_footywire]{fetch_player_details_footywire()}}
can be called directly and return data from the AFL website, AFL Tables and Footywire respectively.
The function will typically be used to return the current team lists. For historical data, you can use the \code{current} argument set to FALSE. This will return all historical data for AFL.com and Footywire data. AFLTables data will always return historical data.
}
\examples{
\dontrun{
# Return data for current Hawthorn players
fetch_player_details("Hawthorn")
fetch_player_details("Adelaide", current = FALSE, comp = "AFLW")
fetch_player_details("GWS", current = TRUE, source = "footywire")
}
}
\seealso{
\itemize{
\item \link{fetch_player_details_afl} for AFL.com data.
\item \link{fetch_player_details_footywire} for Footywire data.
\item \link{fetch_player_details_afltables} for AFL Tables data.
}
}
\concept{fetch player details functions}
| /man/fetch_player_details.Rd | no_license | jamespkav/fitzRoy | R | false | true | 2,423 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetch-player-details.R
\name{fetch_player_details}
\alias{fetch_player_details}
\alias{fetch_player_details_afl}
\alias{fetch_player_details_afltables}
\alias{fetch_player_details_footywire}
\title{Fetch Player Details}
\usage{
fetch_player_details(
team = NULL,
current = TRUE,
comp = "AFLM",
source = "AFL",
...
)
fetch_player_details_afl(season, team = NULL, comp = "AFLM")
fetch_player_details_afltables(team = NULL)
fetch_player_details_footywire(team, current = TRUE)
}
\arguments{
\item{team}{team the player played for in the season for, defaults to NULL which returns all teams}
\item{current}{logical, return the current team list for the current calendar year or all historical data}
\item{comp}{One of "AFLM" (default) or "AFLW"}
\item{source}{One of "AFL" (default), "footywire", "afltables"}
\item{...}{Optional parameters passed onto various functions depending on source.}
\item{season}{Season in YYYY format}
}
\value{
A Tibble with the details of the relevant players.
}
\description{
\code{fetch_player_details} returns player details such as date of birth, debut
and other details. The exact details that are returned will depend on which
source is provided.
By default the source used will be the official AFL website.
\code{\link[=fetch_player_details_afl]{fetch_player_details_afl()}}, \code{\link[=fetch_player_details_afltables]{fetch_player_details_afltables()}} and \code{\link[=fetch_player_details_footywire]{fetch_player_details_footywire()}}
can be called directly and return data from the AFL website, AFL Tables and Footywire respectively.
The function will typically be used to return the current team lists. For historical data, you can use the \code{current} argument set to FALSE. This will return all historical data for AFL.com and Footywire data. AFLTables data will always return historical data.
}
\examples{
\dontrun{
# Return data for current Hawthorn players
fetch_player_details("Hawthorn")
fetch_player_details("Adelaide", current = FALSE, comp = "AFLW")
fetch_player_details("GWS", current = TRUE, source = "footywire")
}
}
\seealso{
\itemize{
\item \link{fetch_player_details_afl} for AFL.com data.
\item \link{fetch_player_details_footywire} for Footywire data.
\item \link{fetch_player_details_afltables} for AFL Tables data.
}
}
\concept{fetch player details functions}
|
## plot3.R
## Read in household power consumption data,
## filter it to a subset of dates, and
## plot it according to spec
## Set path to file
## NOTE(review): machine-specific path -- adjust before running elsewhere.
file <- "~/Workspace/coursera/exdata-031/household_power_consumption.txt"
## Read in data ("?" marks missing values in this dataset)
dat <- read.table(file, header = TRUE, sep = ";", na.strings = "?")
## Combine $Date and $Time fields (separated by " ")
## Set as Date/Time
## Add to data
Date_time <- paste(dat$Date, dat$Time, sep = " ")
Date_time <- strptime(Date_time, "%d/%m/%Y %H:%M:%S")
dat$Date_time <- Date_time
## Set $Date as a Date
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
## Create two filter variables
## to subset the data
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
## Subset the data to include
## the two dates (above)
subs <- dat[dat$Date %in% c(date1, date2), ]
## Open PNG device
png("plot3.png")
## Plot and add additional lines
## (Sub_metering_1 drawn by plot(); 2 and 3 overlaid with lines())
plot(subs$Date_time,
subs$Sub_metering_1,
type = "l",
ylab = "Energy sub metering",
xlab = "")
lines(subs$Date_time, subs$Sub_metering_2, col = "red")
lines(subs$Date_time, subs$Sub_metering_3, col = "blue")
## Build the legend
legend("topright",
pch = "-",
lwd = 2,
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"))
## Close PNG device
dev.off()
| /plot3.R | no_license | cuddlyhugbear/ExData_Plotting1 | R | false | false | 1,318 | r | ## plot3.R
## Read in household power consumption data,
## filter it to a subset of dates, and
## plot it according to spec
## Set path to file
## NOTE(review): machine-specific path -- adjust before running elsewhere.
file <- "~/Workspace/coursera/exdata-031/household_power_consumption.txt"
## Read in data ("?" marks missing values in this dataset)
dat <- read.table(file, header = TRUE, sep = ";", na.strings = "?")
## Combine $Date and $Time fields (separated by " ")
## Set as Date/Time
## Add to data
Date_time <- paste(dat$Date, dat$Time, sep = " ")
Date_time <- strptime(Date_time, "%d/%m/%Y %H:%M:%S")
dat$Date_time <- Date_time
## Set $Date as a Date
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
## Create two filter variables
## to subset the data
date1 <- as.Date("2007-02-01")
date2 <- as.Date("2007-02-02")
## Subset the data to include
## the two dates (above)
subs <- dat[dat$Date %in% c(date1, date2), ]
## Open PNG device
png("plot3.png")
## Plot and add additional lines
plot(subs$Date_time,
subs$Sub_metering_1,
type = "l",
ylab = "Energy sub metering",
xlab = "")
lines(subs$Date_time, subs$Sub_metering_2, col = "red")
lines(subs$Date_time, subs$Sub_metering_3, col = "blue")
## Build the legend
legend("topright",
pch = "-",
lwd = 2,
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"))
## Close PNG device
dev.off()
|
# Generate one numeric quiz question per survey variable V2..V44: "what is
# the mean of variable Vk", with the mean of the column's positive values
# as the answer (the {#...:0.01} pattern appears to be Moodle GIFT numeric
# syntax with a 0.01 tolerance -- TODO confirm the target quiz format).
X <- read.table("/Users/mihai/Documents/MyWebSites/Claree/Rweb/cases/camip/camip.txt",
header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
aRweb="(<a href=\"http://localhost/Claree/Rweb\">Vous pouvez utiliser le système statique R</a>)"
q="Quelle est la moyenne de la variable"
for(i in 1:43){
# Mean over the strictly positive entries of column i.
# NOTE(review): NAs in the column propagate into the mean -- confirm intended.
cat(q," ", paste("V",i+1,sep=""),aRweb ,paste("{#",mean(X[i][X[i]>0]), ":0.01}",sep=""),"\n\n")
}
| /slides_adunivarie/quizz/quizzmaker-adintro_camipmeans.R | no_license | calciu/slides_ad | R | false | false | 411 | r |
X <- read.table("/Users/mihai/Documents/MyWebSites/Claree/Rweb/cases/camip/camip.txt",
header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
aRweb="(<a href=\"http://localhost/Claree/Rweb\">Vous pouvez utiliser le système statique R</a>)"
q="Quelle est la moyenne de la variable"
for(i in 1:43){
cat(q," ", paste("V",i+1,sep=""),aRweb ,paste("{#",mean(X[i][X[i]>0]), ":0.01}",sep=""),"\n\n")
}
|
# This is a play script for simulitis
# Build the starting "simulitis" state: n individuals placed uniformly at
# random inside the box [xlim] x [ylim], each moving with constant `speed`
# in a random direction, with exactly one individual infected.
# Returns a "simulitis" object: list(state = per-individual data frame,
# xlim, ylim).  (`cell_size` is kept for interface compatibility; it is
# not used here.)
initial_conditions <- function(n, speed, xlim = c(0,100), ylim = c(0,100), cell_size = 0.5) {
  centers <- data.frame(x = runif(n, xlim[1], xlim[2]),
                        y = runif(n, ylim[1], ylim[2]))
  # Decompose the scalar speed into per-axis velocity components.
  directions <- runif(n, 0, 2 * pi)
  xy_directions <- data.frame(delta_x = speed * cos(directions),
                              delta_y = speed * sin(directions))
  # Store infection status as a factor with fixed levels so that code
  # indexing by as.numeric(infected) keeps working on every R version
  # (since R 4.0 a bare character column is no longer auto-converted
  # to a factor by data.frame()/cbind()).
  infected <- factor(rep("Healthy", n), levels = c("Healthy", "Infected"))
  infected[sample(seq_len(n), size = 1)] <- "Infected"
  state <- cbind(centers, xy_directions, infected)
  simulitis_step <- list(state = state, xlim = xlim, ylim = ylim)
  class(simulitis_step) <- "simulitis"
  return(simulitis_step)
}
# Smoke test: build one random starting state (100 individuals, speed 0.3).
previous_step <- initial_conditions(100,0.3)
# S3 plot method for "simulitis" objects: draw each individual as a filled
# point, red for Infected and black for Healthy.
plot.simulitis <- function(x) {
  # Colour by direct matching on the status value rather than indexing a
  # palette with as.numeric(infected): the latter yields NA colours when
  # `infected` is a character column (the data.frame default since R 4.0).
  pt_col <- ifelse(x$state$infected == "Infected", "red", "black")
  plot(x$state$x, x$state$y, col = pt_col,
       pch = 19, xlim = x$xlim, ylim = x$ylim, xlab = "", ylab = "")
}
# Dispatches to plot.simulitis() via the "simulitis" class.
plot(previous_step)
# Advance the simulation by one tick: each individual moves by its velocity,
# bouncing (velocity sign flip) off the walls of the box.  Infection status
# is carried along unchanged.
simulitis_step <- function(previous_step) {
  st <- previous_step$state

  # Flip a velocity component whenever the next position would leave [lim].
  reflect <- function(pos, delta, lim) {
    nxt <- pos + delta
    ifelse(nxt < min(lim) | nxt > max(lim), -delta, delta)
  }
  dx <- reflect(st$x, st$delta_x, previous_step$xlim)
  dy <- reflect(st$y, st$delta_y, previous_step$ylim)

  nxt_state <- data.frame(x = st$x + dx,
                          y = st$y + dy,
                          delta_x = dx,
                          delta_y = dy,
                          infected = st$infected)
  out <- list(state = nxt_state,
              xlim = previous_step$xlim,
              ylim = previous_step$ylim)
  class(out) <- "simulitis"
  return(out)
}
# One manual step as a sanity check.
step_2 <- simulitis_step(previous_step)
plot(step_2)
# Run a fresh 100-step simulation, accumulating every frame into one long
# data frame (one row per individual per time step) for animation.
n_steps <- 100
next_step <- initial_conditions(100,0.3)
df_out <- data.frame(
x = next_step$state$x,
y = next_step$state$y,
infected = next_step$state$infected,
t = 1
)
# NOTE(review): rbind-in-a-loop grows df_out quadratically; fine at this
# size, but collecting frames in a list and binding once would scale better.
for(tt in seq(2,n_steps)) {
next_step <- simulitis_step(next_step)
df_out <- rbind(df_out,data.frame(
x = next_step$state$x,
y = next_step$state$y,
infected = next_step$state$infected,
t = tt
))
}
library(gganimate)
# Animate the point cloud over time, coloured by infection status.
ggplot(df_out) +
geom_point(aes(x = x, y = y, color = infected)) +
transition_states(t, wrap = T) +
scale_color_discrete("") +
theme_bw()
| /simulitis/simulitis.R | no_license | danieladamspencer/visualization-fiesta | R | false | false | 2,877 | r | # This is a play script for simulitis
# Build the starting "simulitis" state: n individuals at uniform random
# positions, constant speed in random directions, exactly one infected.
initial_conditions <- function(n, speed, xlim = c(0,100), ylim = c(0,100), cell_size = 0.5) {
centers <- data.frame(x = runif(n,xlim[1],xlim[2]),
y = runif(n,ylim[1],ylim[2]))
# distances <- dist(centers)
directions <- runif(n,0,2*pi)
# Decompose the scalar speed into per-axis velocity components.
xy_directions <- data.frame(delta_x = speed * cos(directions),
delta_y = speed * sin(directions))
# NOTE(review): boundary_distance (and the cell_size argument) are
# computed/accepted but never used below.
boundary_distance <- data.frame(
top = (ylim[2] - centers$y)^2,
left = (xlim[1] - centers$x)^2,
bottom = (ylim[1] - centers$y)^2,
right = (xlim[2] - centers$x)^2
)
infected <- rep("Healthy",n)
infected[sample(seq(n),size = 1)] <- "Infected"
state <- cbind(centers,xy_directions,infected)
simulitis_step <- list(state = state, xlim = xlim,
ylim = ylim)
class(simulitis_step) <- "simulitis"
return(simulitis_step)
}
previous_step <- initial_conditions(100,0.3)
# S3 plot method: draw individuals, red = Infected, black = Healthy.
# NOTE(review): as.numeric() on `infected` assumes it is a factor; if the
# column is character (the data.frame default since R 4.0) this yields NA
# colours -- confirm which R version this targets.
plot.simulitis <- function(x) {
plot(x$state$x, x$state$y, col = c("black","red")[as.numeric(x$state$infected)],
pch = 19, xlim = x$xlim, ylim = x$ylim, xlab = "", ylab = "")
}
plot(previous_step)
# Advance one tick: move each individual by its velocity, flipping the
# velocity component (bounce) when the next position would leave the box.
simulitis_step <- function(previous_step) {
delta_x <- ifelse(previous_step$state$x + previous_step$state$delta_x < min(previous_step$xlim) |
previous_step$state$x + previous_step$state$delta_x > max(previous_step$xlim),
-previous_step$state$delta_x,previous_step$state$delta_x)
delta_y <- ifelse(previous_step$state$y + previous_step$state$delta_y < min(previous_step$ylim) |
previous_step$state$y + previous_step$state$delta_y > max(previous_step$ylim),
-previous_step$state$delta_y,previous_step$state$delta_y)
next_centers <- data.frame(x = previous_step$state$x + delta_x,
y = previous_step$state$y + delta_y)
xy_directions <- data.frame(delta_x = delta_x,
delta_y = delta_y)
# distances <- as.matrix(dist(next_centers))
# NOTE(review): infection is carried along but never transmitted between
# individuals -- presumably contagion logic was still to be written.
simulitis_step <- list(
state = cbind(next_centers, xy_directions, infected = previous_step$state$infected),
xlim = previous_step$xlim,
ylim = previous_step$ylim
)
class(simulitis_step) <- "simulitis"
return(simulitis_step)
}
# One manual step as a sanity check.
step_2 <- simulitis_step(previous_step)
plot(step_2)
# Simulate n_steps frames, collecting one row per individual per frame.
n_steps <- 100
next_step <- initial_conditions(100,0.3)
df_out <- data.frame(
x = next_step$state$x,
y = next_step$state$y,
infected = next_step$state$infected,
t = 1
)
for(tt in seq(2,n_steps)) {
next_step <- simulitis_step(next_step)
df_out <- rbind(df_out,data.frame(
x = next_step$state$x,
y = next_step$state$y,
infected = next_step$state$infected,
t = tt
))
}
library(gganimate)
# Animate positions over time, coloured by infection status.
ggplot(df_out) +
geom_point(aes(x = x, y = y, color = infected)) +
transition_states(t, wrap = T) +
scale_color_discrete("") +
theme_bw()
|
#' Show the HTML content
#'
#' Print the generated HTML content to the R console.
#'
#' @param x An object from which HTML lines can be generated.
#' @return \code{x}, invisibly, so the call can sit inside a pipeline.
#' @export
show_html <- function(x) {
  content <- generate_html_lines(x)
  message("Formatted HTML content:")
  cat(content)
  invisible(x)
}
| /R/show_html.R | permissive | rich-iannone/hyper | R | false | false | 237 | r | #' Show the HTML content
#'
#' Show the generated HTML content in the
#' R console.
#'
#' @param x an object from which HTML lines can be generated
#' @return \code{x}, invisibly
#' @export
show_html <- function(x) {
html_lines <- generate_html_lines(x)
message("Formatted HTML content:")
cat(html_lines)
# Return the input invisibly so the call can sit inside a pipeline.
invisible(x)
}
|
###HW #2 Andrew Wong (2 Sample Inference)
#1: data in file "navajo.csv" is the number of traditional Navajo hogans in a given district and the number of modern homes in the same district
# for a random sample of 8 districts on the Navajo reservation. Test whether the mean number of Navajo hogans is equal to the mean number of modern homes on
#the reservation.
#2: data in file "trucks.csv" is the retail price for models of pickups in two different US cities.
#Prices are in thousands of dollars. Test whether the mean retail prices of pickups is the same in each city.
#Due 2/14
#1: Paired test since data for both modern house and navajo comes from same district, but from 8 different districts
library(readr)
navajo <- read_csv("C:/Users/Andrew/Downloads/navajo.csv")
View(navajo)
# Per-district differences: the paired design reduces to a one-sample test on d.
d <- navajo$HOGANS - navajo$MODERN
#Check assumptions
#Check normality
qqnorm(d) #Looks normalish, but might be problems at the ends
shapiro.test(d) #p value is .4597 so we can't say we reject H0 (Normal)
#Look for trends
plot(d) #very few points, but I can't see any trend
#Let's do one sample t-test
t.test(d, alternative = "two.sided", mu = 0) #pvalue is .4564 which means we don't reject that means are the same
mean(navajo$HOGANS) #25.25
mean(navajo$MODERN) #31.25
#Let's double check with 2 sample paired test
t.test(navajo$HOGANS, navajo$MODERN, alternative = "two.sided", paired = T) #Got same p value
#Just to make sure, let's also check the not paired t test too
t.test(navajo$HOGANS, navajo$MODERN, alternative = "two.sided", paired = F) #p value is .5303, which means still don't reject H0
#Conclusion: equal means of Hogans and Modern houses. The process was that I noticed the data was paired. Then I checked for normality and checked to
#see if there were any patterns in the data. Normality was good, and there were no patterns. This meant I could use a one sample t-test on the differences of Hogans
# and Modern houses for a given district. I did the paired test and got a p value of .4564, which meant not to reject. Then just for fun, I did a not paired t-test
#and still got the same answer of not rejecting H0.
#2: data in file "trucks.csv" is the retail price for models of pickups in two different US cities.
#Prices are in thousands of dollars. Test whether the mean retail prices of pickups is the same in each city.
trucks <- read.csv("C:/Users/Andrew/Downloads/trucks.csv", header=TRUE)
View(trucks)
#Check assumptions
x1 <- trucks$X1
x2 <- trucks$X2
#Get rid of NA's since data was weird and had a row of NA's
x1 <- x1[!is.na(x1)]
x2 <- x2[!is.na(x2)]
x1
x2
##Check assumptions of our test
#Check normality
qqnorm(x1) #No line, looks like a weird spiral
qqnorm(x2) #This looks okay for normal
shapiro.test(x1) #p value is .3019, which means okay to assume normal
shapiro.test(x2) #p value is .6921 which means okay to assume normal
#Check for trends
plot(x1) #No pattern
plot(x2) #No pattern
cor(x1,x2) #Also two city's prices are not highly correlated
##Check assumption of equal variances
var(x1)
var(x2) #Very similar, so can assume equal variances
#If we need to test
var.test(x1,x2) #H0 is that they are equal variances. p value is .95 so confirms variances are equal
#Since normality and no patterns (ind) check out we can do t-test
t.test(x1,x2, alternative = "two.sided", paired = F, var.equal = T) #p value is .6167 which means can't reject
#Conclusion: Pickup prices between the two cities have the same mean. The first thing I had to do was get rid of the NA's that appeared in the data
#when I imported. Then I checked each x1 and x2 on if normal and if each one had trends. Both checked out for normality and no pattern. Then I checked for
#equal variances, and the variances were similar so I assumed equal variances. Just in case I also did the var.test() function and got a p value of .95.
#This meant I can't reject H0 that the variances of x1 and x2 are equal. Because of all this, I was able to use a two sample t-test. I got a p-value of
#.6167, which meant I couldn't reject.
| /ANOVA - HW#2 - Andrew Wong.R | no_license | awong026/Two.sample.inference.analysis | R | false | false | 4,043 | r | ###HW #2 Andrew Wong (2 Sample Inference)
#1: data in file "navajo.csv" is the number of traditional Navaho hogans in a given district and the number of modern homes in the same district
# for a random sample of 8 districts on the Navajo reservation. Test whether the mean number of Navajo hogans is equal to the mean number of modern homes on
#the reservation.
#2: data in file "trucks.csv" is the retail price for models of pickups in two different US cities.
#Prices are in thousands of dollars. Test wheter the mean retail prices of pickups is the same in each city.
#Due 2/14
#1: Paired test since data for both modern house and navajo comes from same district, but from 8 different districts
library(readr)
navajo <- read_csv("C:/Users/Andrew/Downloads/navajo.csv")
View(navajo)
d <- navajo$HOGANS - navajo$MODERN
#Check assumptions
#Check normality
qqnorm(d) #Looks normalish, but might be problems at the ends
shapiro.test(d) #p value is .4597 is can't say we reject H0 (Normal)
#Look for trends
plot(d) #very few points, but I can't see any trend
#Let's do one sample t-test
t.test(d, alternative = "two.sided", mu = 0) #pvalue is .4564 which means we don't reject that means are the same
mean(navajo$HOGANS) #25.25
mean(navajo$MODERN) #31.25
#Let's double check with 2 sample paired test
t.test(navajo$HOGANS, navajo$MODERN, alternative = "two.sided", paired = T) #Got same p value
#Just to make sure, let's also check the not paired t test too
t.test(navajo$HOGANS, navajo$MODERN, alternative = "two.sided", paired = F) #p value is .5303, which means still don't reject H0
#Conclusion: equal means of Hogans and Modern houses. The process was that I noticed the data was paired. Then I checked for normality and checked to
#see if there was any patterns in the data. Noramlity was good, and there were no patterns. This meant I could use a one sample t-test on the differences of Hogans
# and Modern houses for a given district. I did the paired test and got a p value of .4564, which meant not to reject. Then just for fun, I did a not paired t-test
#and still go the same answer of not rejecting H0.
#2: data in file "trucks.csv" is the retail price for models of pickups in two different US cities.
#Prices are in thousands of dollars. Test wheter the mean retail prices of pickups is the same in each city.
trucks <- read.csv("C:/Users/Andrew/Downloads/trucks.csv", header=TRUE)
View(trucks)
#Check assumptions
x1 <- trucks$X1
x2 <- trucks$X2
#Get rid of NA's since data was weird and had a row of NA's
x1 <- x1[!is.na(x1)]
x2 <- x2[!is.na(x2)]
x1
x2
##Check assumptions of our test
#Check normality
qqnorm(x1) #No line, looks like a weird spiral
qqnorm(x2) #This looks okay for normal
shapiro.test(x1) #p value is .3019, which means okay to assume normal
shapiro.test(x2) #p value is .6921 which means okay to assume normal
#Check for trends
plot(x1) #No pattern
plot(x2) #No pattern
cor(x1,x2) #Also two city's prices are not highly correlated
##Check assumption of equal variances
var(x1)
var(x2) #Very simliar, so can assume equal variances
#If we need to test
var.test(x1,x2) #H0 is that they are qual variances. p value is .95 so confirms variances are equal
#Since normality and no patterns (ind) check out we can do t-test
t.test(x1,x2, alternative = "two.sided", paired = F, var.equal = T) #p value is .6167 which means can't reject
#Conclusion: Pickup prices between the two cities have the same mean. The first thing I had to do was get rid of the NA's that appeared in the data
#when I imported. Then I checked each x1 and x2 on if normal and if each one had trends. Both checked out for normality and no pattern. Then I check for
#equal variances, and the variances were similar so I assumed equal variances. Just in case I also did the var.test() function and got a p value of .95.
#This meant I can't rejec H0 that the variances of x1 and x2 are equal. Because of all this, I was able to use a two sample t-test. I got a p-value of
#.6167, which meant I couldn't reject.
|
/CH-10/CH-10-02.R | no_license | Erfanit64/R-programming-with-applications-to-financial-quantitive-analysis | R | false | false | 2,546 | r | ||
## Load all custom functions, sound Libraries
library(tuneR)
library(seewave)
## Custom paste function(I got tired of typing in all these options).
# pw: prepend y1 to the start of x1 (seewave::pastew with fixed options),
# returning a Wave object.
pw <- function(x1,y1) pastew(x1, y1, at = "start", output = "Wave")
# cw: cut the segment from t1 to t2 seconds out of `sound`
# (seewave::extractWave, non-interactive, time units).
cw <- function(sound, t1, t2) extractWave(sound, from = t1, to = t2, interact = FALSE, xunit = "time")
## Find some points on the line (npts, to be specific), and show their coordinate
# Interactively pick npts points on the current plot, mark their x
# positions with vertical lines, and return those x coordinates.
locatr <- function(npts = 2){
    locs <- locator(n = npts)
    abline(v = locs$x)
    locs$x
}
## Create a loop and playback function
# Repeat `thebeat` ntimes by successively appending it to itself;
# optionally play the loop (hear) and/or return it (giveback).
# NOTE(review): the append step is do.call(bind, c(beat, thebeat));
# presumably this is meant to behave like bind(beat, thebeat) for Wave
# objects -- confirm, since c() on two S4 Wave objects may not build the
# intended argument list.
loopr <- function(thebeat, ntimes = 1, hear = TRUE, giveback = TRUE){
    for(i in 1:ntimes) {if(i == 1) beat <- thebeat
                        else { beat <- do.call(bind, c(beat, thebeat))}}
    if(hear == TRUE) play(beat)
    if(giveback) beat
}
## make a function that easily changes the time on a sample
##' @title ct
##' @param wavObj ## A wave object to change the time on
##' @param tnew ## The new time you'd like (in seconds)
##' @return The same wave object, but with the time changed
##' @author Benjamin Rogers
# Rescales the sample rate so the same samples span tnew seconds
# (i.e. speeds up or slows down playback rather than resampling).
ct <- function(wavObj, tnew){
    ## get old time.
    t.old <- duration(wavObj)
    wavObj@samp.rate <- wavObj@samp.rate * t.old/tnew
    wavObj
}
## Not sure if necessary, but oh well.
# afplay is the macOS command-line audio player.
setWavPlayer('afplay')
| /PreReqs.R | no_license | kingofkung/SampleR | R | false | false | 1,282 | r | ## Load all custom functions, sound Libraries
library(tuneR)
library(seewave)
## Custom paste function(I got tired of typing in all these options).
pw <- function(x1,y1) pastew(x1, y1, at = "start", output = "Wave")
cw <- function(sound, t1, t2) extractWave(sound, from = t1, to = t2, interact = FALSE, xunit = "time")
## Find some points on the line (npts, to be specific), and show their coordinate
locatr <- function(npts = 2){
locs <- locator(n = npts)
abline(v = locs$x)
locs$x
}
## Create a loop and playback function
loopr <- function(thebeat, ntimes = 1, hear = TRUE, giveback = TRUE){
for(i in 1:ntimes) {if(i == 1) beat <- thebeat
else { beat <- do.call(bind, c(beat, thebeat))}}
if(hear == TRUE) play(beat)
if(giveback) beat
}
## make a function that easily changes the time on a sample
##' @title ct
##' @param wavObj ## A wave object to change the time on
##' @param tnew ## The new time you'd like (in seconds)
##' @return The same wave object, but with the time changed
##' @author Benjamin Rogers
ct <- function(wavObj, tnew){
## get old time.
t.old <- duration(wavObj)
wavObj@samp.rate <- wavObj@samp.rate * t.old/tnew
wavObj
}
## Not sure if necessary, but oh well.
setWavPlayer('afplay')
|
###################################################
### code chunk number 2: eEvents1
###################################################
# Expected event count and enrollment for a single stratum under a
# piecewise exponential failure/dropout model with piecewise constant
# enrollment.
#
#   lambda - failure rates for periods whose durations are in S
#            (the final rate extends through the analysis time T)
#   eta    - dropout (censoring) rates, matched to the lambda periods
#   gamma  - enrollment rates for periods whose durations are in R
#   R, S   - durations of the enrollment-rate / failure-rate periods
#   T      - calendar time of the analysis
#   Tfinal - planned end-of-study time (defaults to T)
#   minfup - minimum follow-up at end of study
#   simple - if TRUE return only the inputs plus d (expected events) and
#            n (expected enrollment); otherwise also return intermediate
#            vectors from the computation
#
# The internal grid T3 is in *follow-up* time: a subject enrolled at
# calendar time u has follow-up T - u at the analysis.
eEvents1 <- function(lambda=1, eta=0, gamma=1, R=1, S=NULL,
                     T=2, Tfinal=NULL, minfup=0, simple=TRUE)
{
  # Minimum follow-up as of this (possibly interim) analysis time.
  if (is.null(Tfinal))
  { Tfinal <- T
    minfupia <- minfup
  }
  else minfupia <- max(0, minfup - (Tfinal - T))
  nlambda <- length(lambda)
  if (length(eta) == 1 && nlambda > 1)
    eta <- array(eta, nlambda)
  # T1: follow-up times at which failure/dropout rates change, capped at T.
  T1 <- cumsum(S)
  T1 <- c(T1[T1 < T], T)
  # T2: follow-up times of the enrollment-period boundaries, floored at
  # the interim minimum follow-up; enrollment rates beyond the horizon
  # are zeroed out.
  T2 <- T - cumsum(R)
  T2[T2 < minfupia] <- minfupia
  i <- seq_along(gamma)
  gamma[i > length(unique(T2))] <- 0
  T2 <- unique(c(T, T2[T2 > 0]))
  # T3: merged grid of all change points; s holds the period durations.
  T3 <- sort(unique(c(T1, T2)))
  if (sum(R) >= T) T2 <- c(T2, 0)
  nperiod <- length(T3)
  # seq_len() rather than 1:(nperiod-1): when the merged grid collapses
  # to a single period (e.g. sum(R) == T with minfup == 0), 1:0 indexing
  # silently produced malformed length-2 intermediate vectors.
  s <- T3 - c(0, T3[seq_len(nperiod - 1)])
  # Expand the piecewise rates onto the merged grid (last rate is the
  # default, earlier periods overwrite it from the back).
  lam <- array(lambda[nlambda], nperiod)
  et <- array(eta[nlambda], nperiod)
  gam <- array(0, nperiod)
  for (i in length(T1):1)
  { indx <- T3 <= T1[i]
    lam[indx] <- lambda[i]
    et[indx] <- eta[i]
  }
  for (i in min(length(gamma) + 1, length(T2)):2)
    gam[T3 > T2[i]] <- gamma[i - 1]
  # q: per-period probability of remaining event- and dropout-free;
  # Q: cumulative survival through each period's end.
  q <- exp(-(lam + et) * s)
  Q <- cumprod(q)
  indx <- seq_len(nperiod - 1)
  Qm1 <- c(1, Q[indx])
  # p: probability a subject with full follow-up has an event in each period.
  p <- lam / (lam + et) * Qm1 * (1 - q)
  p[is.nan(p)] <- 0   # guard 0/0 when lam + et == 0 in a period
  P <- cumsum(p)
  # A: expected events contributed by subjects enrolled in each period.
  B <- gam / (lam + et) * lam * (s - (1 - q) / (lam + et))
  B[is.nan(B)] <- 0
  A <- c(0, P[indx]) * gam * s + Qm1 * B
  if (!simple)
    return(list(lambda=lambda, eta=eta, gamma=gamma, R=R, S=S,
                T=T, Tfinal=Tfinal, minfup=minfup, d=sum(A),
                n=sum(gam * s), q=q, Q=Q, p=p, P=P, B=B, A=A, T1=T1,
                T2=T2, T3=T3, lam=lam, et=et, gam=gam))
  else
    return(list(lambda=lambda, eta=eta, gamma=gamma, R=R, S=S,
                T=T, Tfinal=Tfinal, minfup=minfup, d=sum(A),
                n=sum(gam * s)))
}
###################################################
### code chunk number 4: eEvents
###################################################
# Expected events and enrollment for a (possibly stratified) design with
# piecewise exponential failure (lambda) and dropout (eta) rates and
# piecewise constant enrollment rates (gamma).
#
# lambda/eta have one row per failure-rate period (durations S) and one
# column per stratum; gamma has one row per enrollment period (durations
# R) and one column per stratum. Each stratum is delegated to eEvents1();
# the rate matrices are then trimmed to the periods actually used and
# labeled "start-end" by row and "Stratum i" by column.
# Returns an object of class "eEvents" with d and n the expected events
# and enrollment per stratum at analysis time T.
eEvents<-function(lambda=1, eta=0, gamma=1, R=1, S=NULL, T=2,
	Tfinal=NULL, minfup=0, digits=4)
{	if (is.null(Tfinal))
	{	if (minfup >= T)
			stop("Minimum follow-up greater than study duration.")
		Tfinal <- T
		minfupia <- minfup
	}
	else minfupia <- max(0, minfup-(Tfinal - T))
	# coerce rate inputs to matrices: rows = periods, columns = strata
	if (!is.matrix(lambda))
		lambda <- matrix(lambda, nrow=length(lambda))
	if (!is.matrix(eta))
		eta <- matrix(eta,nrow=nrow(lambda),ncol=ncol(lambda))
	if (!is.matrix(gamma))
		gamma<-matrix(gamma,nrow=length(R),ncol=ncol(lambda))
	# per-stratum expected enrollment (n) and events (d)
	n <- array(0,ncol(lambda))
	d <- n
	for(i in 1:ncol(lambda))
	{	a <- eEvents1(lambda=lambda[,i],eta=eta[,i],
			gamma=gamma[,i],R=R,S=S,T=T,
			Tfinal=Tfinal, minfup=minfup)
		n[i]<-a$n
		d[i]<-a$d
	}
	# label failure-rate periods by their calendar start/end times
	T1 <- cumsum(S)
	T1 <- unique(c(0,T1[T1<T],T))
	nper <- length(T1)-1
	names1 <- round(T1[1:nper],digits)
	namesper <- paste("-",round(T1[2:(nper+1)],digits),sep="")
	namesper <- paste(names1,namesper,sep="")
	if (nper < dim(lambda)[1])
		lambda <- matrix(lambda[1:nper,],nrow=nper)
	if (nper < dim(eta)[1])
		eta <- matrix(eta[1:nper,],nrow=nper)
	rownames(lambda) <- namesper
	rownames(eta) <- namesper
	colnames(lambda) <- paste("Stratum",1:ncol(lambda))
	colnames(eta) <- paste("Stratum",1:ncol(eta))
	# label enrollment periods, capping them at T - minfup (interim basis)
	T2 <- cumsum(R)
	T2[T - T2 < minfupia] <- T - minfupia
	T2 <- unique(c(0,T2))
	nper <- length(T2)-1
	names1 <- round(c(T2[1:nper]),digits)
	namesper <- paste("-",round(T2[2:(nper+1)],digits),sep="")
	namesper <- paste(names1,namesper,sep="")
	if (nper < length(gamma))
		gamma <- matrix(gamma[1:nper,],nrow=nper)
	rownames(gamma) <- namesper
	colnames(gamma) <- paste("Stratum",1:ncol(gamma))
	x <- list(lambda=lambda, eta=eta, gamma=gamma, R=R,
		S=S, T=T, Tfinal=Tfinal,
		minfup=minfup, d=d, n=n, digits=digits)
	class(x) <- "eEvents"
	return(x)
}
###################################################
### code chunk number 5: periods
###################################################
# Calendar-time boundaries for enrollment-rate periods, capped at the end
# of enrollment (T - minfup), plus their "start-end" display labels.
#
#   S      - durations of the rate periods (NULL/empty = a single period)
#   T      - study duration
#   minfup - minimum follow-up (so enrollment ends at T - minfup)
#   digits - rounding applied to the label start points
# Returns list(boundaries, labels).
periods <- function(S, T, minfup, digits)
{ periods <- cumsum(S)
  if (length(periods)==0) periods <- max(0, T - minfup)
  else
  { maxT <- max(0,min(T - minfup, max(periods)))
    periods <- periods[periods <= maxT]
    if (max(periods) < T - minfup)
      periods <- c(periods, T - minfup)
  }
  nper <- length(periods)
  # seq_len() rather than 1:(nper-1): with a single period, 1:0 indexing
  # produced a spurious extra label (e.g. c("0-8","8-8") instead of "0-8").
  names1 <- c(0, round(periods[seq_len(nper - 1)], digits))
  names <- paste("-",periods,sep="")
  names <- paste(names1,names,sep="")
  return(list(periods,names))
}
###################################################
### code chunk number 6: print.eEvents
###################################################
#' Print method for eEvents objects: writes a summary of study timing,
#' expected events and sample size (overall and by stratum), and the
#' accrual/event/censoring rate matrices to the console.
#'
#' @param x object of class eEvents (see eEvents())
#' @param digits rounding used for display
#' @param ... unused; present for consistency with print()
#' @return x, invisibly (standard print-method convention)
print.eEvents <- function(x,digits=4,...){
	# inherits() rather than class(x) != "...": comparing class() directly
	# is wrong (and an error in R >= 4.2) when an object carries more than
	# one class.
	if (!inherits(x, "eEvents"))
		stop("print.eEvents: primary argument must have class eEvents")
	cat("Study duration: Tfinal=",
		round(x$Tfinal,digits), "\n", sep="")
	cat("Analysis time: T=",
		round(x$T,digits), "\n", sep="")
	# accrual duration is capped at total enrollment-period length sum(R)
	cat("Accrual duration: ",
		round(min(x$T - max(0, x$minfup-(x$Tfinal - x$T)),
		sum(x$R)),digits), "\n", sep="")
	cat("Min. end-of-study follow-up: minfup=",
		round(x$minfup,digits), "\n", sep="")
	cat("Expected events (total): ",
		round(sum(x$d),digits), "\n",sep="")
	if (length(x$d)>1)
	{	cat("Expected events by stratum: d=",
			round(x$d[1],digits))
		for(i in 2:length(x$d))
			cat(paste("",round(x$d[i],digits)))
		cat("\n")
	}
	cat("Expected sample size (total): ",
		round(sum(x$n),digits), "\n", sep="")
	if (length(x$n)>1)
	{	cat("Sample size by stratum: n=",
			round(x$n[1],digits))
		for(i in 2:length(x$n))
			cat(paste("",round(x$n[i],digits)))
		cat("\n")
	}
	nstrata <- dim(x$lambda)[2]
	cat("Number of strata: ",
		nstrata, "\n", sep="")
	cat("Accrual rates:\n")
	print(round(x$gamma,digits))
	cat("Event rates:\n")
	print(round(x$lambda,digits))
	cat("Censoring rates:\n")
	print(round(x$eta,digits))
	# return invisibly: the previous visible return(x) is non-standard
	# for print methods
	invisible(x)
}
###################################################
### code chunk number 12: nameperiod
###################################################
# Build "start-end" labels for consecutive periods whose cumulative right
# endpoints are given in R; the first period always starts at 0.
"nameperiod" <- function(R, digits=2)
{
  ends <- round(R, digits)
  # Each period's start is the previous period's end (0 for the first).
  starts <- round(c(0, R[-length(R)]), digits)
  paste0(starts, "-", ends)
}
###################################################
### code chunk number 14: LFPWE
###################################################
# Lachin & Foulkes (1986) fixed-design sample size for a two-arm survival
# trial with piecewise exponential failure/dropout rates and piecewise
# constant enrollment. Solves for the enrollment-rate multiplier n that
# attains power 1-beta for testing log(hr/hr0); returns an nSurv-classed
# list with the scaled rates, expected events/enrollment per arm under
# H0 and H1, and variable="Accrual rate" marking what was solved for.
LFPWE <- function(alpha=.025, sided=1, beta=.1,
	lambdaC=log(2) / 6, hr=.5, hr0=1, etaC=0, etaE=0,
	gamma=1, ratio=1, R=18, S=NULL, T=24, minfup=NULL)
{	# set up parameters
	zalpha <- -qnorm(alpha/sided)
	zbeta <- -qnorm(beta)
	if (is.null(minfup)) minfup <- max(0,T-sum(R))
	# force the enrollment periods R to span exactly T - minfup,
	# truncating/extending the last period and trimming gamma to match
	if (length(R)==1) {R <- T-minfup
	}else if (sum(R) != T-minfup)
	{	cR<-cumsum(R)
		nR<-length(R)
		if (cR[length(cR)] < T - minfup) {cR[length(cR)]<-T-minfup
		}else
		{	cR[cR>T-minfup]<-T-minfup
			cR <- unique(cR)
		}
		if (length(cR)>1) {R <- cR-c(0,cR[1:(length(cR)-1)])
		}else R <- cR
		if (nR != length(R))
		{	if (is.vector(gamma)) {gamma <- gamma[1:length(R)]
			}else gamma<-gamma[1:length(R),]
		}
	}
	ngamma <- length(R)
	if (is.null(S)) {nlambda <- 1
	}else nlambda <- length(S) + 1
	# Qe/Qc: experimental/control enrollment fractions given the
	# randomization ratio (Exp/Control)
	Qe <- ratio / (1 + ratio)
	Qc <- 1 - Qe
	# compute H0 failure rates as average of control, experimental
	if (length(ratio)==1){
		lambdaC0 <- (1 + hr * ratio) / (1 + hr0 * ratio) * lambdaC
		gammaC <- gamma*Qc
		gammaE <- gamma*Qe
	}else{
		# stratum-specific randomization ratios: apply per column
		lambdaC0 <- lambdaC %*% diag((1 + hr * ratio) / (1 + hr0 * ratio))
		gammaC <- gamma%*%diag(Qc)
		gammaE <- gamma%*%diag(Qe)
	}
	# do computations
	# expected events per arm under H0 (scalar totals) and H1 (full objects)
	eDC0 <- sum(eEvents(lambda=lambdaC0, eta=etaC, gamma=gammaC,
				R=R, S=S, T=T, minfup=minfup)$d)
	eDE0 <- sum(eEvents(lambda=lambdaC0 * hr0, eta=etaE, gamma=gammaE,
				R=R, S=S, T=T, minfup=minfup)$d)
	eDC <- eEvents(lambda=lambdaC, eta=etaC, gamma=gammaC,
				R=R, S=S, T=T, minfup=minfup)
	eDE <- eEvents(lambda=lambdaC * hr, eta=etaE, gamma=gammaE,
				R=R, S=S, T=T, minfup=minfup)
	# Lachin-Foulkes inflation factor applied to the input accrual rates
	n <- ((zalpha * sqrt(1 / eDC0 + 1 / eDE0) +
		   zbeta * sqrt(1 / sum(eDC$d) + 1 / sum(eDE$d))
		  ) / log(hr / hr0))^2
	mx <- sum(eDC$n + eDE$n)
	rval <- list(alpha=alpha, sided=sided, beta=beta, power=1-beta,
			lambdaC=lambdaC, etaC=etaC, etaE=etaE, gamma=n * gamma,
			ratio=ratio, R=R, S=S, T=T, minfup=minfup,
			hr=hr, hr0=hr0, n=n * mx, d=n * sum(eDC$d + eDE$d),
			eDC=eDC$d*n, eDE=eDE$d*n, eDC0=eDC0*n, eDE0=eDE0*n,
			eNC=eDC$n*n, eNE=eDE$n*n, variable="Accrual rate")
	class(rval) <- "nSurv"
	return(rval)
}
#' Print method for nSurv fixed-design objects.
#'
#' Summarizes a fixed two-arm time-to-event design (Lachin and Foulkes,
#' 1986): what was solved for, durations, expected events and sample size,
#' rate matrices, power, type I error, and the randomization ratio.
#'
#' @param x object of class nSurv
#' @param digits rounding used for display
#' @param ... unused; present for consistency with print()
print.nSurv<-function(x,digits=4,...){
	# inherits() rather than class(x) != "...": comparing class() directly
	# is wrong (and an error in R >= 4.2) when an object carries more than
	# one class. (The original also stashed digits in x$digits and computed
	# an unused `enrollper` via periods(); both were dead code and removed.)
	if (!inherits(x, "nSurv"))
		stop("Primary argument must have class nSurv")
	x$sided <- 1   # summary is always labeled on a 1-sided basis below
	cat("Fixed design, two-arm trial with time-to-event\n")
	cat("outcome (Lachin and Foulkes, 1986).\n")
	cat("Solving for: ",x$variable,"\n")
	cat("Hazard ratio H1/H0=",
		round(x$hr,digits),
		"/", round(x$hr0,digits),"\n",sep="")
	cat("Study duration: T=",
		round(x$T,digits), "\n", sep="")
	cat("Accrual duration: ",
		round(x$T-x$minfup,digits),"\n",sep="")
	cat("Min. end-of-study follow-up: minfup=",
		round(x$minfup,digits), "\n", sep="")
	cat("Expected events (total, H1): ",
		round(x$d,digits), "\n",sep="")
	cat("Expected sample size (total): ",
		round(x$n,digits), "\n", sep="")
	cat("Accrual rates:\n")
	print(round(x$gamma,digits))
	cat("Control event rates (H1):\n")
	# full name lambdaC: x$lambda only resolved through R's partial
	# matching of list names, which is fragile
	print(round(x$lambdaC,digits))
	if (max(abs(x$etaC-x$etaE))==0)
	{	cat("Censoring rates:\n")
		print(round(x$etaC,digits))
	}
	else
	{	cat("Control censoring rates:\n")
		print(round(x$etaC,digits))
		cat("Experimental censoring rates:\n")
		print(round(x$etaE,digits))
	}
	cat("Power: 100*(1-beta)=",
		round((1-x$beta)*100,digits), "%\n",sep="")
	cat("Type I error (", x$sided,
		"-sided): 100*alpha=",
		100*x$alpha, "%\n", sep="")
	if (min(x$ratio==1)==1)
		cat("Equal randomization: ratio=1\n")
	else cat("Randomization (Exp/Control): ratio=",
		x$ratio, "\n")
}
###################################################
### code chunk number 19: KTZ
###################################################
# Workhorse evaluation for duration/power solving (used by KT and nSurv).
# Interpretation of x:
#   x given, minfup NULL  : x is the candidate minimum follow-up duration
#                           ("Follow-up duration");
#   x given, minfup given : x is the candidate accrual duration
#                           ("Accrual duration");
#   x NULL                : evaluate the design as given ("Power").
# With simple=TRUE the return value is a signed distance for uniroot():
# expected events minus n1Target when n1Target is given; otherwise
# zb + qnorm(beta), or the power pnorm(-zb) when beta is NULL.
# With simple=FALSE a full design list of class "nSurv" is returned.
KTZ <- function(x=NULL, minfup=NULL, n1Target=NULL,
	lambdaC=log(2) / 6, etaC=0, etaE=0,
	gamma=1, ratio=1, R=18, S=NULL, beta=.1,
	alpha=.025, sided=1, hr0=1, hr=.5, simple=TRUE)
{	zalpha<- -qnorm(alpha/sided)
	# control / experimental enrollment fractions from the
	# randomization ratio (Exp/Control)
	Qc <- 1/(1+ratio)
	Qe <- 1 - Qc
	# set minimum follow-up to x if that is missing and x is given
	if (!is.null(x) && is.null(minfup))
	{	minfup <- x
		if (sum(R)==Inf)
			stop("If minimum follow-up is sought, enrollment duration must be finite")
		T <- sum(R)+minfup
		variable <- "Follow-up duration"
	}
	else if (!is.null(x)&&!is.null(minfup))
	{	# otherwise, if x is given, set it to accrual duration
		T <- x+minfup
		R[length(R)]<-Inf
		variable <- "Accrual duration"
	}
	else
	{	# otherwise, set follow-up time to accrual plus follow-up
		T <- sum(R) + minfup
		variable <- "Power"
	}
	# compute H0 failure rates as average of control, experimental
	if (length(ratio)==1){
		lambdaC0 <- (1 + hr * ratio) / (1 + hr0 * ratio) * lambdaC
		gammaC <- gamma*Qc
		gammaE <- gamma*Qe
	}else{
		# stratum-specific randomization ratios: apply per column
		lambdaC0 <- lambdaC %*% diag((1 + hr * ratio) / (1 + hr0 * ratio))
		gammaC <- gamma%*%diag(Qc)
		gammaE <- gamma%*%diag(Qe)
	}
	# do computations
	# expected events per arm under H1
	eDC <- eEvents(lambda=lambdaC, eta=etaC, gamma=gammaC,
			R=R, S=S, T=T, minfup=minfup)
	eDE <- eEvents(lambda=lambdaC * hr, eta=etaE, gamma=gammaE,
			R=R, S=S, T=T, minfup=minfup)
	# if this is all that is needed, return difference
	# from targeted number of events
	if (simple && !is.null(n1Target))
		return(sum(eDC$d+eDE$d)-n1Target)
	# expected events per arm under H0
	eDC0 <- eEvents(lambda=lambdaC0, eta=etaC, gamma=gammaC,
			R=R, S=S, T=T, minfup=minfup)
	eDE0 <- eEvents(lambda=lambdaC0 * hr0, eta=etaE, gamma=gammaE,
			R=R, S=S, T=T, minfup=minfup)
	# compute Z-value related to power from power equation
	zb <- (log(hr0 / hr) -
		zalpha * sqrt(1 / sum(eDC0$d) + 1 / sum(eDE0$d)))/
		sqrt(1 / sum(eDC$d) + 1 / sum(eDE$d))
	# if that is all that is needed, return difference from
	# targeted value
	if (simple)
	{	if (!is.null(beta)) return(zb + qnorm(beta))
		else return(pnorm(-zb))
	}
	# compute power
	power <- pnorm(zb)
	beta <- 1-power
	# set accrual period durations
	# (re-fit R so the periods span exactly T - minfup, trimming gamma rows
	# if periods were dropped)
	if (sum(R) != T-minfup)
	{	if (length(R)==1) R <- T-minfup
		else
		{	nR <- length(R)
			cR <- cumsum(R)
			cR[cR>T-minfup] <- T - minfup
			cR <- unique(cR)
			# NOTE(review): indexes cR by length(R), not length(cR) --
			# verify this is intended after unique() may have shortened cR
			cR[length(R)] <- T - minfup
			if (length(cR)==1) R <- cR
			else R <- cR - c(0,cR[1:(length(cR)-1)])
			if (length(R) != nR)
			{	gamma <- matrix(gamma[1:length(R),], nrow=length(R))
				gdim <- dim(gamma)
			}
		}
	}
	rval <- list(alpha=alpha, sided=sided, beta=beta, power=power,
		lambdaC=lambdaC, etaC=etaC, etaE=etaE,
		gamma=gamma, ratio=ratio, R=R, S=S, T=T,
		minfup=minfup, hr=hr, hr0=hr0, n=sum(eDC$n + eDE$n),
		d=sum(eDC$d + eDE$d), tol=NULL, eDC=eDC$d, eDE=eDE$d,
		eDC0=eDC0$d, eDE0=eDE0$d, eNC=eDC$n, eNE=eDE$n,
		variable=variable)
	class(rval)<-"nSurv"
	return(rval)
}
###################################################
### code chunk number 21: KT
###################################################
# Solve for design durations given a targeted power or event count.
# If minfup is NULL, uniroot() searches KTZ() over candidate minimum
# follow-up durations (enrollment duration must be finite); otherwise it
# searches over candidate accrual durations (interval minfup + (.01,10000)).
# Returns the full nSurv-classed design from KTZ(simple=FALSE) evaluated
# at the solution, with the search tolerance stored in $tol.
KT <- function(alpha=.025, sided=1, beta=.1,
	lambdaC=log(2) / 6, hr=.5, hr0=1, etaC=0, etaE=0,
	gamma=1, ratio=1, R=18, S=NULL, minfup=NULL,
	n1Target=NULL, tol = .Machine$double.eps^0.25)
{	# set up parameters
	ngamma <- length(R)
	if (is.null(S)) {nlambda <- 1
	}else nlambda <- length(S) + 1
	Qe <- ratio / (1 + ratio)
	Qc <- 1 - Qe
	# coerce rates to matrices: rows = periods, columns = strata
	if (!is.matrix(lambdaC)) lambdaC <- matrix(lambdaC)
	ldim <- dim(lambdaC)
	nstrata <- ldim[2]
	nlambda <- ldim[1]
	etaC <- matrix(etaC,nrow=nlambda,ncol=nstrata)
	etaE <- matrix(etaE,nrow=nlambda,ncol=nstrata)
	if (!is.matrix(gamma)) gamma <- matrix(gamma)
	gdim <- dim(gamma)
	eCdim <- dim(etaC)
	eEdim <- dim(etaE)
	# search for trial duration needed to achieve desired power
	if (is.null(minfup))
	{	if (sum(R)==Inf){
			stop("Enrollment duration must be specified as finite")}
		# bracket the root: KTZ is the signed distance from the target,
		# so it must change sign between minimal and huge follow-up
		left <- KTZ(.01, lambdaC=lambdaC, n1Target=n1Target,
			etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
			R=R, S=S, beta=beta, alpha=alpha, sided=sided,
			hr0=hr0, hr=hr)
		right <- KTZ(1000, lambdaC=lambdaC, n1Target=n1Target,
			etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
			R=R, S=S, beta=beta, alpha=alpha, sided=sided,
			hr0=hr0, hr=hr)
		if (left>0) stop("Enrollment duration over-powers trial")
		if (right<0) stop("Enrollment duration insufficient to power trial")
		y <- uniroot(f=KTZ,interval=c(.01,10000), lambdaC=lambdaC,
			etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
			R=R, S=S, beta=beta, alpha=alpha, sided=sided,
			hr0=hr0, hr=hr, tol=tol, n1Target=n1Target)
		minfup <- y$root
		# re-evaluate at the root to obtain the full design object
		xx<-KTZ(x=y$root,lambdaC=lambdaC,
			etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
			R=R, S=S, minfup=NULL, beta=beta, alpha=alpha,
			sided=sided, hr0=hr0, hr=hr, simple=F)
		xx$tol <- tol
		return(xx)
	}else
	{	# minfup fixed: solve for the accrual duration instead
		y <- uniroot(f=KTZ, interval=minfup+c(.01,10000), lambdaC=lambdaC,
			n1Target=n1Target,
			etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
			R=R, S=S, minfup=minfup, beta=beta,
			alpha=alpha, sided=sided, hr0=hr0, hr=hr, tol=tol)
		xx <- KTZ(x=y$root,lambdaC=lambdaC,
			etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
			R=R, S=S, minfup=minfup, beta=beta, alpha=alpha,
			sided=sided, hr0=hr0, hr=hr, simple=F)
		xx$tol<-tol
		return(xx)
	}
}
###################################################
### code chunk number 25: nSurv
###################################################
# Fixed-design sample size / power for a two-arm time-to-event trial.
# Dispatches on which inputs are left unspecified:
#   - minfup or T NULL : solve for follow-up or accrual duration via KT();
#   - beta NULL        : compute power for the design as given via KTZ();
#   - otherwise        : solve for the accrual rate via LFPWE()
#                        (Lachin and Foulkes, 1986).
# Afterwards the rate matrices of the returned nSurv object are labeled
# "start-end" by period and "Stratum i" by column.
nSurv <- function(lambdaC=log(2)/6, hr=.6, hr0=1, eta = 0, etaE=NULL,
	gamma=1, R=12, S=NULL, T=NULL, minfup = NULL, ratio = 1,
	alpha = 0.025, beta = 0.10, sided = 1, tol = .Machine$double.eps^0.25)
{	if (is.null(etaE)) etaE<-eta
	# set up rates as matrices with row and column names
	# default is 1 stratum if lambdaC not input as matrix
	if (is.vector(lambdaC)) lambdaC <- matrix(lambdaC)
	ldim <- dim(lambdaC)
	nstrata <- ldim[2]
	nlambda <- ldim[1]
	rownames(lambdaC) <- paste("Period", 1:nlambda)
	colnames(lambdaC) <- paste("Stratum", 1:nstrata)
	etaC <- matrix(eta,nrow=nlambda,ncol=nstrata)
	etaE <- matrix(etaE,nrow=nlambda,ncol=nstrata)
	if (!is.matrix(gamma)) gamma <- matrix(gamma)
	gdim <- dim(gamma)
	eCdim <- dim(etaC)
	eEdim <- dim(etaE)
	# choose what to solve for based on which arguments are missing
	if (is.null(minfup) || is.null(T))
		xx<-KT(lambdaC=lambdaC, hr=hr, hr0=hr0, etaC=etaC, etaE=etaE,
			gamma=gamma, R=R, S=S, minfup = minfup, ratio = ratio,
			alpha=alpha, sided=sided, beta = beta, tol = tol)
	else if (is.null(beta))
		xx<-KTZ(lambdaC=lambdaC, hr=hr, hr0=hr0, etaC=etaC, etaE=etaE,
			gamma=gamma, R=R, S=S, minfup = minfup, ratio = ratio,
			alpha=alpha, sided=sided, beta = beta, simple=F)
	else
		xx<-LFPWE(lambdaC=lambdaC, hr=hr, hr0=hr0, etaC=etaC, etaE=etaE,
			gamma=gamma, R=R, S=S, T=T, minfup = minfup, ratio = ratio,
			alpha=alpha, sided=sided, beta = beta)
	# label rate matrices by period ("start-end") and stratum for printing
	nameR<-nameperiod(cumsum(xx$R))
	stratnames <- paste("Stratum",1:ncol(xx$lambdaC))
	if (is.null(xx$S)) nameS<-"0-Inf"
	else nameS <- nameperiod(cumsum(c(xx$S,Inf)))
	rownames(xx$lambdaC)<-nameS
	colnames(xx$lambdaC)<-stratnames
	rownames(xx$etaC)<-nameS
	colnames(xx$etaC)<-stratnames
	rownames(xx$etaE)<-nameS
	colnames(xx$etaE)<-stratnames
	rownames(xx$gamma)<-nameR
	colnames(xx$gamma)<-stratnames
	return(xx)
}
###################################################
### code chunk number 33: gsnSurv
###################################################
# Re-size a fixed design x (class nSurv) so that its expected number of
# events equals nEvents, preserving whichever quantity x solved for:
#   "Accrual rate"     : scale the enrollment rates (and hence n, d and
#                        all expected-count fields) by nEvents / x$d;
#   "Accrual duration" : re-solve the accrual duration via KT() with the
#                        minimum follow-up held fixed;
#   otherwise          : re-solve the minimum follow-up via KT()
#                        (minfup=NULL).
# Returns a list of class "gsSize".
gsnSurv <- function(x,nEvents)
{	if (x$variable=="Accrual rate")
	{	Ifct <- nEvents/x$d	# inflation factor applied throughout
		rval <- list(lambdaC=x$lambdaC, etaC=x$etaC, etaE=x$etaE,
			gamma=x$gamma*Ifct, ratio=x$ratio, R=x$R, S=x$S, T=x$T,
			minfup=x$minfup, hr=x$hr, hr0=x$hr0, n=x$n*Ifct, d=nEvents,
			eDC=x$eDC*Ifct,eDE=x$eDE*Ifct,eDC0=x$eDC0*Ifct,
			eDE0=x$eDE0*Ifct,eNC=x$eNC*Ifct,eNE=x$eNE*Ifct,
			variable=x$variable)
	}
	else if (x$variable=="Accrual duration")
	{	y<-KT(n1Target=nEvents,minfup=x$minfup,lambdaC=x$lambdaC,etaC=x$etaC,
			etaE=x$etaE,R=x$R,S=x$S,hr=x$hr,hr0=x$hr0,gamma=x$gamma,
			ratio=x$ratio,tol=x$tol)
		rval <- list(lambdaC=x$lambdaC, etaC=x$etaC, etaE=x$etaE,
			gamma=x$gamma, ratio=x$ratio, R=y$R, S=x$S, T=y$T,
			minfup=y$minfup, hr=x$hr, hr0=x$hr0, n=y$n, d=nEvents,
			eDC=y$eDC,eDE=y$eDE,eDC0=y$eDC0,
			eDE0=y$eDE0,eNC=y$eNC,eNE=y$eNE,tol=x$tol,
			variable=x$variable)
	}
	else
	{	y<-KT(n1Target=nEvents,minfup=NULL,lambdaC=x$lambdaC,etaC=x$etaC,
			etaE=x$etaE,R=x$R,S=x$S,hr=x$hr,hr0=x$hr0,gamma=x$gamma,
			ratio=x$ratio,tol=x$tol)
		rval <- list(lambdaC=x$lambdaC, etaC=x$etaC, etaE=x$etaE,
			gamma=x$gamma, ratio=x$ratio, R=x$R, S=x$S, T=y$T,
			minfup=y$minfup, hr=x$hr, hr0=x$hr0, n=y$n, d=nEvents,
			eDC=y$eDC,eDE=y$eDE,eDC0=y$eDC0,
			eDE0=y$eDE0,eNC=y$eNC,eNE=y$eNE,tol=x$tol,
			variable=x$variable)
	}
	class(rval) <- "gsSize"
	return(rval)
}
###################################################
### code chunk number 35: tEventsIA
###################################################
# Find the calendar time at which the expected proportion `timing` of the
# final planned events has accrued, by root-finding nEventsIA() over
# (0, T). Returns nEventsIA(simple=FALSE): the interim time plus expected
# events and enrollment by arm.
tEventsIA<-function(x, timing=.25, tol = .Machine$double.eps^0.25)
{	T <- x$T[length(x$T)]	# final planned analysis time
	z <- uniroot(f=nEventsIA, interval=c(0.0001,T-.0001), x=x,
		target=timing, tol=tol,simple=TRUE)
	return(nEventsIA(tIA=z$root,x=x,simple=FALSE))
}
#' Expected events at an interim analysis time.
#'
#' With simple=TRUE, returns the expected events at calendar time tIA
#' minus target times the final planned event count -- the signed-distance
#' form needed by uniroot() in tEventsIA(). With simple=FALSE, returns the
#' interim time plus expected events (eDC/eDE) and enrollment (eNC/eNE)
#' by arm.
#'
#' @param tIA candidate interim calendar time
#' @param x design object (gsSize from gsnSurv(), or one carrying
#'   per-analysis eDC/eDE matrices)
#' @param target targeted fraction of final planned events
#' @param simple if TRUE return the uniroot() distance, else a list
nEventsIA <- function(tIA=5, x=NULL, target=0, simple=TRUE)
{	Qe <- x$ratio/(1+x$ratio)	# experimental-arm enrollment fraction
	eDC <- eEvents(lambda=x$lambdaC, eta=x$etaC,
		gamma=x$gamma*(1-Qe), R=x$R, S=x$S, T=tIA,
		Tfinal=x$T[length(x$T)], minfup=x$minfup)
	eDE <- eEvents(lambda=x$lambdaC * x$hr, eta=x$etaC,
		gamma=x$gamma*Qe, R=x$R, S=x$S, T=tIA,
		Tfinal=x$T[length(x$T)], minfup=x$minfup)
	if (simple)
	{	# final planned event count: scalar d for gsSize objects, otherwise
		# the last row/element of the per-analysis expected-event arrays.
		# inherits() is the robust class test (class(x)[1] == "..." breaks
		# if classes are reordered or prepended).
		if (inherits(x, "gsSize")) d <- x$d
		else if(!is.matrix(x$eDC)) d <- sum(x$eDC[length(x$eDC)]+x$eDE[length(x$eDE)])
		else d <- sum(x$eDC[nrow(x$eDC),]+x$eDE[nrow(x$eDE),])
		return(sum(eDC$d+eDE$d)-target*d)
	}
	else return(list(T=tIA,eDC=eDC$d,eDE=eDE$d,eNC=eDC$n,eNE=eDE$n))
}
###################################################
### code chunk number 36: gsSurv
###################################################
# Group sequential design for a two-arm time-to-event trial:
#   1. derive the fixed design with nSurv();
#   2. inflate the event count and compute boundaries with gsDesign();
#   3. size the final analysis with gsnSurv() and locate each interim's
#      calendar time / expected events with tEventsIA().
# Returns an object of class c("gsSurv","gsDesign") carrying per-analysis
# times (T), expected events (eDC/eDE) and enrollment (eNC/eNE) by arm,
# and rate matrices labeled by period and stratum.
gsSurv<-function(k=3, test.type=4, alpha=0.025, sided=1,
	beta=0.1, astar=0, timing=1, sfu=sfHSD, sfupar=-4,
	sfl=sfHSD, sflpar=-2, r=18,
	lambdaC=log(2)/6, hr=.6, hr0=1, eta=0, etaE=NULL,
	gamma=1, R=12, S=NULL, T=NULL, minfup=NULL, ratio=1,
	tol = .Machine$double.eps^0.25)
{	x<-nSurv(lambdaC=lambdaC, hr=hr, hr0=hr0, eta = eta, etaE=etaE,
		gamma=gamma, R=R, S=S, T=T, minfup = minfup, ratio = ratio,
		alpha = alpha, beta = beta, sided = sided, tol = tol)
	# group sequential boundaries with the fixed-design event count n.fix
	y<-gsDesign(k=k,test.type=test.type,alpha=alpha/sided,
		beta=beta, astar=astar, n.fix=x$d, timing=timing,
		sfu=sfu, sfupar=sfupar, sfl=sfl, sflpar=sflpar, tol=tol,
		delta1=log(hr), delta0=log(hr0))
	# re-size the design for the inflated final event count
	z<-gsnSurv(x,y$n.I[k])
	eDC <- NULL
	eDE <- NULL
	eNC <- NULL
	eNE <- NULL
	T <- NULL
	# calendar time and expected events/enrollment at each interim
	for(i in 1:(k-1)){
		xx <- tEventsIA(z,y$timing[i],tol)
		T <- c(T,xx$T)
		eDC <- rbind(eDC,xx$eDC)
		eDE <- rbind(eDE,xx$eDE)
		eNC <- rbind(eNC,xx$eNC)
		eNE <- rbind(eNE,xx$eNE)
	}
	# append the final analysis and copy design descriptors onto y
	y$T <- c(T,z$T)
	y$eDC <- rbind(eDC,z$eDC)
	y$eDE <- rbind(eDE,z$eDE)
	y$eNC <- rbind(eNC,z$eNC)
	y$eNE <- rbind(eNE,z$eNE)
	y$hr=hr; y$hr0=hr0; y$R=z$R; y$S=z$S; y$minfup=z$minfup;
	y$gamma=z$gamma; y$ratio=ratio; y$lambdaC=z$lambdaC;
	y$etaC=z$etaC; y$etaE=z$etaE; y$variable=x$variable; y$tol=tol
	class(y) <- c("gsSurv","gsDesign")
	# label rate matrices by period ("start-end") and stratum
	nameR<-nameperiod(cumsum(y$R))
	stratnames <- paste("Stratum",1:ncol(y$lambdaC))
	if (is.null(y$S)) nameS<-"0-Inf"
	else nameS <- nameperiod(cumsum(c(y$S,Inf)))
	rownames(y$lambdaC)<-nameS
	colnames(y$lambdaC)<-stratnames
	rownames(y$etaC)<-nameS
	colnames(y$etaC)<-stratnames
	rownames(y$etaE)<-nameS
	colnames(y$etaE)<-stratnames
	rownames(y$gamma)<-nameR
	colnames(y$gamma)<-stratnames
	return(y)
}
#' Print method for gsSurv objects: design header, the underlying gsDesign
#' summary, a per-analysis table (time, expected n and events, and the
#' boundaries translated to the hazard-ratio scale), and the
#' accrual/event/censoring rate matrices.
#'
#' @param x object of class gsSurv
#' @param digits rounding used for the rate matrices
#' @param ... unused; present for consistency with print()
print.gsSurv<-function(x,digits=2,...){
	cat("Time to event group sequential design with HR=",x$hr,"\n")
	if (x$hr0 != 1) cat("Non-inferiority design with null HR=",x$hr0,"\n")
	if (min(x$ratio==1)==1)
		cat("Equal randomization: ratio=1\n")
	else {cat("Randomization (Exp/Control): ratio=",
		x$ratio, "\n")
		if (length(x$ratio)>1) cat("(randomization ratios shown by strata)\n")
	}
	print.gsDesign(x)
	# per-analysis summary; a futility column only exists for designs
	# with a lower bound (test.type != 1)
	if(x$test.type != 1){
		y<-cbind(x$T,(x$eNC+x$eNE)%*%array(1,ncol(x$eNE)),
			(x$eDC+x$eDE)%*%array(1,ncol(x$eNE)),
			round(zn2hr(x$lower$bound,x$n.I,x$ratio,hr0=x$hr0,hr1=x$hr),3),
			round(zn2hr(x$upper$bound,x$n.I,x$ratio,hr0=x$hr0,hr1=x$hr),3))
		colnames(y)<-c("T","n","Events","HR futility","HR efficacy")
	}else{
		y<-cbind(x$T,(x$eNC+x$eNE)%*%array(1,ncol(x$eNE)),
			(x$eDC+x$eDE)%*%array(1,ncol(x$eNE)),
			round(zn2hr(x$upper$bound,x$n.I,x$ratio,hr0=x$hr0,hr1=x$hr),3))
		colnames(y)<-c("T","n","Events","HR efficacy")
	}
	rnames<-paste("IA",1:(x$k))
	rnames[length(rnames)]<-"Final"
	rownames(y)<-rnames
	print(y)
	cat("Accrual rates:\n")
	print(round(x$gamma,digits))
	cat("Control event rates (H1):\n")
	# full name lambdaC: x$lambda only resolved through R's partial
	# matching of list names, which is fragile
	print(round(x$lambdaC,digits))
	if (max(abs(x$etaC-x$etaE))==0)
	{	cat("Censoring rates:\n")
		print(round(x$etaC,digits))
	}
	else
	{	cat("Control censoring rates:\n")
		print(round(x$etaC,digits))
		cat("Experimental censoring rates:\n")
		print(round(x$etaE,digits))
	}
}
# LaTeX summary table for a gsSurv design via the xtable package.
# Builds one 5-row group per analysis (Z-value, HR at the bound, nominal
# 1-sided p, cumulative crossing probability under HR=hr0 and HR=hr) with
# an efficacy column and, unless test.type==1, a futility column.
# `footnote` (if given) is attached under the table in a multicolumn of
# width fnwid; `timename` labels the analysis times (default "months").
xtable.gsSurv <- function(x, caption=NULL, label=NULL, align=NULL, digits=NULL,
	display=NULL, auto=FALSE, footnote=NULL, fnwid="9cm", timename="months",...){
	k <- x$k
	# row labels for the 5 statistics, repeated once per analysis
	stat <- c("Z-value","HR","p (1-sided)", paste("P\\{Cross\\} if HR=",x$hr0,sep=""),
		paste("P\\{Cross\\} if HR=",x$hr,sep=""))
	st <- stat
	for (i in 2:k) stat <- c(stat,st)
	an <- array(" ",5*k)
	tim <- an
	enrol <- an
	fut <- an
	eff <- an
	# analysis-description rows: interim label (timing %) or "Final
	# analysis", total N, total events, and calendar time
	an[5*(0:(k-1))+1]<-c(paste("IA ",as.character(1:(k-1)),": ",
		as.character(round(100*x$timing[1:(k-1)],1)), "\\%",sep=""), "Final analysis")
	an[5*(1:(k-1))+1] <- paste("\\hline",an[5*(1:(k-1))+1])
	an[5*(0:(k-1))+2]<- paste("N:",ceiling(rowSums(x$eNC))+ceiling(rowSums(x$eNE)))
	an[5*(0:(k-1))+3]<- paste("Events:",ceiling(rowSums(x$eDC+x$eDE)))
	an[5*(0:(k-1))+4]<- paste(round(x$T,1),timename,sep=" ")
	# Z-values and HR-scale bounds (futility only when a lower bound exists)
	if (x$test.type != 1) fut[5*(0:(k-1))+1]<- as.character(round(x$lower$bound,2))
	eff[5*(0:(k-1))+1]<- as.character(round(x$upper$bound,2))
	if (x$test.type != 1) fut[5*(0:(k-1))+2]<- as.character(round(gsHR(z=x$lower$bound,i=1:k,x,ratio=x$ratio)*x$hr0,2))
	eff[5*(0:(k-1))+2]<- as.character(round(gsHR(z=x$upper$bound,i=1:k,x,ratio=x$ratio)*x$hr0,2))
	# probabilities rounded to 4 digits; exact zeros shown as "< 0.0001"
	asp <- as.character(round(pnorm(-x$upper$bound),4))
	asp[asp=="0"]<-"$< 0.0001$"
	eff[5*(0:(k-1))+3] <- asp
	asp <- as.character(round(cumsum(x$upper$prob[,1]),4))
	asp[asp=="0"]<-"$< 0.0001$"
	eff[5*(0:(k-1))+4] <- asp
	asp <- as.character(round(cumsum(x$upper$prob[,2]),4))
	asp[asp=="0"]<-"$< 0.0001$"
	eff[5*(0:(k-1))+5] <- asp
	if (x$test.type != 1) {
		bsp <- as.character(round(pnorm(-x$lower$bound),4))
		bsp[bsp=="0"]<-" $< 0.0001$"
		fut[5*(0:(k-1))+3] <- bsp
		bsp <- as.character(round(cumsum(x$lower$prob[,1]),4))
		bsp[bsp=="0"]<-"$< 0.0001$"
		fut[5*(0:(k-1))+4] <- bsp
		bsp <- as.character(round(cumsum(x$lower$prob[,2]),4))
		bsp[bsp=="0"]<-"$< 0.0001$"
		fut[5*(0:(k-1))+5] <- bsp
	}
	# optional footnote appended to the last efficacy cell as raw LaTeX
	neff <- length(eff)
	if (!is.null(footnote)) eff[neff] <-
		paste(eff[neff],"\\\\ \\hline \\multicolumn{4}{p{",fnwid,"}}{\\footnotesize",footnote,"}")
	if (x$test.type != 1){
		xxtab <- data.frame(cbind(an,stat,fut,eff))
		colnames(xxtab) <- c("Analysis","Value","Futility","Efficacy")
	}else{
		xxtab <- data.frame(cbind(an,stat,eff))
		colnames(xxtab) <- c("Analysis","Value","Efficacy")
	}
	return(xtable(xxtab, caption=caption, label=label, align=align, digits=digits,
		display=display,auto=auto,...))
}
| /gsDesign/R/gsSurv.R | no_license | ingted/R-Examples | R | false | false | 27,610 | r | ###################################################
### code chunk number 2: eEvents1
###################################################
# Expected number of events for a single stratum under piecewise
# exponential failure (lambda) and dropout (eta) hazards with piecewise
# constant enrollment rates (gamma), evaluated at calendar time T.
#
# lambda, eta: failure and dropout hazard rates by hazard-rate period.
# gamma:  enrollment rates for the periods whose durations are in R.
# R:      durations of the enrollment-rate periods.
# S:      durations of the hazard-rate periods (final period open-ended).
# T:      calendar time of the analysis.
# Tfinal: planned end-of-study time (defaults to T).
# minfup: minimum follow-up required at end of study.
# simple: if TRUE return only the inputs plus expected events (d) and
#         expected enrollment (n); otherwise also return the per-period
#         intermediate quantities for debugging/inspection.
eEvents1<-function(lambda=1, eta=0, gamma=1, R=1, S=NULL,
T=2, Tfinal=NULL, minfup=0, simple=TRUE)
{
if (is.null(Tfinal))
{ Tfinal <- T
minfupia <- minfup
}
# minimum follow-up available at this (interim) analysis time
else minfupia <- max(0, minfup-(Tfinal - T))
nlambda <- length(lambda)
if (length(eta)==1 & nlambda > 1)
eta <- array(eta,nlambda)
# T1: hazard-rate change points, truncated at the analysis time T
T1 <- cumsum(S)
T1 <- c(T1[T1<T],T)
# T2: follow-up time remaining for subjects from each enrollment period
T2 <- T - cumsum(R)
T2[T2 < minfupia] <- minfupia
i <- 1:length(gamma)
gamma[i>length(unique(T2))] <- 0
T2 <- unique(c(T,T2[T2 > 0]))
# T3: merged grid of all change points; s holds the interval lengths
T3 <- sort(unique(c(T1,T2)))
if (sum(R) >= T) T2 <- c(T2,0)
nperiod <- length(T3)
s <- T3-c(0,T3[1:(nperiod-1)])
# spread the period-wise rates onto the merged time grid
lam <- array(lambda[nlambda],nperiod)
et <- array(eta[nlambda],nperiod)
gam <- array(0,nperiod)
for(i in length(T1):1)
{ indx <- T3<=T1[i]
lam[indx]<-lambda[i]
et[indx]<-eta[i]
}
for(i in min(length(gamma)+1,length(T2)):2)
gam[T3>T2[i]]<-gamma[i-1]
# q/Q: per-interval and cumulative survival against event + dropout;
# p/P: per-interval and cumulative probability of observing an event
q <- exp(-(lam+et)*s)
Q <- cumprod(q)
indx<-1:(nperiod-1)
Qm1 <- c(1,Q[indx])
p <- lam/(lam+et)*Qm1*(1-q)
p[is.nan(p)] <- 0   # guard 0/0 when lam + et == 0 in an interval
P <- cumsum(p)
# A: expected events contributed per interval -- subjects enrolled in
# earlier intervals (first term) plus those enrolled within it (B)
B <- gam/(lam+et)*lam*(s-(1-q)/(lam+et))
B[is.nan(B)]<-0
A <- c(0,P[indx])*gam*s+Qm1*B
if (!simple)
return(list(lambda=lambda, eta=eta, gamma=gamma, R=R, S=S,
T=T, Tfinal=Tfinal, minfup=minfup, d=sum(A),
n=sum(gam*s), q=q,Q=Q,p=p,P=P,B=B,A=A,T1=T1,
T2=T2,T3=T3,lam=lam,et=et,gam=gam))
else
return(list(lambda=lambda, eta=eta, gamma=gamma, R=R, S=S,
T=T, Tfinal=Tfinal, minfup=minfup, d=sum(A),
n=sum(gam*s)))
}
###################################################
### code chunk number 4: eEvents
###################################################
# Expected events and sample size across strata: applies eEvents1() to
# each stratum (one column of lambda/eta/gamma per stratum) and labels
# the returned rate matrices with the time periods and strata they
# apply to. Returns an object of class "eEvents".
eEvents<-function(lambda=1, eta=0, gamma=1, R=1, S=NULL, T=2,
Tfinal=NULL, minfup=0, digits=4)
{ if (is.null(Tfinal))
{ if (minfup >= T)
stop("Minimum follow-up greater than study duration.")
Tfinal <- T
minfupia <- minfup
}
else minfupia <- max(0, minfup-(Tfinal - T))
# coerce rate inputs to matrices: rows = time periods, cols = strata
if (!is.matrix(lambda))
lambda <- matrix(lambda, nrow=length(lambda))
if (!is.matrix(eta))
eta <- matrix(eta,nrow=nrow(lambda),ncol=ncol(lambda))
if (!is.matrix(gamma))
gamma<-matrix(gamma,nrow=length(R),ncol=ncol(lambda))
n <- array(0,ncol(lambda))
d <- n
# per-stratum expected enrollment (n) and expected events (d)
for(i in 1:ncol(lambda))
{ a <- eEvents1(lambda=lambda[,i],eta=eta[,i],
gamma=gamma[,i],R=R,S=S,T=T,
Tfinal=Tfinal, minfup=minfup)
n[i]<-a$n
d[i]<-a$d
}
# build "start-end" row labels for the hazard-rate periods
T1 <- cumsum(S)
T1 <- unique(c(0,T1[T1<T],T))
nper <- length(T1)-1
names1 <- round(T1[1:nper],digits)
namesper <- paste("-",round(T1[2:(nper+1)],digits),sep="")
namesper <- paste(names1,namesper,sep="")
# drop hazard-rate periods that start after the analysis time
if (nper < dim(lambda)[1])
lambda <- matrix(lambda[1:nper,],nrow=nper)
if (nper < dim(eta)[1])
eta <- matrix(eta[1:nper,],nrow=nper)
rownames(lambda) <- namesper
rownames(eta) <- namesper
colnames(lambda) <- paste("Stratum",1:ncol(lambda))
colnames(eta) <- paste("Stratum",1:ncol(eta))
# build "start-end" row labels for the enrollment-rate periods,
# truncated at the end of accrual
T2 <- cumsum(R)
T2[T - T2 < minfupia] <- T - minfupia
T2 <- unique(c(0,T2))
nper <- length(T2)-1
names1 <- round(c(T2[1:nper]),digits)
namesper <- paste("-",round(T2[2:(nper+1)],digits),sep="")
namesper <- paste(names1,namesper,sep="")
if (nper < length(gamma))
gamma <- matrix(gamma[1:nper,],nrow=nper)
rownames(gamma) <- namesper
colnames(gamma) <- paste("Stratum",1:ncol(gamma))
x <- list(lambda=lambda, eta=eta, gamma=gamma, R=R,
S=S, T=T, Tfinal=Tfinal,
minfup=minfup, d=d, n=n, digits=digits)
class(x) <- "eEvents"
return(x)
}
###################################################
### code chunk number 5: periods
###################################################
# Build interval break points and "start-end" labels for the accrual
# window of a design.
#
# S:      vector of period durations (may be NULL/empty); cumulative
#         sums give the interior break points.
# T:      total study duration.
# minfup: minimum follow-up, so accrual ends at T - minfup.
# digits: rounding used when formatting the interval labels.
#
# Returns a 2-element list: the break points and their "a-b" labels.
#
# Fix vs. original: `periods[1:(nper-1)]` used `1:0` when there was a
# single period, which selects element 1 instead of nothing and produced
# a spurious duplicate label; `seq_len()` handles the length-1 case.
periods <- function(S, T, minfup, digits)
{ brk <- cumsum(S)
accrual_end <- T - minfup
if (length(brk) == 0) brk <- max(0, accrual_end)
else
{ # keep break points that fall within the accrual window
cap <- max(0, min(accrual_end, max(brk)))
brk <- brk[brk <= cap]
if (max(brk) < accrual_end)
brk <- c(brk, accrual_end)
}
# each interval runs from the previous break (or 0) to its break point
starts <- c(0, round(brk[seq_len(length(brk) - 1)], digits))
labels <- paste(starts, paste("-", brk, sep=""), sep="")
return(list(brk, labels))
}
###################################################
### code chunk number 6: print.eEvents
###################################################
# Print method for "eEvents" objects: summarizes study timing, expected
# events and sample size (overall and by stratum), and the accrual,
# event and censoring rate matrices.
#
# x:      object of class "eEvents" as produced by eEvents().
# digits: number of digits used when rounding printed values.
#
# Returns x invisibly, as is conventional for print methods.
print.eEvents <- function(x,digits=4,...){
# inherits() rather than comparing class(x) directly: it accepts
# subclasses and is safe when class(x) has length > 1
if (!inherits(x, "eEvents"))
stop("print.eEvents: primary argument must have class eEvents")
cat("Study duration: Tfinal=",
round(x$Tfinal,digits), "\n", sep="")
cat("Analysis time: T=",
round(x$T,digits), "\n", sep="")
# accrual duration is capped by the total enrollment time sum(x$R)
cat("Accrual duration: ",
round(min(x$T - max(0, x$minfup-(x$Tfinal - x$T)),
sum(x$R)),digits), "\n", sep="")
cat("Min. end-of-study follow-up: minfup=",
round(x$minfup,digits), "\n", sep="")
cat("Expected events (total): ",
round(sum(x$d),digits), "\n",sep="")
if (length(x$d)>1)
{ cat("Expected events by stratum: d=",
round(x$d[1],digits))
for(i in 2:length(x$d))
cat(paste("",round(x$d[i],digits)))
cat("\n")
}
cat("Expected sample size (total): ",
round(sum(x$n),digits), "\n", sep="")
if (length(x$n)>1)
{ cat("Sample size by stratum: n=",
round(x$n[1],digits))
for(i in 2:length(x$n))
cat(paste("",round(x$n[i],digits)))
cat("\n")
}
nstrata <- dim(x$lambda)[2]
cat("Number of strata: ",
nstrata, "\n", sep="")
cat("Accrual rates:\n")
print(round(x$gamma,digits))
cat("Event rates:\n")
print(round(x$lambda,digits))
cat("Censoring rates:\n")
print(round(x$eta,digits))
invisible(x)
}
###################################################
### code chunk number 12: nameperiod
###################################################
# Format cumulative period end points R as "lower-upper" labels, e.g.
# c(6, 12) -> c("0-6", "6-12"). `digits` controls rounding of the
# printed end points.
nameperiod <- function(R, digits=2)
{ if (length(R) == 1) return(paste("0-", round(R, digits), sep=""))
lower <- c(0, R[seq_len(length(R) - 1)])
paste(round(lower, digits), "-", round(R, digits), sep="")
}
###################################################
### code chunk number 14: LFPWE
###################################################
# Lachin-Foulkes (1986) fixed-design computation for a two-arm
# time-to-event trial with piecewise exponential failure/dropout rates
# and piecewise constant enrollment; solves for the accrual-rate
# inflation needed to achieve the requested power for log(hr/hr0).
# Returns an object of class "nSurv" with inflated accrual rates
# (gamma), sample size (n) and expected events under H0 and H1.
LFPWE <- function(alpha=.025, sided=1, beta=.1,
lambdaC=log(2) / 6, hr=.5, hr0=1, etaC=0, etaE=0,
gamma=1, ratio=1, R=18, S=NULL, T=24, minfup=NULL)
{ # set up parameters
zalpha <- -qnorm(alpha/sided)
zbeta <- -qnorm(beta)
if (is.null(minfup)) minfup <- max(0,T-sum(R))
# reconcile the enrollment periods R with the accrual window T - minfup
if (length(R)==1) {R <- T-minfup
}else if (sum(R) != T-minfup)
{ cR<-cumsum(R)
nR<-length(R)
if (cR[length(cR)] < T - minfup) {cR[length(cR)]<-T-minfup
}else
{ cR[cR>T-minfup]<-T-minfup
cR <- unique(cR)
}
if (length(cR)>1) {R <- cR-c(0,cR[1:(length(cR)-1)])
}else R <- cR
# drop accrual rates for periods eliminated by the truncation
if (nR != length(R))
{ if (is.vector(gamma)) {gamma <- gamma[1:length(R)]
}else gamma<-gamma[1:length(R),]
}
}
ngamma <- length(R)
if (is.null(S)) {nlambda <- 1
}else nlambda <- length(S) + 1
# randomization fractions: Qe experimental, Qc control
Qe <- ratio / (1 + ratio)
Qc <- 1 - Qe
# compute H0 failure rates as average of control, experimental
if (length(ratio)==1){
lambdaC0 <- (1 + hr * ratio) / (1 + hr0 * ratio) * lambdaC
gammaC <- gamma*Qc
gammaE <- gamma*Qe
}else{
lambdaC0 <- lambdaC %*% diag((1 + hr * ratio) / (1 + hr0 * ratio))
gammaC <- gamma%*%diag(Qc)
gammaE <- gamma%*%diag(Qe)
}
# do computations: expected events per arm under H0 (eDC0/eDE0) and
# under H1 (eDC/eDE)
eDC0 <- sum(eEvents(lambda=lambdaC0, eta=etaC, gamma=gammaC,
R=R, S=S, T=T, minfup=minfup)$d)
eDE0 <- sum(eEvents(lambda=lambdaC0 * hr0, eta=etaE, gamma=gammaE,
R=R, S=S, T=T, minfup=minfup)$d)
eDC <- eEvents(lambda=lambdaC, eta=etaC, gamma=gammaC,
R=R, S=S, T=T, minfup=minfup)
eDE <- eEvents(lambda=lambdaC * hr, eta=etaE, gamma=gammaE,
R=R, S=S, T=T, minfup=minfup)
# n is the inflation factor applied to the input accrual rates so the
# power equation for the log hazard ratio is satisfied
n <- ((zalpha * sqrt(1 / eDC0 + 1 / eDE0) +
zbeta * sqrt(1 / sum(eDC$d) + 1 / sum(eDE$d))
) / log(hr / hr0))^2
mx <- sum(eDC$n + eDE$n)
rval <- list(alpha=alpha, sided=sided, beta=beta, power=1-beta,
lambdaC=lambdaC, etaC=etaC, etaE=etaE, gamma=n * gamma,
ratio=ratio, R=R, S=S, T=T, minfup=minfup,
hr=hr, hr0=hr0, n=n * mx, d=n * sum(eDC$d + eDE$d),
eDC=eDC$d*n, eDE=eDE$d*n, eDC0=eDC0*n, eDE0=eDE0*n,
eNC=eDC$n*n, eNE=eDE$n*n, variable="Accrual rate")
class(rval) <- "nSurv"
return(rval)
}
# Print method for "nSurv" objects (fixed survival design per Lachin and
# Foulkes, 1986): reports what was solved for, design timing, expected
# events/sample size, rate assumptions, power and Type I error.
#
# x:      object of class "nSurv" as produced by nSurv().
# digits: rounding applied to printed values.
#
# Returns x invisibly.
#
# Fixes vs. original: class check now uses inherits(); the unused
# `enrollper <- periods(...)` assignment (result was never printed) is
# removed; x$lambdaC is referenced by its full name instead of relying
# on `$` partial matching of x$lambda; returns invisibly.
print.nSurv<-function(x,digits=4,...){
if (!inherits(x, "nSurv"))
stop("Primary argument must have class nSurv")
x$digits<-digits
x$sided <- 1
cat("Fixed design, two-arm trial with time-to-event\n")
cat("outcome (Lachin and Foulkes, 1986).\n")
cat("Solving for: ",x$variable,"\n")
cat("Hazard ratio H1/H0=",
round(x$hr,digits),
"/", round(x$hr0,digits),"\n",sep="")
cat("Study duration: T=",
round(x$T,digits), "\n", sep="")
cat("Accrual duration: ",
round(x$T-x$minfup,digits),"\n",sep="")
cat("Min. end-of-study follow-up: minfup=",
round(x$minfup,digits), "\n", sep="")
cat("Expected events (total, H1): ",
round(x$d,digits), "\n",sep="")
cat("Expected sample size (total): ",
round(x$n,digits), "\n", sep="")
cat("Accrual rates:\n")
print(round(x$gamma,digits))
cat("Control event rates (H1):\n")
print(round(x$lambdaC,digits))
# collapse the censoring display when both arms share the same rates
if (max(abs(x$etaC-x$etaE))==0)
{ cat("Censoring rates:\n")
print(round(x$etaC,digits))
}
else
{ cat("Control censoring rates:\n")
print(round(x$etaC,digits))
cat("Experimental censoring rates:\n")
print(round(x$etaE,digits))
}
cat("Power: 100*(1-beta)=",
round((1-x$beta)*100,digits), "%\n",sep="")
cat("Type I error (", x$sided,
"-sided): 100*alpha=",
100*x$alpha, "%\n", sep="")
if (min(x$ratio==1)==1)
cat("Equal randomization: ratio=1\n")
else cat("Randomization (Exp/Control): ratio=",
x$ratio, "\n")
invisible(x)
}
###################################################
### code chunk number 19: KTZ
###################################################
# Objective function used by KT()'s root finder. Depending on which
# arguments are supplied, the scalar x is interpreted as the minimum
# follow-up duration (minfup NULL) or the accrual duration (minfup
# given). With simple=TRUE it returns the difference from a targeted
# event count (n1Target) or from the targeted power Z-value; with
# simple=FALSE it returns the full "nSurv"-style design description.
KTZ <- function(x=NULL, minfup=NULL, n1Target=NULL,
lambdaC=log(2) / 6, etaC=0, etaE=0,
gamma=1, ratio=1, R=18, S=NULL, beta=.1,
alpha=.025, sided=1, hr0=1, hr=.5, simple=TRUE)
{ zalpha<- -qnorm(alpha/sided)
# randomization fractions: Qc control, Qe experimental
Qc <- 1/(1+ratio)
Qe <- 1 - Qc
# set minimum follow-up to x if that is missing and x is given
if (!is.null(x) && is.null(minfup))
{ minfup <- x
if (sum(R)==Inf)
stop("If minimum follow-up is sought, enrollment duration must be finite")
T <- sum(R)+minfup
variable <- "Follow-up duration"
}
else if (!is.null(x)&&!is.null(minfup))
{ # otherwise, if x is given, set it to accrual duration
T <- x+minfup
R[length(R)]<-Inf
variable <- "Accrual duration"
}
else
{ # otherwise, set follow-up time to accrual plus follow-up
T <- sum(R) + minfup
variable <- "Power"
}
# compute H0 failure rates as average of control, experimental
if (length(ratio)==1){
lambdaC0 <- (1 + hr * ratio) / (1 + hr0 * ratio) * lambdaC
gammaC <- gamma*Qc
gammaE <- gamma*Qe
}else{
lambdaC0 <- lambdaC %*% diag((1 + hr * ratio) / (1 + hr0 * ratio))
gammaC <- gamma%*%diag(Qc)
gammaE <- gamma%*%diag(Qe)
}
# do computations: expected events per arm under H1
eDC <- eEvents(lambda=lambdaC, eta=etaC, gamma=gammaC,
R=R, S=S, T=T, minfup=minfup)
eDE <- eEvents(lambda=lambdaC * hr, eta=etaE, gamma=gammaE,
R=R, S=S, T=T, minfup=minfup)
# if this is all that is needed, return difference
# from targeted number of events
if (simple && !is.null(n1Target))
return(sum(eDC$d+eDE$d)-n1Target)
# expected events per arm under H0
eDC0 <- eEvents(lambda=lambdaC0, eta=etaC, gamma=gammaC,
R=R, S=S, T=T, minfup=minfup)
eDE0 <- eEvents(lambda=lambdaC0 * hr0, eta=etaE, gamma=gammaE,
R=R, S=S, T=T, minfup=minfup)
# compute Z-value related to power from power equation
zb <- (log(hr0 / hr) -
zalpha * sqrt(1 / sum(eDC0$d) + 1 / sum(eDE0$d)))/
sqrt(1 / sum(eDC$d) + 1 / sum(eDE$d))
# if that is all that is needed, return difference from
# targeted value
if (simple)
{ if (!is.null(beta)) return(zb + qnorm(beta))
else return(pnorm(-zb))
}
# compute power
power <- pnorm(zb)
beta <- 1-power
# set accrual period durations to fit the accrual window T - minfup
if (sum(R) != T-minfup)
{ if (length(R)==1) R <- T-minfup
else
{ nR <- length(R)
cR <- cumsum(R)
cR[cR>T-minfup] <- T - minfup
cR <- unique(cR)
cR[length(R)] <- T - minfup
if (length(cR)==1) R <- cR
else R <- cR - c(0,cR[1:(length(cR)-1)])
# drop accrual rates for periods eliminated by the truncation
if (length(R) != nR)
{ gamma <- matrix(gamma[1:length(R),], nrow=length(R))
gdim <- dim(gamma)
}
}
}
rval <- list(alpha=alpha, sided=sided, beta=beta, power=power,
lambdaC=lambdaC, etaC=etaC, etaE=etaE,
gamma=gamma, ratio=ratio, R=R, S=S, T=T,
minfup=minfup, hr=hr, hr0=hr0, n=sum(eDC$n + eDE$n),
d=sum(eDC$d + eDE$d), tol=NULL, eDC=eDC$d, eDE=eDE$d,
eDC0=eDC0$d, eDE0=eDE0$d, eNC=eDC$n, eNE=eDE$n,
variable=variable)
class(rval)<-"nSurv"
return(rval)
}
###################################################
### code chunk number 21: KT
###################################################
# Solve for the design duration that achieves the requested power (or a
# targeted event count n1Target) by root finding on KTZ(): when minfup
# is NULL, search for the minimum follow-up duration given a finite
# enrollment period; otherwise search for the accrual duration.
# Returns the full "nSurv" design object at the solution.
KT <- function(alpha=.025, sided=1, beta=.1,
lambdaC=log(2) / 6, hr=.5, hr0=1, etaC=0, etaE=0,
gamma=1, ratio=1, R=18, S=NULL, minfup=NULL,
n1Target=NULL, tol = .Machine$double.eps^0.25)
{ # set up parameters
ngamma <- length(R)
if (is.null(S)) {nlambda <- 1
}else nlambda <- length(S) + 1
Qe <- ratio / (1 + ratio)
Qc <- 1 - Qe
# normalize rate inputs to matrices (rows = periods, cols = strata)
if (!is.matrix(lambdaC)) lambdaC <- matrix(lambdaC)
ldim <- dim(lambdaC)
nstrata <- ldim[2]
nlambda <- ldim[1]
etaC <- matrix(etaC,nrow=nlambda,ncol=nstrata)
etaE <- matrix(etaE,nrow=nlambda,ncol=nstrata)
if (!is.matrix(gamma)) gamma <- matrix(gamma)
gdim <- dim(gamma)
eCdim <- dim(etaC)
eEdim <- dim(etaE)
# search for trial duration needed to achieve desired power
if (is.null(minfup))
{ if (sum(R)==Inf){
stop("Enrollment duration must be specified as finite")}
# verify KTZ changes sign between a short and a long follow-up so
# that a root exists in the search interval
left <- KTZ(.01, lambdaC=lambdaC, n1Target=n1Target,
etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
R=R, S=S, beta=beta, alpha=alpha, sided=sided,
hr0=hr0, hr=hr)
right <- KTZ(1000, lambdaC=lambdaC, n1Target=n1Target,
etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
R=R, S=S, beta=beta, alpha=alpha, sided=sided,
hr0=hr0, hr=hr)
if (left>0) stop("Enrollment duration over-powers trial")
if (right<0) stop("Enrollment duration insufficient to power trial")
y <- uniroot(f=KTZ,interval=c(.01,10000), lambdaC=lambdaC,
etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
R=R, S=S, beta=beta, alpha=alpha, sided=sided,
hr0=hr0, hr=hr, tol=tol, n1Target=n1Target)
minfup <- y$root
# re-evaluate at the root for the full design description
xx<-KTZ(x=y$root,lambdaC=lambdaC,
etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
R=R, S=S, minfup=NULL, beta=beta, alpha=alpha,
sided=sided, hr0=hr0, hr=hr, simple=F)
xx$tol <- tol
return(xx)
}else
{ # fixed follow-up: solve for the accrual duration instead
y <- uniroot(f=KTZ, interval=minfup+c(.01,10000), lambdaC=lambdaC,
n1Target=n1Target,
etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
R=R, S=S, minfup=minfup, beta=beta,
alpha=alpha, sided=sided, hr0=hr0, hr=hr, tol=tol)
xx <- KTZ(x=y$root,lambdaC=lambdaC,
etaC=etaC, etaE=etaE, gamma=gamma, ratio=ratio,
R=R, S=S, minfup=minfup, beta=beta, alpha=alpha,
sided=sided, hr0=hr0, hr=hr, simple=F)
xx$tol<-tol
return(xx)
}
}
###################################################
### code chunk number 25: nSurv
###################################################
# Fixed-design computation for a two-arm time-to-event trial with
# piecewise constant enrollment, failure and dropout rates. Depending
# on which inputs are NULL this solves for:
#   - accrual and/or follow-up duration (T or minfup NULL) via KT(),
#   - power (beta NULL) via KTZ(), or
#   - the accrual rate via LFPWE().
# The returned "nSurv" object has its rate matrices labeled by time
# period and stratum.
nSurv <- function(lambdaC=log(2)/6, hr=.6, hr0=1, eta = 0, etaE=NULL,
gamma=1, R=12, S=NULL, T=NULL, minfup = NULL, ratio = 1,
alpha = 0.025, beta = 0.10, sided = 1, tol = .Machine$double.eps^0.25)
{ if (is.null(etaE)) etaE<-eta
# set up rates as matrices with row and column names
# default is 1 stratum if lambdaC not input as matrix
if (is.vector(lambdaC)) lambdaC <- matrix(lambdaC)
ldim <- dim(lambdaC)
nstrata <- ldim[2]
nlambda <- ldim[1]
rownames(lambdaC) <- paste("Period", 1:nlambda)
colnames(lambdaC) <- paste("Stratum", 1:nstrata)
etaC <- matrix(eta,nrow=nlambda,ncol=nstrata)
etaE <- matrix(etaE,nrow=nlambda,ncol=nstrata)
if (!is.matrix(gamma)) gamma <- matrix(gamma)
gdim <- dim(gamma)
eCdim <- dim(etaC)
eEdim <- dim(etaE)
# dispatch to the appropriate solver based on which inputs are NULL
if (is.null(minfup) || is.null(T))
xx<-KT(lambdaC=lambdaC, hr=hr, hr0=hr0, etaC=etaC, etaE=etaE,
gamma=gamma, R=R, S=S, minfup = minfup, ratio = ratio,
alpha=alpha, sided=sided, beta = beta, tol = tol)
else if (is.null(beta))
xx<-KTZ(lambdaC=lambdaC, hr=hr, hr0=hr0, etaC=etaC, etaE=etaE,
gamma=gamma, R=R, S=S, minfup = minfup, ratio = ratio,
alpha=alpha, sided=sided, beta = beta, simple=F)
else
xx<-LFPWE(lambdaC=lambdaC, hr=hr, hr0=hr0, etaC=etaC, etaE=etaE,
gamma=gamma, R=R, S=S, T=T, minfup = minfup, ratio = ratio,
alpha=alpha, sided=sided, beta = beta)
# label the returned rate matrices with period ranges and strata
nameR<-nameperiod(cumsum(xx$R))
stratnames <- paste("Stratum",1:ncol(xx$lambdaC))
if (is.null(xx$S)) nameS<-"0-Inf"
else nameS <- nameperiod(cumsum(c(xx$S,Inf)))
rownames(xx$lambdaC)<-nameS
colnames(xx$lambdaC)<-stratnames
rownames(xx$etaC)<-nameS
colnames(xx$etaC)<-stratnames
rownames(xx$etaE)<-nameS
colnames(xx$etaE)<-stratnames
rownames(xx$gamma)<-nameR
colnames(xx$gamma)<-stratnames
return(xx)
}
###################################################
### code chunk number 33: gsnSurv
###################################################
# Update a fixed "nSurv" design so that it targets nEvents events:
# if the accrual rate was the solved-for quantity, scale accrual rates
# and expected counts proportionally; otherwise re-solve for accrual
# duration (or follow-up duration) via KT() targeting nEvents.
# Returns an object of class "gsSize".
gsnSurv <- function(x,nEvents)
{ if (x$variable=="Accrual rate")
{ # proportional inflation: events scale linearly with accrual rate
Ifct <- nEvents/x$d
rval <- list(lambdaC=x$lambdaC, etaC=x$etaC, etaE=x$etaE,
gamma=x$gamma*Ifct, ratio=x$ratio, R=x$R, S=x$S, T=x$T,
minfup=x$minfup, hr=x$hr, hr0=x$hr0, n=x$n*Ifct, d=nEvents,
eDC=x$eDC*Ifct,eDE=x$eDE*Ifct,eDC0=x$eDC0*Ifct,
eDE0=x$eDE0*Ifct,eNC=x$eNC*Ifct,eNE=x$eNE*Ifct,
variable=x$variable)
}
else if (x$variable=="Accrual duration")
{ # re-solve for the accrual duration yielding nEvents
y<-KT(n1Target=nEvents,minfup=x$minfup,lambdaC=x$lambdaC,etaC=x$etaC,
etaE=x$etaE,R=x$R,S=x$S,hr=x$hr,hr0=x$hr0,gamma=x$gamma,
ratio=x$ratio,tol=x$tol)
rval <- list(lambdaC=x$lambdaC, etaC=x$etaC, etaE=x$etaE,
gamma=x$gamma, ratio=x$ratio, R=y$R, S=x$S, T=y$T,
minfup=y$minfup, hr=x$hr, hr0=x$hr0, n=y$n, d=nEvents,
eDC=y$eDC,eDE=y$eDE,eDC0=y$eDC0,
eDE0=y$eDE0,eNC=y$eNC,eNE=y$eNE,tol=x$tol,
variable=x$variable)
}
else
{ # "Follow-up duration": re-solve for the minimum follow-up
y<-KT(n1Target=nEvents,minfup=NULL,lambdaC=x$lambdaC,etaC=x$etaC,
etaE=x$etaE,R=x$R,S=x$S,hr=x$hr,hr0=x$hr0,gamma=x$gamma,
ratio=x$ratio,tol=x$tol)
rval <- list(lambdaC=x$lambdaC, etaC=x$etaC, etaE=x$etaE,
gamma=x$gamma, ratio=x$ratio, R=x$R, S=x$S, T=y$T,
minfup=y$minfup, hr=x$hr, hr0=x$hr0, n=y$n, d=nEvents,
eDC=y$eDC,eDE=y$eDE,eDC0=y$eDC0,
eDE0=y$eDE0,eNC=y$eNC,eNE=y$eNE,tol=x$tol,
variable=x$variable)
}
class(rval) <- "gsSize"
return(rval)
}
###################################################
### code chunk number 35: tEventsIA
###################################################
# Find the calendar time at which the expected number of events reaches
# a targeted fraction (`timing`) of the design's final planned events,
# then return the expected event and enrollment counts at that time.
tEventsIA <- function(x, timing=.25, tol = .Machine$double.eps^0.25)
{ Tfinal <- x$T[length(x$T)]
fit <- uniroot(f=nEventsIA, interval=c(0.0001, Tfinal-.0001), x=x,
target=timing, tol=tol, simple=TRUE)
nEventsIA(tIA=fit$root, x=x, simple=FALSE)
}
# Expected events at an interim analysis time tIA (both arms combined).
# With simple=TRUE, returns expected events minus target*final-events,
# which serves as the root-finding objective for tEventsIA(); with
# simple=FALSE, returns the expected event/enrollment counts at tIA.
nEventsIA <- function(tIA=5, x=NULL, target=0, simple=TRUE)
{ Qe <- x$ratio/(1+x$ratio)
eDC <- eEvents(lambda=x$lambdaC, eta=x$etaC,
gamma=x$gamma*(1-Qe), R=x$R, S=x$S, T=tIA,
Tfinal=x$T[length(x$T)], minfup=x$minfup)
# NOTE(review): the experimental arm uses x$etaC for dropout here;
# confirm x$etaE was not intended
eDE <- eEvents(lambda=x$lambdaC * x$hr, eta=x$etaC,
gamma=x$gamma*Qe, R=x$R, S=x$S, T=tIA,
Tfinal=x$T[length(x$T)], minfup=x$minfup)
if (simple)
{ # final planned event count d depends on the object flavor
if (class(x)[1] == "gsSize") d<-x$d
#OLD else d <- sum(x$eDC[length(x$eDC)]+x$eDE[length(x$eDE)])
else if(!is.matrix(x$eDC)) d <- sum(x$eDC[length(x$eDC)]+x$eDE[length(x$eDE)])
else d <- sum(x$eDC[nrow(x$eDC),]+x$eDE[nrow(x$eDE),])
return(sum(eDC$d+eDE$d)-target*d)
}
else return(list(T=tIA,eDC=eDC$d,eDE=eDE$d,eNC=eDC$n,eNE=eDE$n))
}
###################################################
### code chunk number 36: gsSurv
###################################################
# Group sequential design for a two-arm time-to-event trial: derive the
# fixed design via nSurv(), apply gsDesign() spending-function bounds
# to the fixed event count, then compute the calendar time and expected
# event/sample counts at each interim analysis via tEventsIA().
# Returns an object of class c("gsSurv","gsDesign").
gsSurv<-function(k=3, test.type=4, alpha=0.025, sided=1,
beta=0.1, astar=0, timing=1, sfu=sfHSD, sfupar=-4,
sfl=sfHSD, sflpar=-2, r=18,
lambdaC=log(2)/6, hr=.6, hr0=1, eta=0, etaE=NULL,
gamma=1, R=12, S=NULL, T=NULL, minfup=NULL, ratio=1,
tol = .Machine$double.eps^0.25)
{ x<-nSurv(lambdaC=lambdaC, hr=hr, hr0=hr0, eta = eta, etaE=etaE,
gamma=gamma, R=R, S=S, T=T, minfup = minfup, ratio = ratio,
alpha = alpha, beta = beta, sided = sided, tol = tol)
# group sequential boundaries sized from the fixed-design event count
y<-gsDesign(k=k,test.type=test.type,alpha=alpha/sided,
beta=beta, astar=astar, n.fix=x$d, timing=timing,
sfu=sfu, sfupar=sfupar, sfl=sfl, sflpar=sflpar, tol=tol,
delta1=log(hr), delta0=log(hr0))
# rescale/re-solve the fixed design to hit the final event count
z<-gsnSurv(x,y$n.I[k])
eDC <- NULL
eDE <- NULL
eNC <- NULL
eNE <- NULL
T <- NULL
# calendar time and expected counts at each interim analysis
for(i in 1:(k-1)){
xx <- tEventsIA(z,y$timing[i],tol)
T <- c(T,xx$T)
eDC <- rbind(eDC,xx$eDC)
eDE <- rbind(eDE,xx$eDE)
eNC <- rbind(eNC,xx$eNC)
eNE <- rbind(eNE,xx$eNE)
}
y$T <- c(T,z$T)
y$eDC <- rbind(eDC,z$eDC)
y$eDE <- rbind(eDE,z$eDE)
y$eNC <- rbind(eNC,z$eNC)
y$eNE <- rbind(eNE,z$eNE)
y$hr=hr; y$hr0=hr0; y$R=z$R; y$S=z$S; y$minfup=z$minfup;
y$gamma=z$gamma; y$ratio=ratio; y$lambdaC=z$lambdaC;
y$etaC=z$etaC; y$etaE=z$etaE; y$variable=x$variable; y$tol=tol
class(y) <- c("gsSurv","gsDesign")
# label the rate matrices by period and stratum (as in nSurv)
nameR<-nameperiod(cumsum(y$R))
stratnames <- paste("Stratum",1:ncol(y$lambdaC))
if (is.null(y$S)) nameS<-"0-Inf"
else nameS <- nameperiod(cumsum(c(y$S,Inf)))
rownames(y$lambdaC)<-nameS
colnames(y$lambdaC)<-stratnames
rownames(y$etaC)<-nameS
colnames(y$etaC)<-stratnames
rownames(y$etaE)<-nameS
colnames(y$etaE)<-stratnames
rownames(y$gamma)<-nameR
colnames(y$gamma)<-stratnames
return(y)
}
# Print method for "gsSurv" (group sequential time-to-event) designs:
# prints the underlying gsDesign summary plus a per-analysis table of
# timing, sample size, events and hazard-ratio boundaries, followed by
# the design's rate assumptions.
#
# x:      object of class "gsSurv" as produced by gsSurv().
# digits: rounding applied to the printed rate matrices.
#
# Returns x invisibly.
#
# Fixes vs. original: x$lambdaC is referenced by its full name instead
# of relying on `$` partial matching of x$lambda; returns invisibly.
print.gsSurv<-function(x,digits=2,...){
cat("Time to event group sequential design with HR=",x$hr,"\n")
if (x$hr0 != 1) cat("Non-inferiority design with null HR=",x$hr0,"\n")
if (min(x$ratio==1)==1)
cat("Equal randomization: ratio=1\n")
else {cat("Randomization (Exp/Control): ratio=",
x$ratio, "\n")
if (length(x$ratio)>1) cat("(randomization ratios shown by strata)\n")
}
print.gsDesign(x)
# per-analysis summary; boundaries are translated from the Z-scale to
# approximate hazard ratios via zn2hr(); futility column is included
# unless test.type == 1 (efficacy-only design)
if(x$test.type != 1){
y<-cbind(x$T,(x$eNC+x$eNE)%*%array(1,ncol(x$eNE)),
(x$eDC+x$eDE)%*%array(1,ncol(x$eNE)),
round(zn2hr(x$lower$bound,x$n.I,x$ratio,hr0=x$hr0,hr1=x$hr),3),
round(zn2hr(x$upper$bound,x$n.I,x$ratio,hr0=x$hr0,hr1=x$hr),3))
colnames(y)<-c("T","n","Events","HR futility","HR efficacy")
}else{
y<-cbind(x$T,(x$eNC+x$eNE)%*%array(1,ncol(x$eNE)),
(x$eDC+x$eDE)%*%array(1,ncol(x$eNE)),
round(zn2hr(x$upper$bound,x$n.I,x$ratio,hr0=x$hr0,hr1=x$hr),3))
colnames(y)<-c("T","n","Events","HR efficacy")
}
rnames<-paste("IA",1:(x$k))
rnames[length(rnames)]<-"Final"
rownames(y)<-rnames
print(y)
cat("Accrual rates:\n")
print(round(x$gamma,digits))
cat("Control event rates (H1):\n")
print(round(x$lambdaC,digits))
# collapse the censoring display when both arms share the same rates
if (max(abs(x$etaC-x$etaE))==0)
{ cat("Censoring rates:\n")
print(round(x$etaC,digits))
}
else
{ cat("Control censoring rates:\n")
print(round(x$etaC,digits))
cat("Experimental censoring rates:\n")
print(round(x$etaE,digits))
}
invisible(x)
}
# xtable method for gsSurv objects: build a LaTeX-ready data frame with
# one 5-row group per analysis (Z-value, HR, nominal 1-sided p, and
# cumulative crossing probabilities under HR=hr0 and HR=hr). Futility
# columns are included unless test.type == 1 (efficacy-only design).
# An optional LaTeX footnote spanning `fnwid` is appended to the last
# efficacy cell.
xtable.gsSurv <- function(x, caption=NULL, label=NULL, align=NULL, digits=NULL,
display=NULL, auto=FALSE, footnote=NULL, fnwid="9cm", timename="months",...){
k <- x$k
# row labels repeated for each of the k analyses
stat <- c("Z-value","HR","p (1-sided)", paste("P\\{Cross\\} if HR=",x$hr0,sep=""),
paste("P\\{Cross\\} if HR=",x$hr,sep=""))
st <- stat
for (i in 2:k) stat <- c(stat,st)
# each analysis occupies 5 table rows; index 5*(0:(k-1))+m addresses
# the m-th row within every analysis group
an <- array(" ",5*k)
tim <- an
enrol <- an
fut <- an
eff <- an
an[5*(0:(k-1))+1]<-c(paste("IA ",as.character(1:(k-1)),": ",
as.character(round(100*x$timing[1:(k-1)],1)), "\\%",sep=""), "Final analysis")
an[5*(1:(k-1))+1] <- paste("\\hline",an[5*(1:(k-1))+1])
an[5*(0:(k-1))+2]<- paste("N:",ceiling(rowSums(x$eNC))+ceiling(rowSums(x$eNE)))
an[5*(0:(k-1))+3]<- paste("Events:",ceiling(rowSums(x$eDC+x$eDE)))
an[5*(0:(k-1))+4]<- paste(round(x$T,1),timename,sep=" ")
# efficacy (and optionally futility) bounds on the Z and HR scales
if (x$test.type != 1) fut[5*(0:(k-1))+1]<- as.character(round(x$lower$bound,2))
eff[5*(0:(k-1))+1]<- as.character(round(x$upper$bound,2))
if (x$test.type != 1) fut[5*(0:(k-1))+2]<- as.character(round(gsHR(z=x$lower$bound,i=1:k,x,ratio=x$ratio)*x$hr0,2))
eff[5*(0:(k-1))+2]<- as.character(round(gsHR(z=x$upper$bound,i=1:k,x,ratio=x$ratio)*x$hr0,2))
# nominal p-values and cumulative crossing probabilities; values that
# round to 0 are displayed as "< 0.0001"
asp <- as.character(round(pnorm(-x$upper$bound),4))
asp[asp=="0"]<-"$< 0.0001$"
eff[5*(0:(k-1))+3] <- asp
asp <- as.character(round(cumsum(x$upper$prob[,1]),4))
asp[asp=="0"]<-"$< 0.0001$"
eff[5*(0:(k-1))+4] <- asp
asp <- as.character(round(cumsum(x$upper$prob[,2]),4))
asp[asp=="0"]<-"$< 0.0001$"
eff[5*(0:(k-1))+5] <- asp
if (x$test.type != 1) {
bsp <- as.character(round(pnorm(-x$lower$bound),4))
bsp[bsp=="0"]<-" $< 0.0001$"
fut[5*(0:(k-1))+3] <- bsp
bsp <- as.character(round(cumsum(x$lower$prob[,1]),4))
bsp[bsp=="0"]<-"$< 0.0001$"
fut[5*(0:(k-1))+4] <- bsp
bsp <- as.character(round(cumsum(x$lower$prob[,2]),4))
bsp[bsp=="0"]<-"$< 0.0001$"
fut[5*(0:(k-1))+5] <- bsp
}
neff <- length(eff)
# append the footnote as a multicolumn row after the last entry
if (!is.null(footnote)) eff[neff] <-
paste(eff[neff],"\\\\ \\hline \\multicolumn{4}{p{",fnwid,"}}{\\footnotesize",footnote,"}")
if (x$test.type != 1){
xxtab <- data.frame(cbind(an,stat,fut,eff))
colnames(xxtab) <- c("Analysis","Value","Futility","Efficacy")
}else{
xxtab <- data.frame(cbind(an,stat,eff))
colnames(xxtab) <- c("Analysis","Value","Efficacy")
}
return(xtable(xxtab, caption=caption, label=label, align=align, digits=digits,
display=display,auto=auto,...))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Inversion.R
\name{inversionFixingThreeCircles}
\alias{inversionFixingThreeCircles}
\title{Inversion fixing three circles}
\usage{
inversionFixingThreeCircles(circ1, circ2, circ3)
}
\arguments{
\item{circ1, circ2, circ3}{\code{Circle} objects}
}
\value{
An \code{Inversion} object, which lets each of \code{circ1},
\code{circ2} and \code{circ3} invariant.
}
\description{
Return the inversion which lets invariant three given circles.
}
| /man/inversionFixingThreeCircles.Rd | no_license | stla/PlaneGeometry | R | false | true | 514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Inversion.R
\name{inversionFixingThreeCircles}
\alias{inversionFixingThreeCircles}
\title{Inversion fixing three circles}
\usage{
inversionFixingThreeCircles(circ1, circ2, circ3)
}
\arguments{
\item{circ1, circ2, circ3}{\code{Circle} objects}
}
\value{
An \code{Inversion} object, which lets each of \code{circ1},
\code{circ2} and \code{circ3} invariant.
}
\description{
Return the inversion which lets invariant three given circles.
}
|
# Largest odd element of a numeric vector, floored at zero.
#
# y: numeric vector (may be empty).
#
# Returns the maximum odd value in `y`, or 0 when `y` contains no odd
# value greater than zero (mirrors the original accumulator starting at
# 0, so negative odd values also yield 0).
#
# Fixes vs. original: vectorized instead of two element-wise loops, and
# safe for empty input (the old `1:length(y)` loop indexed out of
# bounds when `length(y) == 0`). Comments translated from Spanish.
mayor_impares <- function(y) {
  impares <- y[y %% 2 != 0]
  max(c(0, impares))
}
# Largest odd element of a numeric vector, floored at zero.
#
# y: numeric vector (may be empty).
#
# Returns the maximum odd value in `y`, or 0 when `y` contains no odd
# value greater than zero (mirrors the original accumulator starting at
# 0, so negative odd values also yield 0).
#
# Fixes vs. original: vectorized instead of two element-wise loops, and
# safe for empty input (the old `1:length(y)` loop indexed out of
# bounds when `length(y) == 0`). Comments translated from Spanish.
mayor_impares <- function(y) {
  impares <- y[y %% 2 != 0]
  max(c(0, impares))
}
##
# Copyright (c) 2010-2018 LabKey Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Insert rows into a LabKey Server table via the query/insertRows API.
#
# baseUrl:    base server URL; resolved via labkey.getBaseUrl() when NULL.
# folderPath: LabKey folder path containing the target schema/query.
# schemaName, queryName: identify the destination table.
# toInsert:   data frame of rows to insert; factor columns are converted
#             to character before JSON serialization.
# na:         replacement value for NA cells in the JSON payload.
#
# Returns the server's JSON response parsed into an R list.
#
# Fixes vs. original: removed dead `showAllRows` and unused `ncols`
# locals; the row loop uses seq_len() so a 0-row data frame produces an
# empty rows array instead of indexing out of bounds.
labkey.insertRows <- function(baseUrl=NULL, folderPath, schemaName, queryName, toInsert, na=NULL)
{
baseUrl=labkey.getBaseUrl(baseUrl)
## Validate required parameters
if (missing(folderPath)) stop (paste("A value must be specified for folderPath."))
if (missing(schemaName)) stop (paste("A value must be specified for schemaName."))
if (missing(queryName)) stop (paste("A value must be specified for queryName."))
if (missing(toInsert)) stop (paste("A value must be specified for toInsert."))
## normalize the folder path
folderPath <- encodeFolderPath(folderPath)
## URL encode folder path, JSON encode post body (if not already encoded)
toInsert <- convertFactorsToStrings(toInsert);
nrows <- nrow(toInsert)
p1 <- toJSON(list(schemaName=schemaName, queryName=queryName, apiVersion=8.3), auto_unbox=TRUE)
cnames <- colnames(toInsert)
## serialize each row as a JSON object, substituting `na` for NA cells
p3 <- NULL
for(j in seq_len(nrows))
{
cvalues <- as.list(toInsert[j,])
names(cvalues) <- cnames
cvalues[is.na(cvalues)] = na
p2 <- toJSON(cvalues, auto_unbox=TRUE)
p3 <- c(p3, p2)
}
p3 <- paste(p3, collapse=",")
## splice the rows array into the header object built above
pbody <- paste(substr(p1, 1, nchar(p1)-1), ', \"rows\":[' ,p3, "] }", sep="")
myurl <- paste(baseUrl, "query", folderPath, "insertRows.api", sep="")
## Execute via our standard POST function
mydata <- labkey.post(myurl, pbody)
newdata <- fromJSON(mydata, simplifyVector=FALSE, simplifyDataFrame=FALSE)
return(newdata)
}
| /Rlabkey/R/labkey.insertRows.R | permissive | hakanaku2009/labkey-api-r | R | false | false | 2,260 | r | ##
# Copyright (c) 2010-2018 LabKey Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Insert rows into a LabKey Server table via the query/insertRows API.
#
# baseUrl:    base server URL; resolved via labkey.getBaseUrl() when NULL.
# folderPath: LabKey folder path containing the target schema/query.
# schemaName, queryName: identify the destination table.
# toInsert:   data frame of rows to insert; factor columns are converted
#             to character before JSON serialization.
# na:         replacement value for NA cells in the JSON payload.
#
# Returns the server's JSON response parsed into an R list.
#
# Fixes vs. original: removed dead `showAllRows` and unused `ncols`
# locals; the row loop uses seq_len() so a 0-row data frame produces an
# empty rows array instead of indexing out of bounds.
labkey.insertRows <- function(baseUrl=NULL, folderPath, schemaName, queryName, toInsert, na=NULL)
{
baseUrl=labkey.getBaseUrl(baseUrl)
## Validate required parameters
if (missing(folderPath)) stop (paste("A value must be specified for folderPath."))
if (missing(schemaName)) stop (paste("A value must be specified for schemaName."))
if (missing(queryName)) stop (paste("A value must be specified for queryName."))
if (missing(toInsert)) stop (paste("A value must be specified for toInsert."))
## normalize the folder path
folderPath <- encodeFolderPath(folderPath)
## URL encode folder path, JSON encode post body (if not already encoded)
toInsert <- convertFactorsToStrings(toInsert);
nrows <- nrow(toInsert)
p1 <- toJSON(list(schemaName=schemaName, queryName=queryName, apiVersion=8.3), auto_unbox=TRUE)
cnames <- colnames(toInsert)
## serialize each row as a JSON object, substituting `na` for NA cells
p3 <- NULL
for(j in seq_len(nrows))
{
cvalues <- as.list(toInsert[j,])
names(cvalues) <- cnames
cvalues[is.na(cvalues)] = na
p2 <- toJSON(cvalues, auto_unbox=TRUE)
p3 <- c(p3, p2)
}
p3 <- paste(p3, collapse=",")
## splice the rows array into the header object built above
pbody <- paste(substr(p1, 1, nchar(p1)-1), ', \"rows\":[' ,p3, "] }", sep="")
myurl <- paste(baseUrl, "query", folderPath, "insertRows.api", sep="")
## Execute via our standard POST function
mydata <- labkey.post(myurl, pbody)
newdata <- fromJSON(mydata, simplifyVector=FALSE, simplifyDataFrame=FALSE)
return(newdata)
}
|
library(ggplot2)
library(shinydashboard)
library(shiny)
library(dplyr)
library(randomForest)
library(stringr)
library(class)
library(mice) | /global.R | no_license | roshankk9/MachineLearning | R | false | false | 138 | r | library(ggplot2)
library(shinydashboard)
library(shiny)
library(dplyr)
library(randomForest)
library(stringr)
library(class)
library(mice) |
# The functionality of this file has been incorporated into the cleanDataFnct.R file
# Categorize the marijuana product type for each record based on keyword
# matches against the Source column; earlier keywords take priority and
# anything without a match is classified as "Marijuana Plant".
#
# data: data frame with a character column `Source`.
#
# Returns `data` with a `MainSource` column added/overwritten.
#
# Fixes vs. the original: the keyword tests are now a proper
# if/else-if chain (previously only the last test had an `else`, so
# every row not matching "food" was reset to "Marijuana Plant" even
# after matching an earlier keyword), the "Other Fodd" typo is
# corrected, the debug print() calls are dropped, and the categorized
# data frame is actually returned (return was commented out).
marijuanaSource <- function(data) {
  classify <- function(src) {
    if (grepl("oil", src, ignore.case = TRUE)) {
      "Hash Oil"
    } else if (grepl("brownie", src, ignore.case = TRUE)) {
      "Brownies"
    } else if (grepl("cookies", src, ignore.case = TRUE)) {
      "Cookies"
    } else if (grepl("candy", src, ignore.case = TRUE)) {
      "Candy Bars"
    } else if (grepl("food", src, ignore.case = TRUE)) {
      "Other Food"
    } else {
      "Marijuana Plant"
    }
  }
  data$MainSource <- vapply(data$Source, classify, character(1), USE.NAMES = FALSE)
  return(data)
}
# Categories to keep: Candy Bars, Cookies, Brownies, Drinks, Hash Oil, Marijuana Plant, Misc.
#[2] "butane hash oil ((slang term for concentrated thc))"
#[3] "cannabidiol"
#[4] "cannabis sativa"
#[5] "concentrated thc"
#[6] "dab ((slang term for concentrated thc))"
#[7] "dabs((slang term for concentrated thc))"
#[8] "hash oil ((concentrated thc) )"
#[9] "hashish"
#[10] "marijuana"
#[11] "marijuana brownie"
#[12] "marijuana candy"
#[13] "marijuana containing beverage"
#[14] "marijuana containing drink"
#[15] "marijuana containing food (other)"
#[16] "marijuana cookies"
#[17] "marijuana oil"
#[18] "marijuana, crude extract"
#[19] "marijuana, plant"
#[20] "marijuana(common name for cannabis sativa)"
#[21] "marijuana(slang term) (marijuana)"
#[22] "medical marijuana"
#[23] "pot (slang term)"
#[24] "pot (slang term) (marijuana)"
#[25] "smash (slang term) (marijuana cooked with acetone, resulting oil is added to hashish)"
#[26] "thc"
#[27] "thc (tetrahydrocannabinol)"
#[28] "wax((slang term for concentrated thc))"
#[29] "weed"
#[30] "weed (slang term) (marijuana)" | /scripts/Unused/marijuanaSource.R | no_license | alichtner/WAstate_PotAnalysis | R | false | false | 4,175 | r | # The functionality of this file has been incorporated into the cleanDataFnct.R file
# Categorize the marijuana product type for each record based on keyword
# matches against the Source column; earlier keywords take priority and
# anything without a match is classified as "Marijuana Plant".
#
# data: data frame with a character column `Source`.
#
# Returns `data` with a `MainSource` column added/overwritten.
#
# Fixes vs. the original: the keyword tests are now a proper
# if/else-if chain (previously only the last test had an `else`, so
# every row not matching "food" was reset to "Marijuana Plant" even
# after matching an earlier keyword), the "Other Fodd" typo is
# corrected, the debug print() calls are dropped, and the categorized
# data frame is actually returned (return was commented out).
marijuanaSource <- function(data) {
  classify <- function(src) {
    if (grepl("oil", src, ignore.case = TRUE)) {
      "Hash Oil"
    } else if (grepl("brownie", src, ignore.case = TRUE)) {
      "Brownies"
    } else if (grepl("cookies", src, ignore.case = TRUE)) {
      "Cookies"
    } else if (grepl("candy", src, ignore.case = TRUE)) {
      "Candy Bars"
    } else if (grepl("food", src, ignore.case = TRUE)) {
      "Other Food"
    } else {
      "Marijuana Plant"
    }
  }
  data$MainSource <- vapply(data$Source, classify, character(1), USE.NAMES = FALSE)
  return(data)
}
# Categories to keep: Candy Bars, Cookies, Brownies, Drinks, Hash Oil, Marijuana Plant, Misc.
#[2] "butane hash oil ((slang term for concentrated thc))"
#[3] "cannabidiol"
#[4] "cannabis sativa"
#[5] "concentrated thc"
#[6] "dab ((slang term for concentrated thc))"
#[7] "dabs((slang term for concentrated thc))"
#[8] "hash oil ((concentrated thc) )"
#[9] "hashish"
#[10] "marijuana"
#[11] "marijuana brownie"
#[12] "marijuana candy"
#[13] "marijuana containing beverage"
#[14] "marijuana containing drink"
#[15] "marijuana containing food (other)"
#[16] "marijuana cookies"
#[17] "marijuana oil"
#[18] "marijuana, crude extract"
#[19] "marijuana, plant"
#[20] "marijuana(common name for cannabis sativa)"
#[21] "marijuana(slang term) (marijuana)"
#[22] "medical marijuana"
#[23] "pot (slang term)"
#[24] "pot (slang term) (marijuana)"
#[25] "smash (slang term) (marijuana cooked with acetone, resulting oil is added to hashish)"
#[26] "thc"
#[27] "thc (tetrahydrocannabinol)"
#[28] "wax((slang term for concentrated thc))"
#[29] "weed"
#[30] "weed (slang term) (marijuana)" |
context("JSON parser")

test_that("JSON parser scalars", {
  # Literal-to-scalar round trips, checked as a value table.
  scalar_cases <- list(
    list('"foobar"', "foobar"),
    list('""', ""),
    list("42", 42),
    list("-42", -42),
    list("42.42", 42.42),
    list("1e2", 1e2),
    list("-0.1e-2", -0.1e-2),
    list("true", TRUE),
    list("false", FALSE)
  )
  for (case in scalar_cases) {
    expect_equal(fromJSON(case[[1]]), case[[2]])
  }
  # NULL stays out of the table: storing NULL inside list() entries is easy
  # to get wrong, so the null literal is asserted directly.
  expect_equal(fromJSON("null"), NULL)
})
test_that("JSON parser arrays", {
  json_in <- list("[1,2,3]", "[1]", "[]", '["foo"]', '["foo", 1, "bar", true]')
  wanted <- list(
    list(1, 2, 3),
    list(1),
    list(),
    list("foo"),
    list("foo", 1, "bar", TRUE)
  )
  # Parse every input and compare with its expected list, labelling any
  # failure with the offending JSON text.
  for (k in seq_along(json_in)) {
    expect_equal(fromJSON(json_in[[k]]), wanted[[k]], info = json_in[[k]])
  }
})
test_that("JSON parser nested arrays", {
  json_in <- list(
    '[1,2, ["foo", "bar"], 3]',
    '[ [ [ 1 ] ] ]',
    '[ [ [ ] ] ]'
  )
  wanted <- list(
    list(1, 2, list("foo", "bar"), 3),
    list(list(list(1))),
    list(list(list()))
  )
  # Nesting must be preserved exactly, including empty innermost lists.
  for (k in seq_along(json_in)) {
    expect_equal(fromJSON(json_in[[k]]), wanted[[k]], info = json_in[[k]])
  }
})
test_that("JSON parser, real examples", {
# Fixture: a trimmed GitHub commits API payload exercising nested objects,
# escaped characters inside strings, and an integer field.
# NOTE: 'inp' is one multi-line string literal; its body must stay
# byte-for-byte as-is.
inp <- '
{
"sha": "e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
"commit": {
"author": {
"name": "Hadley Wickham",
"email": "h.wickham@gmail.com",
"date": "2015-03-30T13:55:18Z"
},
"committer": {
"name": "Hadley Wickham",
"email": "h.wickham@gmail.com",
"date": "2015-03-30T13:55:18Z"
},
"message": "Merge pull request #22 from paulstaab/master\\n\\nImprove error message for assertions of length 0",
"tree": {
"sha": "f2e840b7a134fbc118597842992aa50048e0fa04",
"url": "https://api.github.com/repos/hadley/assertthat/git/trees/f2e840b7a134fbc118597842992aa50048e0fa04"
},
"url": "https://api.github.com/repos/hadley/assertthat/git/commits/e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
"comment_count": 0
}
}'
# Expected parse: each JSON object becomes a named list, nested in the same
# shape; numbers stay numeric.
exp <- list(
sha = "e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
commit = list(
author = list(
name = "Hadley Wickham",
email = "h.wickham@gmail.com",
date = "2015-03-30T13:55:18Z"),
committer = list(
name = "Hadley Wickham",
email = "h.wickham@gmail.com",
date = "2015-03-30T13:55:18Z"),
message = "Merge pull request #22 from paulstaab/master\\n\\nImprove error message for assertions of length 0",
tree = list(
sha = "f2e840b7a134fbc118597842992aa50048e0fa04",
url = "https://api.github.com/repos/hadley/assertthat/git/trees/f2e840b7a134fbc118597842992aa50048e0fa04"
),
url = "https://api.github.com/repos/hadley/assertthat/git/commits/e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
comment_count = 0
)
)
expect_equal(fromJSON(inp), exp)
})
test_that("JSON parser, errors", {
  # Malformed documents paired with the parser error each must raise.
  bad <- c(
    "[1,2,3,",
    '{ 123: "foo" }',
    '{ "foo" "foobar" }',
    '{ "foo": "foobar" "foo2": "foobar2" }',
    '[1,2,3 4]'
  )
  msg <- c(
    "EXPECTED value GOT EOF",
    "EXPECTED string GOT 123",
    'EXPECTED : GOT "foobar"',
    'EXPECTED , or } GOT "foo2"',
    'EXPECTED , GOT 4'
  )
  for (k in seq_along(bad)) {
    expect_error(fromJSON(bad[[k]]), msg[[k]])
  }
})
| /tests/testthat/test-json.R | no_license | yutannihilation/remotes | R | false | false | 3,344 | r |
context("JSON parser")

test_that("JSON parser scalars", {
  # Literal-to-scalar round trips, checked as a value table.
  scalar_cases <- list(
    list('"foobar"', "foobar"),
    list('""', ""),
    list("42", 42),
    list("-42", -42),
    list("42.42", 42.42),
    list("1e2", 1e2),
    list("-0.1e-2", -0.1e-2),
    list("true", TRUE),
    list("false", FALSE)
  )
  for (case in scalar_cases) {
    expect_equal(fromJSON(case[[1]]), case[[2]])
  }
  # NULL stays out of the table: storing NULL inside list() entries is easy
  # to get wrong, so the null literal is asserted directly.
  expect_equal(fromJSON("null"), NULL)
})
test_that("JSON parser arrays", {
  json_in <- list("[1,2,3]", "[1]", "[]", '["foo"]', '["foo", 1, "bar", true]')
  wanted <- list(
    list(1, 2, 3),
    list(1),
    list(),
    list("foo"),
    list("foo", 1, "bar", TRUE)
  )
  # Parse every input and compare with its expected list, labelling any
  # failure with the offending JSON text.
  for (k in seq_along(json_in)) {
    expect_equal(fromJSON(json_in[[k]]), wanted[[k]], info = json_in[[k]])
  }
})
test_that("JSON parser nested arrays", {
  json_in <- list(
    '[1,2, ["foo", "bar"], 3]',
    '[ [ [ 1 ] ] ]',
    '[ [ [ ] ] ]'
  )
  wanted <- list(
    list(1, 2, list("foo", "bar"), 3),
    list(list(list(1))),
    list(list(list()))
  )
  # Nesting must be preserved exactly, including empty innermost lists.
  for (k in seq_along(json_in)) {
    expect_equal(fromJSON(json_in[[k]]), wanted[[k]], info = json_in[[k]])
  }
})
test_that("JSON parser, real examples", {
# Fixture: a trimmed GitHub commits API payload exercising nested objects,
# escaped characters inside strings, and an integer field.
# NOTE: 'inp' is one multi-line string literal; its body must stay
# byte-for-byte as-is.
inp <- '
{
"sha": "e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
"commit": {
"author": {
"name": "Hadley Wickham",
"email": "h.wickham@gmail.com",
"date": "2015-03-30T13:55:18Z"
},
"committer": {
"name": "Hadley Wickham",
"email": "h.wickham@gmail.com",
"date": "2015-03-30T13:55:18Z"
},
"message": "Merge pull request #22 from paulstaab/master\\n\\nImprove error message for assertions of length 0",
"tree": {
"sha": "f2e840b7a134fbc118597842992aa50048e0fa04",
"url": "https://api.github.com/repos/hadley/assertthat/git/trees/f2e840b7a134fbc118597842992aa50048e0fa04"
},
"url": "https://api.github.com/repos/hadley/assertthat/git/commits/e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
"comment_count": 0
}
}'
# Expected parse: each JSON object becomes a named list, nested in the same
# shape; numbers stay numeric.
exp <- list(
sha = "e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
commit = list(
author = list(
name = "Hadley Wickham",
email = "h.wickham@gmail.com",
date = "2015-03-30T13:55:18Z"),
committer = list(
name = "Hadley Wickham",
email = "h.wickham@gmail.com",
date = "2015-03-30T13:55:18Z"),
message = "Merge pull request #22 from paulstaab/master\\n\\nImprove error message for assertions of length 0",
tree = list(
sha = "f2e840b7a134fbc118597842992aa50048e0fa04",
url = "https://api.github.com/repos/hadley/assertthat/git/trees/f2e840b7a134fbc118597842992aa50048e0fa04"
),
url = "https://api.github.com/repos/hadley/assertthat/git/commits/e183ccdc515bbb8e7f32d8d16586aed9eea6de0b",
comment_count = 0
)
)
expect_equal(fromJSON(inp), exp)
})
test_that("JSON parser, errors", {
  # Malformed documents paired with the parser error each must raise.
  bad <- c(
    "[1,2,3,",
    '{ 123: "foo" }',
    '{ "foo" "foobar" }',
    '{ "foo": "foobar" "foo2": "foobar2" }',
    '[1,2,3 4]'
  )
  msg <- c(
    "EXPECTED value GOT EOF",
    "EXPECTED string GOT 123",
    'EXPECTED : GOT "foobar"',
    'EXPECTED , or } GOT "foo2"',
    'EXPECTED , GOT 4'
  )
  for (k in seq_along(bad)) {
    expect_error(fromJSON(bad[[k]]), msg[[k]])
  }
})
|
context("ANSI mappings")

test_that("make_shifts1", {
  input <- "pre \033[31mred\033[39m \033[1mbold\033[22m post"
  matches <- rematch2::re_exec_all(input, re_ansi())
  # Three parallel columns of shift positions for the two color/style spans.
  expected <- cbind(
    c(5, 8, 9, 13),
    c(5, 13, 19, 27),
    c(5, 10, 14, 19)
  )
  expect_equal(make_shifts1(matches), expected)
})
test_that("map_raw_to_ansi1, map_ansi1_to_raw", {
  str <- "pre \033[31mred\033[39m \033[1mbold\033[22m post"
  map <- make_ansi_map1(str)
  # Parallel vectors: raw (escape-free) position -> position in the ANSI
  # string. Position 18 is one past the raw string end and must still map.
  raw_pos  <- 1:18
  ansi_pos <- c(1, 2, 3, 4, 10, 11, 12, 18, 23, 24, 25, 26,
                32, 33, 34, 35, 36, 37)
  for (i in seq_along(raw_pos)) {
    expect_equal(map_raw_to_ansi1(map, raw_pos[[i]]), ansi_pos[[i]],
                 info = paste(raw_pos[[i]]))
  }
  # The inverse mapping must round-trip every pair.
  for (i in seq_along(raw_pos)) {
    expect_equal(map_ansi_to_raw1(map, ansi_pos[[i]]), raw_pos[[i]],
                 info = paste(ansi_pos[[i]]))
  }
})
test_that("make_ansi_map1", {
  input <- "pre \033[31mred\033[39m \033[1mbold\033[22m post"
  # Two styled spans: red over "red", bold over "bold", each with its
  # opening and closing escape sequence.
  expected_map <- data.frame(
    stringsAsFactors = FALSE,
    start = c(5, 9),
    end = c(7, 12),
    open = c("\033[31m", "\033[1m"),
    close = c("\033[39m", "\033[22m")
  )
  expected_shifts <- cbind(c(5, 8, 9, 13), c(5, 13, 19, 27), c(5, 10, 14, 19))
  expect_equal(make_ansi_map1(input),
               list(map = expected_map, shifts = expected_shifts))
})
test_that("make_ansi_map with unclosed tags", {
  input <- "pre \033[31mred \033[1mbold\033[22m post"
  result <- make_ansi_map1(input)
  # The red tag is never closed, so its span runs to the end of the raw
  # string (18) and its closing sequence is empty.
  expect_equal(
    result$map,
    data.frame(
      stringsAsFactors = FALSE,
      start = c(5, 9),
      end = c(18, 12),
      open = c("\033[31m", "\033[1m"),
      close = c("", "\033[22m")
    )
  )
})
test_that("make_ansi_map1 corner cases", {
  # Strings without any ANSI escape sequence must yield a zero-row map with
  # the usual column types.
  no_spans <- data.frame(
    stringsAsFactors = FALSE,
    start = numeric(),
    end = numeric(),
    open = character(),
    close = character()
  )
  for (plain in c("", "x", "foobar")) {
    expect_equal(make_ansi_map1(plain)$map, no_spans)
  }
})
test_that("make_ansi_map", {
  # Vectorised mapping must equal applying the single-string version
  # element-wise.
  strings <- c(
    "pre \033[31mred\033[39m \033[1mbold\033[22m post",
    "another \033[3mone\033[23m"
  )
  expect_equal(make_ansi_map(strings), lapply(strings, make_ansi_map1))
})
| /tests/testthat/test-map.R | permissive | DavisVaughan/ansistrings | R | false | false | 2,305 | r |
context("ANSI mappings")

test_that("make_shifts1", {
  input <- "pre \033[31mred\033[39m \033[1mbold\033[22m post"
  matches <- rematch2::re_exec_all(input, re_ansi())
  # Three parallel columns of shift positions for the two color/style spans.
  expected <- cbind(
    c(5, 8, 9, 13),
    c(5, 13, 19, 27),
    c(5, 10, 14, 19)
  )
  expect_equal(make_shifts1(matches), expected)
})
test_that("map_raw_to_ansi1, map_ansi1_to_raw", {
  str <- "pre \033[31mred\033[39m \033[1mbold\033[22m post"
  map <- make_ansi_map1(str)
  # Parallel vectors: raw (escape-free) position -> position in the ANSI
  # string. Position 18 is one past the raw string end and must still map.
  raw_pos  <- 1:18
  ansi_pos <- c(1, 2, 3, 4, 10, 11, 12, 18, 23, 24, 25, 26,
                32, 33, 34, 35, 36, 37)
  for (i in seq_along(raw_pos)) {
    expect_equal(map_raw_to_ansi1(map, raw_pos[[i]]), ansi_pos[[i]],
                 info = paste(raw_pos[[i]]))
  }
  # The inverse mapping must round-trip every pair.
  for (i in seq_along(raw_pos)) {
    expect_equal(map_ansi_to_raw1(map, ansi_pos[[i]]), raw_pos[[i]],
                 info = paste(ansi_pos[[i]]))
  }
})
test_that("make_ansi_map1", {
  input <- "pre \033[31mred\033[39m \033[1mbold\033[22m post"
  # Two styled spans: red over "red", bold over "bold", each with its
  # opening and closing escape sequence.
  expected_map <- data.frame(
    stringsAsFactors = FALSE,
    start = c(5, 9),
    end = c(7, 12),
    open = c("\033[31m", "\033[1m"),
    close = c("\033[39m", "\033[22m")
  )
  expected_shifts <- cbind(c(5, 8, 9, 13), c(5, 13, 19, 27), c(5, 10, 14, 19))
  expect_equal(make_ansi_map1(input),
               list(map = expected_map, shifts = expected_shifts))
})
test_that("make_ansi_map with unclosed tags", {
  input <- "pre \033[31mred \033[1mbold\033[22m post"
  result <- make_ansi_map1(input)
  # The red tag is never closed, so its span runs to the end of the raw
  # string (18) and its closing sequence is empty.
  expect_equal(
    result$map,
    data.frame(
      stringsAsFactors = FALSE,
      start = c(5, 9),
      end = c(18, 12),
      open = c("\033[31m", "\033[1m"),
      close = c("", "\033[22m")
    )
  )
})
test_that("make_ansi_map1 corner cases", {
  # Strings without any ANSI escape sequence must yield a zero-row map with
  # the usual column types.
  no_spans <- data.frame(
    stringsAsFactors = FALSE,
    start = numeric(),
    end = numeric(),
    open = character(),
    close = character()
  )
  for (plain in c("", "x", "foobar")) {
    expect_equal(make_ansi_map1(plain)$map, no_spans)
  }
})
test_that("make_ansi_map", {
  # Vectorised mapping must equal applying the single-string version
  # element-wise.
  strings <- c(
    "pre \033[31mred\033[39m \033[1mbold\033[22m post",
    "another \033[3mone\033[23m"
  )
  expect_equal(make_ansi_map(strings), lapply(strings, make_ansi_map1))
})
|
# Script setup: show the current working directory, then switch to the
# project checkout so the relative CSV path used below resolves.
# NOTE(review): hard-coded setwd() ties this script to one machine --
# consider relative paths or an RStudio project instead.
getwd()
setwd("~/GitHub/Programacion_Actuarial_III")
#' Hospital name at a given mortality rank within a state.
#'
#' Reads "outcome-of-care-measures.csv" from the working directory and ranks
#' the hospitals of one state by their 30-day mortality rate for the chosen
#' outcome (ascending, so rank 1 has the lowest rate). Ties are broken
#' alphabetically by hospital name.
#'
#' Fixes over the previous version:
#'   * the "no missing values" branch referenced the undefined objects
#'     `final` and `num`, so it crashed whenever every hospital reported a
#'     rate; both branches are now one code path;
#'   * "Not Available" rows are removed with logical subsetting instead of
#'     `-grep(...)`, which drops *every* row when there is nothing to remove
#'     (negative indexing with an empty vector);
#'   * the outcome is validated against the names actually supported
#'     ("infarto", "falla", "neumonia") -- the old check tested the unused
#'     name "ataque", letting unknown outcomes slip through.
#'
#' @param estado state abbreviation, matched against the CSV "State" column.
#' @param resultado outcome: "infarto", "falla" or "neumonia".
#' @param op rank wanted: "mejor" (best), "peor" (worst) or a number.
#' @return the hospital name (character scalar), or NA when the requested
#'   rank exceeds the number of ranked hospitals.
rankhospital <- function(estado, resultado, op = "mejor"){
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  # Validate the state against those present in the data.
  est <- levels(factor(data[, 7]))
  if (!(estado %in% est)) {
    stop("estado invalido")
  }

  # Map each supported outcome to its 30-day mortality column.
  columnas <- c(infarto = 11, falla = 17, neumonia = 23)
  if (!(resultado %in% names(columnas))) {
    stop("resultado invalido")
  }
  columna <- columnas[[resultado]]

  datos <- data[data$State == estado, ]
  enc <- datos[, c(2, columna)]   # hospital name + mortality rate

  # Keep only hospitals that report a rate, then sort by rate (best first).
  final <- enc[enc[, 2] != "Not Available", , drop = FALSE]
  x <- final[order(as.numeric(final[, 2])), ]

  if (op == "mejor") {
    op <- 1
  } else if (op == "peor") {
    op <- nrow(x)
  } else if (op > nrow(x)) {
    return(NA)
  }

  # Hospitals tied on the same rate are ordered alphabetically: locate where
  # the tie group containing rank `op` starts, then pick the matching
  # position inside the alphabetically sorted group.
  primera <- which(x[, 2] == x[op, 2])[1]
  f <- op - (primera - 1)
  empate <- x[x[, 2] == x[op, 2], ]
  empate <- empate[order(empate[, 1]), ]
  empate[f, 1]
}
# Example run: worst-ranked Texas hospital for heart failure mortality.
rankhospital("TX","falla","peor")
| /RankHospital.R | no_license | GaticaCMC/Programacion_Actuarial_III | R | false | false | 1,789 | r | getwd()
# Move into the project checkout so the relative CSV path used by
# rankhospital() below resolves.
# NOTE(review): machine-specific setwd() -- consider a relative path instead.
setwd("~/GitHub/Programacion_Actuarial_III")
#' Hospital name at a given mortality rank within a state.
#'
#' Reads "outcome-of-care-measures.csv" from the working directory and ranks
#' the hospitals of one state by their 30-day mortality rate for the chosen
#' outcome (ascending, so rank 1 has the lowest rate). Ties are broken
#' alphabetically by hospital name.
#'
#' Fixes over the previous version:
#'   * the "no missing values" branch referenced the undefined objects
#'     `final` and `num`, so it crashed whenever every hospital reported a
#'     rate; both branches are now one code path;
#'   * "Not Available" rows are removed with logical subsetting instead of
#'     `-grep(...)`, which drops *every* row when there is nothing to remove
#'     (negative indexing with an empty vector);
#'   * the outcome is validated against the names actually supported
#'     ("infarto", "falla", "neumonia") -- the old check tested the unused
#'     name "ataque", letting unknown outcomes slip through.
#'
#' @param estado state abbreviation, matched against the CSV "State" column.
#' @param resultado outcome: "infarto", "falla" or "neumonia".
#' @param op rank wanted: "mejor" (best), "peor" (worst) or a number.
#' @return the hospital name (character scalar), or NA when the requested
#'   rank exceeds the number of ranked hospitals.
rankhospital <- function(estado, resultado, op = "mejor"){
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  # Validate the state against those present in the data.
  est <- levels(factor(data[, 7]))
  if (!(estado %in% est)) {
    stop("estado invalido")
  }

  # Map each supported outcome to its 30-day mortality column.
  columnas <- c(infarto = 11, falla = 17, neumonia = 23)
  if (!(resultado %in% names(columnas))) {
    stop("resultado invalido")
  }
  columna <- columnas[[resultado]]

  datos <- data[data$State == estado, ]
  enc <- datos[, c(2, columna)]   # hospital name + mortality rate

  # Keep only hospitals that report a rate, then sort by rate (best first).
  final <- enc[enc[, 2] != "Not Available", , drop = FALSE]
  x <- final[order(as.numeric(final[, 2])), ]

  if (op == "mejor") {
    op <- 1
  } else if (op == "peor") {
    op <- nrow(x)
  } else if (op > nrow(x)) {
    return(NA)
  }

  # Hospitals tied on the same rate are ordered alphabetically: locate where
  # the tie group containing rank `op` starts, then pick the matching
  # position inside the alphabetically sorted group.
  primera <- which(x[, 2] == x[op, 2])[1]
  f <- op - (primera - 1)
  empate <- x[x[, 2] == x[op, 2], ]
  empate <- empate[order(empate[, 1]), ]
  empate[f, 1]
}
# Example run: worst-ranked Texas hospital for heart failure mortality.
rankhospital("TX","falla","peor")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shadow_class.R
\docType{class}
\name{config_Shadow-class}
\alias{config_Shadow-class}
\alias{createShadowTestConfig}
\title{Create a config_Shadow object}
\usage{
createShadowTestConfig(
item_selection = NULL,
content_balancing = NULL,
MIP = NULL,
MCMC = NULL,
exclude_policy = NULL,
refresh_policy = NULL,
exposure_control = NULL,
stopping_criterion = NULL,
interim_theta = NULL,
final_theta = NULL,
theta_grid = seq(-4, 4, 0.1)
)
}
\arguments{
\item{item_selection}{a named list containing item selection criteria.
\itemize{
\item{\code{method}} the type of selection criteria. Accepts \code{MFI, MPWI, FB, EB, GFI}. (default = \code{MFI})
\item{\code{info_type}} the type of information. Accepts \code{FISHER}. (default = \code{FISHER})
\item{\code{initial_theta}} (optional) initial theta values to use.
\item{\code{fixed_theta}} (optional) fixed theta values to use throughout all item positions.
\item{\code{target_value}} (optional) the target value to use for \code{method = 'GFI'}.
}}
\item{content_balancing}{a named list containing content balancing options.
\itemize{
\item{\code{method}} the type of balancing method. Accepts \code{NONE, STA}. (default = \code{STA})
}}
\item{MIP}{a named list containing solver options.
\itemize{
\item{\code{solver}} the type of solver. Accepts \code{Rsymphony, gurobi, lpSolve, Rglpk}. (default = \code{LPSOLVE})
\item{\code{verbosity}} verbosity level of the solver. (default = \code{-2})
\item{\code{time_limit}} time limit in seconds. Used in solvers \code{Rsymphony, gurobi, Rglpk}. (default = \code{60})
\item{\code{gap_limit}} search termination criterion. Gap limit in relative scale passed onto the solver. Used in solver \code{gurobi}. (default = \code{.05})
\item{\code{gap_limit_abs}} search termination criterion. Gap limit in absolute scale passed onto the solver. Used in solvers \code{Rsymphony}. (default = \code{0.05})
\item{\code{obj_tol}} search termination criterion. The lower bound to use on the minimax deviation variable. Used when \code{item_selection$method} is \code{GFI}, and ignored otherwise. (default = \code{0.05})
\item{\code{retry}} number of times to retry running the solver if the solver returns no solution. Some solvers incorrectly return no solution even when a solution exists. This is the number of attempts to verify that the problem is indeed infeasible in such cases. Set to \code{0} to not retry. (default = \code{5})
}}
\item{MCMC}{a named list containing Markov-chain Monte Carlo configurations for obtaining posterior samples.
\itemize{
\item{\code{burn_in}} the number of chains from the start to discard. (default = \code{100})
\item{\code{post_burn_in}} the number of chains to use after discarding the first \code{burn_in} chains. (default = \code{500})
\item{\code{thin}} thinning interval to apply. \code{1} represents no thinning. (default = \code{1})
\item{\code{jump_factor}} the jump (scaling) factor for the proposal distribution. \code{1} represents no jumping. (default = \code{2.4})
}}
\item{exclude_policy}{a named list containing the exclude policy for use with the \code{exclude} argument in \code{\link{Shadow}}.
\itemize{
\item{\code{method}} the type of policy. Accepts \code{HARD, SOFT}. (default = \code{HARD})
\item{\code{M}} the Big M penalty to use on item information. Used in the \code{SOFT} method.
}}
\item{refresh_policy}{a named list containing the refresh policy for when to obtain a new shadow test.
\itemize{
\item{\code{method}} the type of policy. Accepts \code{ALWAYS, POSITION, INTERVAL, THRESHOLD, INTERVAL-THRESHOLD, STIMULUS, SET, PASSAGE}. (default = \code{ALWAYS})
\item{\code{interval}} used in methods \code{INTERVAL, INTERVAL-THRESHOLD}. Set to 1 to refresh at each position, 2 to refresh at every two positions, and so on. (default = \code{1})
\item{\code{threshold}} used in methods \code{THRESHOLD, INTERVAL-THRESHOLD}. The absolute change in between interim theta estimates to trigger the refresh. (default = \code{0.1})
\item{\code{position}} used in methods \code{POSITION}. Item positions to trigger the refresh. (default = \code{1})
}}
\item{exposure_control}{a named list containing exposure control settings.
\itemize{
\item{\code{method}} the type of exposure control method. Accepts \code{NONE, ELIGIBILITY, BIGM, BIGM-BAYESIAN}. (default = \code{ELIGIBILITY})
\item{\code{M}} used in methods \code{BIGM, BIGM-BAYESIAN}. the Big M penalty to use on item information.
\item{\code{max_exposure_rate}} target exposure rates for each segment. (default = \code{rep(0.25, 7)})
\item{\code{acceleration_factor}} the acceleration factor to apply. (default = \code{1})
\item{\code{n_segment}} the number of theta segments to use. (default = \code{7})
\item{\code{first_segment}} (optional) the theta segment assumed at the beginning of test for all participants.
\item{\code{segment_cut}} theta segment cuts. (default = \code{c(-Inf, seq(-2.5, 2.5, 1), Inf)})
\item{\code{initial_eligibility_stats}} (optional) initial eligibility statistics to use.
\item{\code{fading_factor}} the fading factor to apply. (default = \code{.999})
\item{\code{diagnostic_stats}} set to \code{TRUE} to generate segment-wise diagnostic statistics. (default = \code{FALSE})
}}
\item{stopping_criterion}{a named list containing stopping criterion.
\itemize{
\item{\code{method}} the type of stopping criterion. Accepts \code{FIXED}. (default = \code{FIXED})
\item{\code{test_length}} test length.
\item{\code{min_ni}} the minimum number of items to administer.
\item{\code{max_ni}} the maximum number of items to administer.
\item{\code{se_threshold}} standard error threshold. Item administration is stopped when theta estimate standard error becomes lower than this value.
}}
\item{interim_theta}{a named list containing interim theta estimation options.
\itemize{
\item{\code{method}} the type of estimation. Accepts \code{EAP, MLE, MLEF, EB, FB}. (default = \code{EAP})
\item{\code{shrinkage_correction}} set \code{TRUE} to apply shrinkage correction. Used when \code{method} is \code{EAP}. (default = \code{FALSE})
\item{\code{prior_dist}} the type of prior distribution. Accepts \code{NORMAL, UNIFORM}. (default = \code{NORMAL})
\item{\code{prior_par}} distribution parameters for \code{prior_dist}. (default = \code{c(0, 1)})
\item{\code{bound_ML}} theta bound in \code{c(lower_bound, upper_bound)} format. Used when \code{method} is \code{MLE}. (default = \code{-4, 4})
\item{\code{truncate_ML}} set \code{TRUE} to truncate ML estimate within \code{bound_ML}. (default = \code{FALSE})
\item{\code{max_iter}} maximum number of Newton-Raphson iterations. Used when \code{method} is \code{MLE}. (default = \code{50})
\item{\code{crit}} convergence criterion. Used when \code{method} is \code{MLE}. (default = \code{1e-03})
\item{\code{max_change}} maximum change in ML estimates between iterations. Changes exceeding this value is clipped to this value. Used when \code{method} is \code{MLE}. (default = \code{1.0})
\item{\code{use_step_size}} set \code{TRUE} to use \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{FALSE})
\item{\code{step_size}} upper bound to impose on the absolute change in initial theta and estimated theta. Absolute changes exceeding this value will be capped to \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{0.5})
\item{\code{do_Fisher}} set \code{TRUE} to use Fisher's method of scoring. Used when \code{method} is \code{MLE}. (default = \code{TRUE})
\item{\code{fence_slope}} slope parameter to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. Use one value to use the same value for both bounds. (default = \code{5})
\item{\code{fence_difficulty}} difficulty parameters to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. (default = \code{c(-5, 5)})
\item{\code{hand_scored_attribute}} (optional) the item attribute name for whether each item is hand-scored or not. The attribute should have \code{TRUE} (hand-scored) and \code{FALSE} (machine-scored) values. If a hand-scored item is administered to an examinee, the previous interim theta (or the starting theta if this occurs for the first item) is reused without updating the estimate.
}}
\item{final_theta}{a named list containing final theta estimation options.
\itemize{
\item{\code{method}} the type of estimation. Accepts \code{EAP, MLE, MLEF, EB, FB}. (default = \code{EAP})
\item{\code{shrinkage_correction}} set \code{TRUE} to apply shrinkage correction. Used when \code{method} is \code{EAP}. (default = \code{FALSE})
\item{\code{prior_dist}} the type of prior distribution. Accepts \code{NORMAL, UNIFORM}. (default = \code{NORMAL})
\item{\code{prior_par}} distribution parameters for \code{prior_dist}. (default = \code{c(0, 1)})
\item{\code{bound_ML}} theta bound in \code{c(lower_bound, upper_bound)} format. Used when \code{method} is \code{MLE}. (default = \code{-4, 4})
\item{\code{truncate_ML}} set \code{TRUE} to truncate ML estimate within \code{bound_ML}. (default = \code{FALSE})
\item{\code{max_iter}} maximum number of Newton-Raphson iterations. Used when \code{method} is \code{MLE}. (default = \code{50})
\item{\code{crit}} convergence criterion. Used when \code{method} is \code{MLE}. (default = \code{1e-03})
\item{\code{max_change}} maximum change in ML estimates between iterations. Changes exceeding this value is clipped to this value. Used when \code{method} is \code{MLE}. (default = \code{1.0})
\item{\code{use_step_size}} set \code{TRUE} to use \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{FALSE})
\item{\code{step_size}} upper bound to impose on the absolute change in initial theta and estimated theta. Absolute changes exceeding this value will be capped to \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{0.5})
\item{\code{do_Fisher}} set \code{TRUE} to use Fisher's method of scoring. Used when \code{method} is \code{MLE}. (default = \code{TRUE})
\item{\code{fence_slope}} slope parameter to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. Use one value to use the same value for both bounds. (default = \code{5})
\item{\code{fence_difficulty}} difficulty parameters to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. (default = \code{c(-5, 5)})
}}
\item{theta_grid}{the theta grid to use as quadrature points.}
}
\description{
\code{\link{createShadowTestConfig}} is a config function to create a \code{\linkS4class{config_Shadow}} object for Shadow test assembly.
Default values are used for any unspecified parameters/slots.
}
\examples{
cfg1 <- createShadowTestConfig(refresh_policy = list(
method = "STIMULUS"
))
cfg2 <- createShadowTestConfig(refresh_policy = list(
method = "POSITION",
position = c(1, 5, 9)
))
}
| /man/createShadowTestConfig.Rd | no_license | choi-phd/TestDesign | R | false | true | 11,265 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shadow_class.R
\docType{class}
\name{config_Shadow-class}
\alias{config_Shadow-class}
\alias{createShadowTestConfig}
\title{Create a config_Shadow object}
\usage{
createShadowTestConfig(
item_selection = NULL,
content_balancing = NULL,
MIP = NULL,
MCMC = NULL,
exclude_policy = NULL,
refresh_policy = NULL,
exposure_control = NULL,
stopping_criterion = NULL,
interim_theta = NULL,
final_theta = NULL,
theta_grid = seq(-4, 4, 0.1)
)
}
\arguments{
\item{item_selection}{a named list containing item selection criteria.
\itemize{
\item{\code{method}} the type of selection criteria. Accepts \code{MFI, MPWI, FB, EB, GFI}. (default = \code{MFI})
\item{\code{info_type}} the type of information. Accepts \code{FISHER}. (default = \code{FISHER})
\item{\code{initial_theta}} (optional) initial theta values to use.
\item{\code{fixed_theta}} (optional) fixed theta values to use throughout all item positions.
\item{\code{target_value}} (optional) the target value to use for \code{method = 'GFI'}.
}}
\item{content_balancing}{a named list containing content balancing options.
\itemize{
\item{\code{method}} the type of balancing method. Accepts \code{NONE, STA}. (default = \code{STA})
}}
\item{MIP}{a named list containing solver options.
\itemize{
\item{\code{solver}} the type of solver. Accepts \code{Rsymphony, gurobi, lpSolve, Rglpk}. (default = \code{LPSOLVE})
\item{\code{verbosity}} verbosity level of the solver. (default = \code{-2})
\item{\code{time_limit}} time limit in seconds. Used in solvers \code{Rsymphony, gurobi, Rglpk}. (default = \code{60})
\item{\code{gap_limit}} search termination criterion. Gap limit in relative scale passed onto the solver. Used in solver \code{gurobi}. (default = \code{.05})
\item{\code{gap_limit_abs}} search termination criterion. Gap limit in absolute scale passed onto the solver. Used in solvers \code{Rsymphony}. (default = \code{0.05})
\item{\code{obj_tol}} search termination criterion. The lower bound to use on the minimax deviation variable. Used when \code{item_selection$method} is \code{GFI}, and ignored otherwise. (default = \code{0.05})
\item{\code{retry}} number of times to retry running the solver if the solver returns no solution. Some solvers incorrectly return no solution even when a solution exists. This is the number of attempts to verify that the problem is indeed infeasible in such cases. Set to \code{0} to not retry. (default = \code{5})
}}
\item{MCMC}{a named list containing Markov-chain Monte Carlo configurations for obtaining posterior samples.
\itemize{
\item{\code{burn_in}} the number of chains from the start to discard. (default = \code{100})
\item{\code{post_burn_in}} the number of chains to use after discarding the first \code{burn_in} chains. (default = \code{500})
\item{\code{thin}} thinning interval to apply. \code{1} represents no thinning. (default = \code{1})
\item{\code{jump_factor}} the jump (scaling) factor for the proposal distribution. \code{1} represents no jumping. (default = \code{2.4})
}}
\item{exclude_policy}{a named list containing the exclude policy for use with the \code{exclude} argument in \code{\link{Shadow}}.
\itemize{
\item{\code{method}} the type of policy. Accepts \code{HARD, SOFT}. (default = \code{HARD})
\item{\code{M}} the Big M penalty to use on item information. Used in the \code{SOFT} method.
}}
\item{refresh_policy}{a named list containing the refresh policy for when to obtain a new shadow test.
\itemize{
\item{\code{method}} the type of policy. Accepts \code{ALWAYS, POSITION, INTERVAL, THRESHOLD, INTERVAL-THRESHOLD, STIMULUS, SET, PASSAGE}. (default = \code{ALWAYS})
\item{\code{interval}} used in methods \code{INTERVAL, INTERVAL-THRESHOLD}. Set to 1 to refresh at each position, 2 to refresh at every two positions, and so on. (default = \code{1})
\item{\code{threshold}} used in methods \code{THRESHOLD, INTERVAL-THRESHOLD}. The absolute change in between interim theta estimates to trigger the refresh. (default = \code{0.1})
\item{\code{position}} used in methods \code{POSITION}. Item positions to trigger the refresh. (default = \code{1})
}}
\item{exposure_control}{a named list containing exposure control settings.
\itemize{
\item{\code{method}} the type of exposure control method. Accepts \code{NONE, ELIGIBILITY, BIGM, BIGM-BAYESIAN}. (default = \code{ELIGIBILITY})
\item{\code{M}} used in methods \code{BIGM, BIGM-BAYESIAN}. the Big M penalty to use on item information.
\item{\code{max_exposure_rate}} target exposure rates for each segment. (default = \code{rep(0.25, 7)})
\item{\code{acceleration_factor}} the acceleration factor to apply. (default = \code{1})
\item{\code{n_segment}} the number of theta segments to use. (default = \code{7})
\item{\code{first_segment}} (optional) the theta segment assumed at the beginning of test for all participants.
\item{\code{segment_cut}} theta segment cuts. (default = \code{c(-Inf, seq(-2.5, 2.5, 1), Inf)})
\item{\code{initial_eligibility_stats}} (optional) initial eligibility statistics to use.
\item{\code{fading_factor}} the fading factor to apply. (default = \code{.999})
\item{\code{diagnostic_stats}} set to \code{TRUE} to generate segment-wise diagnostic statistics. (default = \code{FALSE})
}}
\item{stopping_criterion}{a named list containing stopping criterion.
\itemize{
\item{\code{method}} the type of stopping criterion. Accepts \code{FIXED}. (default = \code{FIXED})
\item{\code{test_length}} test length.
\item{\code{min_ni}} the minimum number of items to administer.
\item{\code{max_ni}} the maximum number of items to administer.
\item{\code{se_threshold}} standard error threshold. Item administration is stopped when theta estimate standard error becomes lower than this value.
}}
\item{interim_theta}{a named list containing interim theta estimation options.
\itemize{
\item{\code{method}} the type of estimation. Accepts \code{EAP, MLE, MLEF, EB, FB}. (default = \code{EAP})
\item{\code{shrinkage_correction}} set \code{TRUE} to apply shrinkage correction. Used when \code{method} is \code{EAP}. (default = \code{FALSE})
\item{\code{prior_dist}} the type of prior distribution. Accepts \code{NORMAL, UNIFORM}. (default = \code{NORMAL})
\item{\code{prior_par}} distribution parameters for \code{prior_dist}. (default = \code{c(0, 1)})
\item{\code{bound_ML}} theta bound in \code{c(lower_bound, upper_bound)} format. Used when \code{method} is \code{MLE}. (default = \code{-4, 4})
\item{\code{truncate_ML}} set \code{TRUE} to truncate ML estimate within \code{bound_ML}. (default = \code{FALSE})
\item{\code{max_iter}} maximum number of Newton-Raphson iterations. Used when \code{method} is \code{MLE}. (default = \code{50})
\item{\code{crit}} convergence criterion. Used when \code{method} is \code{MLE}. (default = \code{1e-03})
\item{\code{max_change}} maximum change in ML estimates between iterations. Changes exceeding this value is clipped to this value. Used when \code{method} is \code{MLE}. (default = \code{1.0})
\item{\code{use_step_size}} set \code{TRUE} to use \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{FALSE})
\item{\code{step_size}} upper bound to impose on the absolute change in initial theta and estimated theta. Absolute changes exceeding this value will be capped to \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{0.5})
\item{\code{do_Fisher}} set \code{TRUE} to use Fisher's method of scoring. Used when \code{method} is \code{MLE}. (default = \code{TRUE})
\item{\code{fence_slope}} slope parameter to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. Use one value to use the same value for both bounds. (default = \code{5})
\item{\code{fence_difficulty}} difficulty parameters to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. (default = \code{c(-5, 5)})
\item{\code{hand_scored_attribute}} (optional) the item attribute name for whether each item is hand-scored or not. The attribute should have \code{TRUE} (hand-scored) and \code{FALSE} (machine-scored) values. If a hand-scored item is administered to an examinee, the previous interim theta (or the starting theta if this occurs for the first item) is reused without updating the estimate.
}}
\item{final_theta}{a named list containing final theta estimation options.
\itemize{
\item{\code{method}} the type of estimation. Accepts \code{EAP, MLE, MLEF, EB, FB}. (default = \code{EAP})
\item{\code{shrinkage_correction}} set \code{TRUE} to apply shrinkage correction. Used when \code{method} is \code{EAP}. (default = \code{FALSE})
\item{\code{prior_dist}} the type of prior distribution. Accepts \code{NORMAL, UNIFORM}. (default = \code{NORMAL})
\item{\code{prior_par}} distribution parameters for \code{prior_dist}. (default = \code{c(0, 1)})
\item{\code{bound_ML}} theta bound in \code{c(lower_bound, upper_bound)} format. Used when \code{method} is \code{MLE}. (default = \code{c(-4, 4)})
\item{\code{truncate_ML}} set \code{TRUE} to truncate ML estimate within \code{bound_ML}. (default = \code{FALSE})
\item{\code{max_iter}} maximum number of Newton-Raphson iterations. Used when \code{method} is \code{MLE}. (default = \code{50})
\item{\code{crit}} convergence criterion. Used when \code{method} is \code{MLE}. (default = \code{1e-03})
\item{\code{max_change}} maximum change in ML estimates between iterations. Changes exceeding this value is clipped to this value. Used when \code{method} is \code{MLE}. (default = \code{1.0})
\item{\code{use_step_size}} set \code{TRUE} to use \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{FALSE})
\item{\code{step_size}} upper bound to impose on the absolute change in initial theta and estimated theta. Absolute changes exceeding this value will be capped to \code{step_size}. Used when \code{method} is \code{MLE} or \code{MLEF}. (default = \code{0.5})
\item{\code{do_Fisher}} set \code{TRUE} to use Fisher's method of scoring. Used when \code{method} is \code{MLE}. (default = \code{TRUE})
\item{\code{fence_slope}} slope parameter to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. Use one value to use the same value for both bounds. (default = \code{5})
\item{\code{fence_difficulty}} difficulty parameters to use for \code{method = 'MLEF'}. This must have two values in total, for the lower and upper bound item respectively. (default = \code{c(-5, 5)})
}}
\item{theta_grid}{the theta grid to use as quadrature points.}
}
\description{
\code{\link{createShadowTestConfig}} is a config function to create a \code{\linkS4class{config_Shadow}} object for Shadow test assembly.
Default values are used for any unspecified parameters/slots.
}
\examples{
cfg1 <- createShadowTestConfig(refresh_policy = list(
method = "STIMULUS"
))
cfg2 <- createShadowTestConfig(refresh_policy = list(
method = "POSITION",
position = c(1, 5, 9)
))
}
|
# Generic: dispatch per-iteration reporting on the class of `object`.
statistics <- function(object, iter) {
  UseMethod("statistics")
}

# Fallback method for classes without a dedicated implementation.
statistics.default <- function(object, iter) {
  "Default statistics"
}
statistics.Population <- function(object,iter) {
#print(object@populationmatrix)
#print(object@fitness)
#m <- which.max(object@fitness);
#print(m)
print(paste("Iteration", iter));
cat("\n");
cat(paste("Iteration", iter),"\n", file = "console.txt", sep = " ", fill = FALSE, labels = NULL, append = TRUE)
#print("Best individual");
#print(object@populationmatrix[m,])
#cat("\n");
print("Best fitness:");
print(max((object@fitness)));
cat(paste("Best fitness:", max((object@fitness))),"\n",'--------------------------------------------------------',"\n", file = "console.txt", sep = " ", fill = FALSE, labels = NULL, append = TRUE)
cat("\n");
print('--------------------------------------------------------')
cat("\n");
flush.console();
} | /Functions/StatisticFunction.R | no_license | cybervalient/CEBA | R | false | false | 898 | r | statistics <- function(object,iter) UseMethod("statistics")
statistics.default <- function(object,iter) "Default statistics"
# Report per-iteration progress for a Population object.
#
# Prints the current iteration number and the best fitness value to the
# console, and appends the same information (plus a separator rule) to
# "console.txt" in the working directory.
#
# `object` is expected to carry a numeric `fitness` slot accessed via `@`
# (an S4 Population -- TODO confirm slot layout against the class definition).
# `iter` is the current iteration number.
#
# Called for its side effects; the return value is not meaningful.
statistics.Population <- function(object, iter) {
  # Compute the best fitness once instead of re-evaluating max() twice.
  best_fitness <- max(object@fitness)
  print(paste("Iteration", iter))
  cat("\n")
  # Mirror the iteration header to the log file (appended, never overwritten).
  cat(paste("Iteration", iter), "\n", file = "console.txt", sep = " ",
      fill = FALSE, labels = NULL, append = TRUE)
  print("Best fitness:")
  print(best_fitness)
  cat(paste("Best fitness:", best_fitness), "\n",
      '--------------------------------------------------------', "\n",
      file = "console.txt", sep = " ", fill = FALSE, labels = NULL,
      append = TRUE)
  cat("\n")
  print('--------------------------------------------------------')
  cat("\n")
  # Force pending console output to appear immediately (relevant in GUIs).
  flush.console()
}
2*(3+5/2)
2*((3+5)/2)
2/3+5
2%/%3+5
2%%3
2^3*5
2^-4
2^(-4)
#division con entero
725%/%7
#Residuo
725%%7
# D = d * q + r
# r = D - d * q
# q = D %/% d
# x = D %% d
725 - 103*7
pi
2*pi
3^pi
pi^2
#Infinito
Inf
#Menos Infinito
-Inf
#No disponible
NA
#Operaciones no disponibles
NaN
5/0
0/0
# Notación cientifica
# Se mueve el punto decimal 15 numeros a la derecha
2^50 # = 1.1259e+15 = 1125900000000000
# Se mueve el punto decimal 15 numeros a la izquierda
2 ^(-15) # 3.051758e-05 = 0.00003051758
# Vector
c(2^30, 2^(-15), 1, 2, 3/2)
# Raiz
sqrt(25)
# Constante de euler
exp(1)
# Logaritmo de PI
log(pi)
# Logaritmo de 32 en base 2
log(32, 2)
# Logaritmo de 32 en base a
# una variable definida como 2
log(32, base = 2)
log(base=2, 32)
# Valor absoluto
abs(-pi)
# Factorial, se define como
# numero factorial de un numero
# entero positivo hasta llegar a 1
factorial(7)
factorial(4)
factorial(0)
# COEFICIENTE BINOMIAL
# Se define el
# coeficiente binomial de n sobre m como
" n!
------------
m!(n-m)!"
#Si tuviera 5 platos y me quisiera comer 3 de ellos
# de cuantas formas lo puedo hacer
# Numero de subjconjuntos que se puede sacar de un conjunto
choose(5, 3)
choose(3,5)
log(4^6, 4)
# Los argumentos dentro de una funcion, se separa
# por comas
6^log(4,6)
choose(5,2)
# FUNCIONES TRIGONOMETRICAS
# Estan dadas en radianes
# Utilizan los argumentos en radianes
sin(60*pi/180)
cos(60 * pi/180)
cos(120 * pi/180)
sinpi(1/2) # = sin (pi/2)
tan(pi) # -1.224647e-16 ~ (Tiende a...) 0
tan(pi/2) # 1.633124e+16 ~ (Tiende a...) Inf
asin(0.8660254) #arc sin en radianes
asin(0.8660254) * 180 /pi #arc sin en grados
asin(5) #arc sin x in [-1,1]
acos(-8)
# Numeros en coma flotante
# Imprime las n cifras siginificativa del numero x
print(sqrt(2),10)
# Redondea a n cifras significativas un resultado o un vector numerico x
round(sqrt(2), 3)
# [x], parte entera por defecto de x, redondea
floor(sqrt(2))
floor(pi)
# [x], parte entera por exceso de x
ceiling(sqrt(2))
ceiling(pi)
# Parte entera de x, eliminando la parte decimal
trunc(sqrt(2))
trunc(pi)
(sqrt(2)^2)-2
round(sqrt(2), 4)^2
2^50
# MANTIZA, obtener los 15 numeros mas significativos
print(2^50, 15)
# MANTIZA, obtener los 2 numeros mas significativos
print(2^50, 2)
print(pi, 22)
#3.141592653589793115998
#3.141592653589793238462
round(1.25,1)
round(1.35,1)
# Es lo mismo
round(sqrt(2),0)
# Que esto
round(sqrt(2))
?round(digits = 5, sqrt(2))
# Se puede cambiar el orden de los argumentos, cuando se hace
# se tiene que especificar
round(5, sqrt(2))
floor(-3.45)
ceiling(-3.45)
trunc(-3.45)
Video 23
x = (pi^2)/2
x
y <- cos(pi/4)
y
sin(pi/4) + cos(pi/4) -> z
z
edad <- 30
nombre = "Juan Gabriel"
HOLA = 1
hola = 5
pi.4 = 4*pi
pi.4
x = 2
x = x^2
x = sqrt(x)
## Función f(x) = x^3 - (3^x)* sen(x)
f = function(x) {
x^3 - (3^x) * sin(x)
}
f(4) # 4^3 - 3^4 * sin(4)
f(5)
f(pi/2)
suma1 <- function(t){
t + 1
}
suma1(6)
suma1(-5)
product <- function(x, y){
x*y
}
product(5,7)
g <- function(x,y,z) {
exp(x^2 + y^2) * sin(z)
}
g(1, 2, 3)
g(1, -1, pi)
suma5 <- function(numero){
numero = suma1(numero);
numero = suma1(numero);
numero = suma1(numero);
numero = suma1(numero);
suma1(numero)
}
suma5(3)
ls()
rm(product)
ls()
rm(list = ls())
class(3+2i)
(3+2i)*5
(3+2i)*(-1+3i)
(3+2i)/(-1+3i)
#Esto es un error:
#2+7*i
#pi + sqrt(2)i
complex(real = pi, imaginary = sqrt(2)) -> z1
z1
sqrt(-5)
sqrt(as.complex(-5))
#La raíz cuadrada devuelve, de las dos soluciones la de
#Re(z)>0, para obtener la otra, hay que multiplicar por -1
sqrt(3+2i) # z^2 = 3+2i
exp(3+2i)
sin(3+2i)
cos(3+2i)
#Módulo = sqrt(Re(z)^2 + Im(z)^2)
Mod(z1)
#Argumento = arctan(Im(z)/Re(z))
# = arccos(Re(z)/Mod(z))
# = arcsin(Im(z)/Mod(z))
# va de (-pi, pi]
Arg(-1+0i)
Arg(-1-2i)
#Conjugado = Re(z)- Im(z)i
Conj(z1)
#Parte Real y Parte Imaginaria
Re(z1)
Im(z1)
### z = Mod(z) * (cos(Arg(z))+sin(Arg(z))i)
complex(modulus = 2, argument = pi/2) -> z2
z2
Mod(z2)
Arg(z2)
pi/2 | /scripts/tema1/04-calculadora.R | no_license | AlexisFrancoR/r-basic | R | false | false | 4,003 | r | 2*(3+5/2)
2*((3+5)/2)
2/3+5
2%/%3+5
2%%3
2^3*5
2^-4
2^(-4)
#division con entero
725%/%7
#Residuo
725%%7
# D = d * q + r
# r = D - d * q
# q = D %/% d
# x = D %% d
725 - 103*7
pi
2*pi
3^pi
pi^2
#Infinito
Inf
#Menos Infinito
-Inf
#No disponible
NA
#Operaciones no disponibles
NaN
5/0
0/0
# Notación cientifica
# Se mueve el punto decimal 15 numeros a la derecha
2^50 # = 1.1259e+15 = 1125900000000000
# Se mueve el punto decimal 15 numeros a la izquierda
2 ^(-15) # 3.051758e-05 = 0.00003051758
# Vector
c(2^30, 2^(-15), 1, 2, 3/2)
# Raiz
sqrt(25)
# Constante de euler
exp(1)
# Logaritmo de PI
log(pi)
# Logaritmo de 32 en base 2
log(32, 2)
# Logaritmo de 32 en base a
# una variable definida como 2
log(32, base = 2)
log(base=2, 32)
# Valor absoluto
abs(-pi)
# Factorial, se define como
# numero factorial de un numero
# entero positivo hasta llegar a 1
factorial(7)
factorial(4)
factorial(0)
# COEFICIENTE BINOMIAL
# Se define el
# coeficiente binomial de n sobre m como
" n!
------------
m!(n-m)!"
#Si tuviera 5 platos y me quisiera comer 3 de ellos
# de cuantas formas lo puedo hacer
# Numero de subjconjuntos que se puede sacar de un conjunto
choose(5, 3)
choose(3,5)
log(4^6, 4)
# Los argumentos dentro de una funcion, se separa
# por comas
6^log(4,6)
choose(5,2)
# FUNCIONES TRIGONOMETRICAS
# Estan dadas en radianes
# Utilizan los argumentos en radianes
sin(60*pi/180)
cos(60 * pi/180)
cos(120 * pi/180)
sinpi(1/2) # = sin (pi/2)
tan(pi) # -1.224647e-16 ~ (Tiende a...) 0
tan(pi/2) # 1.633124e+16 ~ (Tiende a...) Inf
asin(0.8660254) #arc sin en radianes
asin(0.8660254) * 180 /pi #arc sin en grados
asin(5) #arc sin x in [-1,1]
acos(-8)
# Numeros en coma flotante
# Imprime las n cifras siginificativa del numero x
print(sqrt(2),10)
# Redondea a n cifras significativas un resultado o un vector numerico x
round(sqrt(2), 3)
# [x], parte entera por defecto de x, redondea
floor(sqrt(2))
floor(pi)
# [x], parte entera por exceso de x
ceiling(sqrt(2))
ceiling(pi)
# Parte entera de x, eliminando la parte decimal
trunc(sqrt(2))
trunc(pi)
(sqrt(2)^2)-2
round(sqrt(2), 4)^2
2^50
# MANTIZA, obtener los 15 numeros mas significativos
print(2^50, 15)
# MANTIZA, obtener los 2 numeros mas significativos
print(2^50, 2)
print(pi, 22)
#3.141592653589793115998
#3.141592653589793238462
round(1.25,1)
round(1.35,1)
# Es lo mismo
round(sqrt(2),0)
# Que esto
round(sqrt(2))
?round(digits = 5, sqrt(2))
# Se puede cambiar el orden de los argumentos, cuando se hace
# se tiene que especificar
round(5, sqrt(2))
floor(-3.45)
ceiling(-3.45)
trunc(-3.45)
Video 23
x = (pi^2)/2
x
y <- cos(pi/4)
y
sin(pi/4) + cos(pi/4) -> z
z
edad <- 30
nombre = "Juan Gabriel"
HOLA = 1
hola = 5
pi.4 = 4*pi
pi.4
x = 2
x = x^2
x = sqrt(x)
## Función f(x) = x^3 - (3^x)* sen(x)
f = function(x) {
x^3 - (3^x) * sin(x)
}
f(4) # 4^3 - 3^4 * sin(4)
f(5)
f(pi/2)
suma1 <- function(t){
t + 1
}
suma1(6)
suma1(-5)
product <- function(x, y){
x*y
}
product(5,7)
g <- function(x,y,z) {
exp(x^2 + y^2) * sin(z)
}
g(1, 2, 3)
g(1, -1, pi)
suma5 <- function(numero){
numero = suma1(numero);
numero = suma1(numero);
numero = suma1(numero);
numero = suma1(numero);
suma1(numero)
}
suma5(3)
ls()
rm(product)
ls()
rm(list = ls())
class(3+2i)
(3+2i)*5
(3+2i)*(-1+3i)
(3+2i)/(-1+3i)
#Esto es un error:
#2+7*i
#pi + sqrt(2)i
complex(real = pi, imaginary = sqrt(2)) -> z1
z1
sqrt(-5)
sqrt(as.complex(-5))
#La raíz cuadrada devuelve, de las dos soluciones la de
#Re(z)>0, para obtener la otra, hay que multiplicar por -1
sqrt(3+2i) # z^2 = 3+2i
exp(3+2i)
sin(3+2i)
cos(3+2i)
#Módulo = sqrt(Re(z)^2 + Im(z)^2)
Mod(z1)
#Argumento = arctan(Im(z)/Re(z))
# = arccos(Re(z)/Mod(z))
# = arcsin(Im(z)/Mod(z))
# va de (-pi, pi]
Arg(-1+0i)
Arg(-1-2i)
#Conjugado = Re(z)- Im(z)i
Conj(z1)
#Parte Real y Parte Imaginaria
Re(z1)
Im(z1)
### z = Mod(z) * (cos(Arg(z))+sin(Arg(z))i)
complex(modulus = 2, argument = pi/2) -> z2
z2
Mod(z2)
Arg(z2)
pi/2 |
# Build the preprocessing stage. `mold`, when supplied, must look like the
# list returned by `hardhat::mold()`.
new_stage_pre <- function(actions = list(), mold = NULL) {
  mold_is_valid <- is.null(mold) || is.list(mold)
  if (!mold_is_valid) {
    abort("`mold` must be a result of calling `hardhat::mold()`.")
  }
  new_stage(actions = actions, mold = mold, subclass = "stage_pre")
}
# Build the fitting stage. `fit`, when supplied, must be a parsnip
# `model_fit` object.
new_stage_fit <- function(actions = list(), fit = NULL) {
  fit_is_valid <- is.null(fit) || is_model_fit(fit)
  if (!fit_is_valid) {
    abort("`fit` must be a `model_fit`.")
  }
  new_stage(actions = actions, fit = fit, subclass = "stage_fit")
}
# Build the postprocessing stage; it carries only its actions.
new_stage_post <- function(actions = list()) {
  new_stage(actions = actions, subclass = "stage_post")
}
# ------------------------------------------------------------------------------
# A `stage` is a collection of `action`s
# There are 3 stages that actions can fall into:
# - pre
# - fit
# - post
# Low-level stage constructor shared by the pre/fit/post helpers.
# Validates the actions and any extra fields passed through `...`, then
# builds a classed list with `subclass` prepended to "stage".
new_stage <- function(actions = list(), ..., subclass = character()) {
  if (!is_list_of_actions(actions)) {
    abort("`actions` must be a list of actions.")
  }
  if (!is_uniquely_named(actions)) {
    abort("`actions` must be uniquely named.")
  }
  extra_fields <- list2(...)
  if (!is_uniquely_named(extra_fields)) {
    abort("`...` must be uniquely named.")
  }
  # Splice the validated extras after `actions` so `actions` is always first.
  all_fields <- list2(actions = actions, !!!extra_fields)
  structure(all_fields, class = c(subclass, "stage"))
}
# ------------------------------------------------------------------------------
# TRUE when `x` carries the "stage" class (possibly among subclasses).
is_stage <- function(x) {
  "stage" %in% class(x)
}
# TRUE when the stage already holds an action stored under `name`.
has_action <- function(stage, name) {
  action_names <- names(stage$actions)
  name %in% action_names
}
# ------------------------------------------------------------------------------
| /R/stage.R | permissive | dkgaraujo/workflows | R | false | false | 1,511 | r | new_stage_pre <- function(actions = list(), mold = NULL) {
if (!is.null(mold) && !is.list(mold)) {
abort("`mold` must be a result of calling `hardhat::mold()`.")
}
new_stage(actions = actions, mold = mold, subclass = "stage_pre")
}
new_stage_fit <- function(actions = list(), fit = NULL) {
if (!is.null(fit) && !is_model_fit(fit)) {
abort("`fit` must be a `model_fit`.")
}
new_stage(actions = actions, fit = fit, subclass = "stage_fit")
}
new_stage_post <- function(actions = list()) {
new_stage(actions, subclass = "stage_post")
}
# ------------------------------------------------------------------------------
# A `stage` is a collection of `action`s
# There are 3 stages that actions can fall into:
# - pre
# - fit
# - post
new_stage <- function(actions = list(), ..., subclass = character()) {
if (!is_list_of_actions(actions)) {
abort("`actions` must be a list of actions.")
}
if (!is_uniquely_named(actions)) {
abort("`actions` must be uniquely named.")
}
fields <- list2(...)
if (!is_uniquely_named(fields)) {
abort("`...` must be uniquely named.")
}
fields <- list2(actions = actions, !!! fields)
structure(fields, class = c(subclass, "stage"))
}
# ------------------------------------------------------------------------------
is_stage <- function(x) {
inherits(x, "stage")
}
has_action <- function(stage, name) {
name %in% names(stage$actions)
}
# ------------------------------------------------------------------------------
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/generatePartialPrediction.R
\name{generatePartialPredictionData}
\alias{generatePartialPredictionData}
\title{Generate partial predictions}
\usage{
generatePartialPredictionData(obj, data, features, interaction = FALSE,
fun = mean, resample = "none", fmin = sapply(features, function(x)
ifelse(!is.factor(data[[x]]), min(data[[x]], na.rm = TRUE), NA)),
fmax = sapply(features, function(x) ifelse(!is.factor(data[[x]]),
max(data[[x]], na.rm = TRUE), NA)), gridsize = 10L, ...)
}
\arguments{
\item{obj}{a \code{\link{WrappedModel}} returned from \code{\link{train}}.}
\item{data}{a \code{data.frame} with the same columns as are present in the training data.}
\item{features}{\code{character}\cr
A vector of feature names matching the training data.}
\item{interaction}{\code{logical(1)}\cr
Whether the \code{features} should be interacted or not. If \code{TRUE} then the Cartesian product of the
prediction grid for each feature is taken, and the partial prediction at each unique combination of
values of the features is estimated. Note that if the length of \code{features} is greater than two,
\code{\link{plotPartialPrediction}} and \code{\link{plotPartialPredictionGGVIS}} cannot be used.
If \code{FALSE} each feature is considered separately. In this case \code{features} can be much longer
than two.
Default is \code{FALSE}.}
\item{fun}{for regression, a function that accepts a numeric vector and returns either a single number
such as a measure of location such as the mean, or three numbers, which give a lower bound,
a measure of location, and an upper bound. Note if three numbers are returned they must be
in this order. For classification with \code{predict.type = "prob"} the function must accept
a numeric matrix with the number of columns equal to the number of class levels of the target.
For classification with \code{predict.type = "response"} (the default) the function must accept
a character vector and output a numeric vector with length equal to the number of classes in the
target feature.
The default is the mean, unless \code{obj} is classification with \code{predict.type = "response"}
in which case the default is the proportion of observations predicted to be in each class.}
\item{resample}{\code{character(1)}\cr
Defines how the prediction grid for each feature is created. If \dQuote{bootstrap} then
values are sampled with replacement from the training data. If \dQuote{subsample} then
values are sampled without replacement from the training data. If \dQuote{none} an evenly spaced
grid between either the empirical minimum and maximum, or the minimum and maximum defined by
\code{fmin} and \code{fmax}, is created.
Default is \dQuote{none}.}
\item{fmin}{\code{numeric}\cr
The minimum value that each element of \code{features} can take.
This argument is only applicable if \code{resample = "none"} and when the empirical minimum is higher
than the theoretical minimum for a given feature.
Default is the empirical minimum of each numeric feature and NA for factor features.}
\item{fmax}{\code{numeric}\cr
The maximum value that each element of \code{features} can take.
This argument is only applicable if \code{resample = "none"} and when the empirical maximum is lower
than the theoretical maximum for a given feature.
Default is the empirical maximum of each numeric feature and NA for factor features.}
\item{gridsize}{\code{integer(1)}\cr
The length of the prediction grid created for each feature.
If \code{resample = "bootstrap"} or \code{resample = "subsample"} then this defines
the number of (possibly non-unique) values resampled. If \code{resample = "none"} it defines the
length of the evenly spaced grid created.}
\item{...}{additional arguments to be passed to \code{\link{predict}}.}
}
\value{
an object of class \code{PartialPredictionData}, a named list, which contains the data,
the target, the features, and the task description.
}
\description{
Estimate how the learned prediction function is affected by one or more features
}
\examples{
lrn = makeLearner("classif.rpart", predict.type = "prob")
fit = train(lrn, iris.task)
pd = generatePartialPredictionData(fit, getTaskData(iris.task), c("Petal.Width", "Petal.Length"))
plotPartialPrediction(pd)
}
\seealso{
Other generate_plot_data: \code{\link{generateBenchmarkSummaryData}};
\code{\link{generateCalibrationData}};
\code{\link{generateCritDifferencesData}};
\code{\link{generateFilterValuesData}};
\code{\link{generateLearningCurveData}};
\code{\link{generateROCRCurvesData}};
\code{\link{generateRankMatrixAsBarData}};
\code{\link{generateThreshVsPerfData}};
\code{\link{getFilterValues}}
Other partial_prediction: \code{\link{plotPartialPredictionGGVIS}};
\code{\link{plotPartialPrediction}}
}
| /man/generatePartialPredictionData.Rd | no_license | hetong007/mlr | R | false | false | 4,829 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/generatePartialPrediction.R
\name{generatePartialPredictionData}
\alias{generatePartialPredictionData}
\title{Generate partial predictions}
\usage{
generatePartialPredictionData(obj, data, features, interaction = FALSE,
fun = mean, resample = "none", fmin = sapply(features, function(x)
ifelse(!is.factor(data[[x]]), min(data[[x]], na.rm = TRUE), NA)),
fmax = sapply(features, function(x) ifelse(!is.factor(data[[x]]),
max(data[[x]], na.rm = TRUE), NA)), gridsize = 10L, ...)
}
\arguments{
\item{obj}{a \code{\link{WrappedModel}} returned from \code{\link{train}}.}
\item{data}{a \code{data.frame} with the same columns as are present in the training data.}
\item{features}{\code{character}\cr
A vector of feature names matching the training data.}
\item{interaction}{\code{logical(1)}\cr
Whether the \code{features} should be interacted or not. If \code{TRUE} then the Cartesian product of the
prediction grid for each feature is taken, and the partial prediction at each unique combination of
values of the features is estimated. Note that if the length of \code{features} is greater than two,
\code{\link{plotPartialPrediction}} and \code{\link{plotPartialPredictionGGVIS}} cannot be used.
If \code{FALSE} each feature is considered separately. In this case \code{features} can be much longer
than two.
Default is \code{FALSE}.}
\item{fun}{for regression, a function that accepts a numeric vector and returns either a single number
such as a measure of location such as the mean, or three numbers, which give a lower bound,
a measure of location, and an upper bound. Note if three numbers are returned they must be
in this order. For classification with \code{predict.type = "prob"} the function must accept
a numeric matrix with the number of columns equal to the number of class levels of the target.
For classification with \code{predict.type = "response"} (the default) the function must accept
a character vector and output a numeric vector with length equal to the number of classes in the
target feature.
The default is the mean, unless \code{obj} is classification with \code{predict.type = "response"}
in which case the default is the proportion of observations predicted to be in each class.}
\item{resample}{\code{character(1)}\cr
Defines how the prediction grid for each feature is created. If \dQuote{bootstrap} then
values are sampled with replacement from the training data. If \dQuote{subsample} then
values are sampled without replacement from the training data. If \dQuote{none} an evenly spaced
grid between either the empirical minimum and maximum, or the minimum and maximum defined by
\code{fmin} and \code{fmax}, is created.
Default is \dQuote{none}.}
\item{fmin}{\code{numeric}\cr
The minimum value that each element of \code{features} can take.
This argument is only applicable if \code{resample = "none"} and when the empirical minimum is higher
than the theoretical minimum for a given feature.
Default is the empirical minimum of each numeric feature and NA for factor features.}
\item{fmax}{\code{numeric}\cr
The maximum value that each element of \code{features} can take.
This argument is only applicable if \code{resample = "none"} and when the empirical maximum is lower
than the theoretical maximum for a given feature.
Default is the empirical maximum of each numeric feature and NA for factor features.}
\item{gridsize}{\code{integer(1)}\cr
The length of the prediction grid created for each feature.
If \code{resample = "bootstrap"} or \code{resample = "subsample"} then this defines
the number of (possibly non-unique) values resampled. If \code{resample = "none"} it defines the
length of the evenly spaced grid created.}
\item{...}{additional arguments to be passed to \code{\link{predict}}.}
}
\value{
an object of class \code{PartialPredictionData}, a named list, which contains the data,
the target, the features, and the task description.
}
\description{
Estimate how the learned prediction function is affected by one or more features
}
\examples{
lrn = makeLearner("classif.rpart", predict.type = "prob")
fit = train(lrn, iris.task)
pd = generatePartialPredictionData(fit, getTaskData(iris.task), c("Petal.Width", "Petal.Length"))
plotPartialPrediction(pd)
}
\seealso{
Other generate_plot_data: \code{\link{generateBenchmarkSummaryData}};
\code{\link{generateCalibrationData}};
\code{\link{generateCritDifferencesData}};
\code{\link{generateFilterValuesData}};
\code{\link{generateLearningCurveData}};
\code{\link{generateROCRCurvesData}};
\code{\link{generateRankMatrixAsBarData}};
\code{\link{generateThreshVsPerfData}};
\code{\link{getFilterValues}}
Other partial_prediction: \code{\link{plotPartialPredictionGGVIS}};
\code{\link{plotPartialPrediction}}
}
|
#' @export
vec_proxy_compare.raw <- function(x, ...) {
  # `order()` cannot sort raw vectors:
  #   order(as.raw(1:3))
  #   #> Error in order(as.raw(1:3)): unimplemented type 'raw' in 'orderVector1'
  # so compare raw values through their integer representation instead.
  as.integer(x)
}
| /R/type-raw.R | no_license | rcodo/vctrs | R | false | false | 189 | r |
#' @export
vec_proxy_compare.raw <- function(x, ...) {
# because:
# order(as.raw(1:3))
# #> Error in order(as.raw(1:3)): unimplemented type 'raw' in 'orderVector1'
as.integer(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_gt3x_parse_info_txt.R
\name{parse_info_txt}
\alias{parse_info_txt}
\title{Parse the info component of a gt3x file}
\usage{
parse_info_txt(info, tz = "UTC", verbose, ...)
}
\arguments{
\item{info}{connection to the info.txt file}
\item{tz}{character. The timezone}
\item{verbose}{logical. Print updates to console?}
\item{...}{further arguments/methods. Currently unused.}
}
\description{
Parse the info component of a gt3x file
}
\keyword{internal}
| /man/parse_info_txt.Rd | permissive | cran/AGread | R | false | true | 556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_gt3x_parse_info_txt.R
\name{parse_info_txt}
\alias{parse_info_txt}
\title{Parse the info component of a gt3x file}
\usage{
parse_info_txt(info, tz = "UTC", verbose, ...)
}
\arguments{
\item{info}{connection to the info.txt file}
\item{tz}{character. The timezone}
\item{verbose}{logical. Print updates to console?}
\item{...}{further arguments/methods. Currently unused.}
}
\description{
Parse the info component of a gt3x file
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_mixed.R
\name{match_mixed}
\alias{match_mixed}
\title{Test and Control Selector for Groups/Individuals, with Mixed Input Variables/Metrics.}
\usage{
match_mixed(df, n = 10, test_list = NULL)
}
\arguments{
\item{df}{data frame of numeric, or mixed inputs. First column must have group/individuals names, 1 line per group/individuals.}
\item{n}{size of the test group, and matching control group. Defaults to 10. Will be ignored if a df is provided to the "test_list" parameter.}
\item{test_list}{df with one column named "TEST." This has a list of members in the current test. Defaults to NULL.}
}
\value{
If the "n" parameter is used, the function outputs a data frame with a list of randomized test groups/individuals from the supplied df with matching control groups/individuals, a 1 to 1 match.
If a data frame is supplied to the "test_list" parameter, 1 to 1 matching control stores will be created for the groups/individuals in the "TEST" column supplied to the "test_list" parameter.
}
\description{
Randomly select test groups/individuals and create matching control
groups/individuals by using Euclidean distance on scaled numeric variables,
or with Gower's method for datasets with numeric and categorical variables.
This function can handle both numeric and categorical as well as just numeric
variables with Gower's methodology from cluster::daisy() function.
}
\details{
The data frame must contain the group/individual labels in the first column
and the other variables must be in levels, in other words not scaled.
In the case where duplicates arise in the Control, the function iterates
through the test control list until there are no duplicates in the Control.
In each iteration, it re-ranks the remaining possible control groups/individuals
and matches to the test on the lowest distance.
You can supply a data frame of pre-selected test groups/individuals to the
parameter test_list and the function will provide you with a list of control
groups/individuals.
}
\examples{
library(dplyr)
library(magrittr)
df <- datasets::USArrests \%>\% dplyr::mutate(state = base::row.names(datasets::USArrests)) \%>\%
base::cbind(datasets::state.division) \%>\%
dplyr::select(state, dplyr::everything())
TEST_CONTROL_LIST <- TestContR::match_mixed(df, n = 15)
}
| /man/match_mixed.Rd | permissive | Fredo-XVII/TestContR | R | false | true | 2,354 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_mixed.R
\name{match_mixed}
\alias{match_mixed}
\title{Test and Control Selector for Groups/Individuals, with Mixed Input Variables/Metrics.}
\usage{
match_mixed(df, n = 10, test_list = NULL)
}
\arguments{
\item{df}{data frame of numeric, or mixed inputs. First column must have group/individuals names, 1 line per group/individuals.}
\item{n}{size of the test group, and matching control group. Defaults to 10. Will be ignored if a df is provided to the "test_list" parameter.}
\item{test_list}{df with one column named "TEST." This has a list of members in the current test. Defaults to NULL.}
}
\value{
If the "n" parameter is used, the function outputs a data frame with a list of randomized test groups/individuals from the supplied df with matching control groups/individuals, a 1 to 1 match.
If a data frame is supplied to the "test_list" parameter, 1 to 1 matching control stores will be created for the groups/individuals in the "TEST" column supplied to the "test_list" parameter.
}
\description{
Randomly select test groups/individuals and create matching control
groups/individuals by using Euclidean distance on scaled numeric variables,
or with Gower's method for datasets with numeric and categorical variables.
This function can handle both numeric and categorical as well as just numeric
variables with Gower's methodology from cluster::daisy() function.
}
\details{
The data frame must contain the group/individual labels in the first column
and the other variables must be in levels, in other words not scaled.
In the case where duplicates arise in the Control, the function iterates
through the test control list until there are no duplicates in the Control.
In each iteration, it re-ranks the remaining possible control groups/individuals
and matches to the test on the lowest distance.
You can supply a data frame of pre-selected test groups/individuals to the
parameter test_list and the function will provide you with a list of control
groups/individuals.
}
\examples{
library(dplyr)
library(magrittr)
df <- datasets::USArrests \%>\% dplyr::mutate(state = base::row.names(datasets::USArrests)) \%>\%
base::cbind(datasets::state.division) \%>\%
dplyr::select(state, dplyr::everything())
TEST_CONTROL_LIST <- TestContR::match_mixed(df, n = 15)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeData.R
\name{writeData}
\alias{writeData}
\title{Write an object to a worksheet}
\usage{
writeData(wb, sheet, x, startCol = 1, startRow = 1, xy = NULL,
colNames = TRUE, rowNames = FALSE, headerStyle = NULL,
borders = c("none", "surrounding", "rows", "columns", "all"),
borderColour = getOption("openxlsx.borderColour", "black"),
borderStyle = getOption("openxlsx.borderStyle", "thin"),
withFilter = FALSE, keepNA = FALSE, name = NULL, sep = ", ")
}
\arguments{
\item{wb}{A Workbook object containing a worksheet.}
\item{sheet}{The worksheet to write to. Can be the worksheet index or name.}
\item{x}{Object to be written. For classes supported look at the examples.}
\item{startCol}{A vector specifying the starting column to write to.}
\item{startRow}{A vector specifying the starting row to write to.}
\item{xy}{An alternative to specifying \code{startCol} and
\code{startRow} individually. A vector of the form
\code{c(startCol, startRow)}.}
\item{colNames}{If \code{TRUE}, column names of x are written.}
\item{rowNames}{If \code{TRUE}, data.frame row names of x are written.}
\item{headerStyle}{Custom style to apply to column names.}
\item{borders}{Either "\code{none}" (default), "\code{surrounding}",
"\code{columns}", "\code{rows}" or \emph{respective abbreviations}. If
"\code{surrounding}", a border is drawn around the data. If "\code{rows}",
a surrounding border is drawn with a border around each row. If
"\code{columns}", a surrounding border is drawn with a border between
each column. If "\code{all}" all cell borders are drawn.}
\item{borderColour}{Colour of cell border. A valid colour (belonging to \code{colours()} or a hex colour code, e.g. see \href{http://www.colorpicker.com}{here}).}
\item{borderStyle}{Border line style
\itemize{
\item{\bold{none}}{ no border}
\item{\bold{thin}}{ thin border}
\item{\bold{medium}}{ medium border}
\item{\bold{dashed}}{ dashed border}
\item{\bold{dotted}}{ dotted border}
\item{\bold{thick}}{ thick border}
\item{\bold{double}}{ double line border}
\item{\bold{hair}}{ hairline border}
\item{\bold{mediumDashed}}{ medium weight dashed border}
\item{\bold{dashDot}}{ dash-dot border}
\item{\bold{mediumDashDot}}{ medium weight dash-dot border}
\item{\bold{dashDotDot}}{ dash-dot-dot border}
\item{\bold{mediumDashDotDot}}{ medium weight dash-dot-dot border}
\item{\bold{slantDashDot}}{ slanted dash-dot border}
}}
\item{withFilter}{If \code{TRUE}, add filters to the column name row. NOTE can only have one filter per worksheet.}
\item{keepNA}{If \code{TRUE}, NA values are converted to #N/A in Excel else NA cells will be empty.}
\item{name}{If not NULL, a named region is defined.}
\item{sep}{Only applies to list columns. The separator used to collapse list columns to a character vector e.g. sapply(x$list_column, paste, collapse = sep).}
}
\value{
invisible(0)
}
\description{
Write an object to worksheet with optional styling.
}
\details{
Formulae written using writeFormula to a Workbook object will not get picked up by read.xlsx().
This is because only the formula is written and left to Excel to evaluate the formula when the file is opened in Excel.
}
\examples{
## See formatting vignette for further examples.
## Options for default styling (These are the defaults)
options("openxlsx.borderColour" = "black")
options("openxlsx.borderStyle" = "thin")
options("openxlsx.dateFormat" = "mm/dd/yyyy")
options("openxlsx.datetimeFormat" = "yyyy-mm-dd hh:mm:ss")
options("openxlsx.numFmt" = NULL)
## Change the default border colour to #4F81BD
options("openxlsx.borderColour" = "#4F81BD")
#####################################################################################
## Create Workbook object and add worksheets
wb <- createWorkbook()
## Add worksheets
addWorksheet(wb, "Cars")
addWorksheet(wb, "Formula")
x <- mtcars[1:6,]
writeData(wb, "Cars", x, startCol = 2, startRow = 3, rowNames = TRUE)
#####################################################################################
## Bordering
writeData(wb, "Cars", x, rowNames = TRUE, startCol = "O", startRow = 3,
borders="surrounding", borderColour = "black") ## black border
writeData(wb, "Cars", x, rowNames = TRUE,
startCol = 2, startRow = 12, borders="columns")
writeData(wb, "Cars", x, rowNames = TRUE,
startCol="O", startRow = 12, borders="rows")
#####################################################################################
## Header Styles
hs1 <- createStyle(fgFill = "#DCE6F1", halign = "CENTER", textDecoration = "italic",
border = "Bottom")
writeData(wb, "Cars", x, colNames = TRUE, rowNames = TRUE, startCol="B",
startRow = 23, borders="rows", headerStyle = hs1, borderStyle = "dashed")
hs2 <- createStyle(fontColour = "#ffffff", fgFill = "#4F80BD",
halign = "center", valign = "center", textDecoration = "bold",
border = "TopBottomLeftRight")
writeData(wb, "Cars", x, colNames = TRUE, rowNames = TRUE,
startCol="O", startRow = 23, borders="columns", headerStyle = hs2)
#####################################################################################
## Hyperlinks
## - vectors/columns with class 'hyperlink' are written as hyperlinks'
v <- rep("https://CRAN.R-project.org/", 4)
names(v) <- paste("Hyperlink", 1:4) # Optional: names will be used as display text
class(v) <- 'hyperlink'
writeData(wb, "Cars", x = v, xy = c("B", 32))
#####################################################################################
## Formulas
## - vectors/columns with class 'formula' are written as formulas'
df <- data.frame(x=1:3, y = 1:3,
z = paste(paste0("A", 1:3+1L), paste0("B", 1:3+1L), sep = " + "),
stringsAsFactors = FALSE)
class(df$z) <- c(class(df$z), "formula")
writeData(wb, sheet = "Formula", x = df)
#####################################################################################
## Save workbook
## Open in excel without saving file: openXL(wb)
\dontrun{saveWorkbook(wb, "writeDataExample.xlsx", overwrite = TRUE)}
}
\seealso{
\code{\link{writeDataTable}}
}
\author{
Alexander Walker
}
| /man/writeData.Rd | no_license | david-f1976/openxlsx | R | false | true | 6,273 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeData.R
\name{writeData}
\alias{writeData}
\title{Write an object to a worksheet}
\usage{
writeData(wb, sheet, x, startCol = 1, startRow = 1, xy = NULL,
colNames = TRUE, rowNames = FALSE, headerStyle = NULL,
borders = c("none", "surrounding", "rows", "columns", "all"),
borderColour = getOption("openxlsx.borderColour", "black"),
borderStyle = getOption("openxlsx.borderStyle", "thin"),
withFilter = FALSE, keepNA = FALSE, name = NULL, sep = ", ")
}
\arguments{
\item{wb}{A Workbook object containing a worksheet.}
\item{sheet}{The worksheet to write to. Can be the worksheet index or name.}
\item{x}{Object to be written. For classes supported look at the examples.}
\item{startCol}{A vector specifying the starting column to write to.}
\item{startRow}{A vector specifying the starting row to write to.}
\item{xy}{An alternative to specifying \code{startCol} and
\code{startRow} individually. A vector of the form
\code{c(startCol, startRow)}.}
\item{colNames}{If \code{TRUE}, column names of x are written.}
\item{rowNames}{If \code{TRUE}, data.frame row names of x are written.}
\item{headerStyle}{Custom style to apply to column names.}
\item{borders}{Either "\code{none}" (default), "\code{surrounding}",
"\code{columns}", "\code{rows}" or \emph{respective abbreviations}. If
"\code{surrounding}", a border is drawn around the data. If "\code{rows}",
a surrounding border is drawn with a border around each row. If
"\code{columns}", a surrounding border is drawn with a border between
each column. If "\code{all}" all cell borders are drawn.}
\item{borderColour}{Colour of cell border. A valid colour (belonging to \code{colours()} or a hex colour code, e.g. see \href{http://www.colorpicker.com}{here}).}
\item{borderStyle}{Border line style
\itemize{
\item{\bold{none}}{ no border}
\item{\bold{thin}}{ thin border}
\item{\bold{medium}}{ medium border}
\item{\bold{dashed}}{ dashed border}
\item{\bold{dotted}}{ dotted border}
\item{\bold{thick}}{ thick border}
\item{\bold{double}}{ double line border}
\item{\bold{hair}}{ hairline border}
\item{\bold{mediumDashed}}{ medium weight dashed border}
\item{\bold{dashDot}}{ dash-dot border}
\item{\bold{mediumDashDot}}{ medium weight dash-dot border}
\item{\bold{dashDotDot}}{ dash-dot-dot border}
\item{\bold{mediumDashDotDot}}{ medium weight dash-dot-dot border}
\item{\bold{slantDashDot}}{ slanted dash-dot border}
}}
\item{withFilter}{If \code{TRUE}, add filters to the column name row. NOTE can only have one filter per worksheet.}
\item{keepNA}{If \code{TRUE}, NA values are converted to #N/A in Excel else NA cells will be empty.}
\item{name}{If not NULL, a named region is defined.}
\item{sep}{Only applies to list columns. The separator used to collapse list columns to a character vector e.g. sapply(x$list_column, paste, collapse = sep).}
}
\value{
invisible(0)
}
\description{
Write an object to worksheet with optional styling.
}
\details{
Formulae written using writeFormula to a Workbook object will not get picked up by read.xlsx().
This is because only the formula is written and left to Excel to evaluate the formula when the file is opened in Excel.
}
\examples{
## See formatting vignette for further examples.
## Options for default styling (These are the defaults)
options("openxlsx.borderColour" = "black")
options("openxlsx.borderStyle" = "thin")
options("openxlsx.dateFormat" = "mm/dd/yyyy")
options("openxlsx.datetimeFormat" = "yyyy-mm-dd hh:mm:ss")
options("openxlsx.numFmt" = NULL)
## Change the default border colour to #4F81BD
options("openxlsx.borderColour" = "#4F81BD")
#####################################################################################
## Create Workbook object and add worksheets
wb <- createWorkbook()
## Add worksheets
addWorksheet(wb, "Cars")
addWorksheet(wb, "Formula")
x <- mtcars[1:6,]
writeData(wb, "Cars", x, startCol = 2, startRow = 3, rowNames = TRUE)
#####################################################################################
## Bordering
writeData(wb, "Cars", x, rowNames = TRUE, startCol = "O", startRow = 3,
borders="surrounding", borderColour = "black") ## black border
writeData(wb, "Cars", x, rowNames = TRUE,
startCol = 2, startRow = 12, borders="columns")
writeData(wb, "Cars", x, rowNames = TRUE,
startCol="O", startRow = 12, borders="rows")
#####################################################################################
## Header Styles
hs1 <- createStyle(fgFill = "#DCE6F1", halign = "CENTER", textDecoration = "italic",
border = "Bottom")
writeData(wb, "Cars", x, colNames = TRUE, rowNames = TRUE, startCol="B",
startRow = 23, borders="rows", headerStyle = hs1, borderStyle = "dashed")
hs2 <- createStyle(fontColour = "#ffffff", fgFill = "#4F80BD",
halign = "center", valign = "center", textDecoration = "bold",
border = "TopBottomLeftRight")
writeData(wb, "Cars", x, colNames = TRUE, rowNames = TRUE,
startCol="O", startRow = 23, borders="columns", headerStyle = hs2)
#####################################################################################
## Hyperlinks
## - vectors/columns with class 'hyperlink' are written as hyperlinks'
v <- rep("https://CRAN.R-project.org/", 4)
names(v) <- paste("Hyperlink", 1:4) # Optional: names will be used as display text
class(v) <- 'hyperlink'
writeData(wb, "Cars", x = v, xy = c("B", 32))
#####################################################################################
## Formulas
## - vectors/columns with class 'formula' are written as formulas'
df <- data.frame(x=1:3, y = 1:3,
z = paste(paste0("A", 1:3+1L), paste0("B", 1:3+1L), sep = " + "),
stringsAsFactors = FALSE)
class(df$z) <- c(class(df$z), "formula")
writeData(wb, sheet = "Formula", x = df)
#####################################################################################
## Save workbook
## Open in excel without saving file: openXL(wb)
\dontrun{saveWorkbook(wb, "writeDataExample.xlsx", overwrite = TRUE)}
}
\seealso{
\code{\link{writeDataTable}}
}
\author{
Alexander Walker
}
|
plot1 <- function() {
  # Plot 1 of the UCI "Individual household electric power consumption" data
  # set (http://archive.ics.uci.edu/ml/): a red histogram of Global_active_power
  # (household global minute-averaged active power, in kilowatts).
  #
  # The raw file has 2,075,259 rows sampled at one-minute resolution over
  # almost 4 years; missing values are coded as "?".  Only observations from
  # 2007-02-01 and 2007-02-02 are plotted, to examine how household energy
  # usage varies over a 2-day period in February 2007.
  #
  # Side effects: reads "household_power_consumption.txt" from the working
  # directory and writes "plot1.png" (480 x 480 pixels).  Returns NULL
  # (invisibly, via dev.off()'s result being discarded).

  # "?" marks missing values and becomes NA on read.
  df1 <- read.table("household_power_consumption.txt", header = TRUE,
                    stringsAsFactors = FALSE, sep = ";", na.strings = "?")
  print(dim(df1))

  df1 <- subset(df1, !(is.na(Date) | is.na(Time)))
  # Use POSIXct rather than strptime()'s POSIXlt: POSIXlt is a list-based
  # class and behaves poorly as a data.frame column.
  df1$DateTime <- as.POSIXct(paste(df1$Date, df1$Time),
                             format = "%d/%m/%Y %H:%M:%S")
  df1$Date <- as.Date(df1$Date, "%d/%m/%Y")  # re-cast existing column as Date

  startDate <- as.Date("2007-02-01")
  endDate <- as.Date("2007-02-02")
  # Keep only the two target days (inclusive range, stated positively).
  df2 <- subset(df1, Date >= startDate & Date <= endDate)
  print(dim(df2))

  #### plot 1 ####
  # Draw directly into a png device so the 480 x 480 output size is explicit
  # and guaranteed, instead of copying from the screen device with dev.copy().
  png("plot1.png", width = 480, height = 480)
  hist(as.numeric(df2$Global_active_power), col = "red",
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)")
  dev.off()
}
| /plot1.R | no_license | murphytm/ExData_Plotting1 | R | false | false | 2,659 | r | plot1 <- function( ) {
# R x64 3.1.0:
# > getwd()
# File > Change dir...
# source("plot1.R")
# The UC Irvine Machine Learning Repository (http://archive.ics.uci.edu/ml/) is a popular repository for machine learning datasets.
# Using the "Individual household electric power consumption Data Set", four plots are produced.
#
# Dataset: Electric power consumption [20 MB].
#
# Description: Measurements of electric power consumption in one household with a one-minute sampling rate
# over a period of almost 4 years. Different electrical quantities and some sub-metering values are available.
#
# Variables -
# Date: date in format dd/mm/yyyy
# Time: time in format hh:mm:ss
# Global_active_power: household global minute-averaged active power (in kilowatts)
# Global_reactive_power: household global minute-averaged reactive power (in kilowatts)
# Voltage: minute-averaged voltage (in volts)
# Global_intensity: household global minute-averaged current intensity (in amperes)
# Sub_metering_1: energy sub-metering No. 1 (in watt-hours of active energy) [corresponds to the kitchen]
# Sub_metering_2: energy sub-metering No. 2 (in watt-hours of active energy) [corresponds to the laundry room]
# Sub_metering_3: energy sub-metering No. 3 (in watt-hours of active energy) [corresponds to an electric water-heater and an air-conditioner].
#
# The dataset has 2,075,259 rows.
#
# Missing values are coded as "?" in this dataset.
#
# The plots include only data from the dates 2007-02-01 and 2007-02-02.
#
# Overall goal: simply examine how household energy usage varies over a 2-day period in February, 2007.
#
# Each plot is constructed using the base plotting system and saved to a PNG file (width = 480 pixels, height = 480 pixels).
######## course project ########
# Read the raw semicolon-separated file; "?" entries become NA via na.strings.
df1 = read.table("household_power_consumption.txt", header = TRUE, stringsAsFactors = FALSE, sep = ";", na.strings = "?")
# print(class(df1))
# print(names(df1))
print(dim(df1))
# Drop rows with a missing date or time before building the timestamp column.
df1 <- subset(df1, !(is.na(Date) | is.na(Time)))
# NOTE(review): strptime() returns POSIXlt, a list-based class that is fragile
# as a data.frame column -- consider as.POSIXct() here instead.
df1$DateTime <- strptime(paste(df1$Date, df1$Time), "%d/%m/%Y %H:%M:%S") # create new "POSIXlt" column
df1$Date <- as.Date(df1$Date, "%d/%m/%Y") # re-cast existing column as "Date"
# print(str(df1))
startDate <- as.Date("2007-02-01", "%Y-%m-%d")
endDate <- as.Date("2007-02-02", "%Y-%m-%d")
# Keep only rows with Date in [startDate, endDate] (double negation = inclusive range).
df2 <- subset(df1, !((Date < startDate) | (Date > endDate)))
# print(head(df2))
# print(tail(df2))
print(dim(df2))
#### plot 1 ####
hist(as.numeric(df2$Global_active_power), col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
# dev.copy(png, ...) re-renders the on-screen plot into a png device; the png
# device defaults give the required 480 x 480 output.
dev.copy(png, file = "plot1.png") # 480 x 480
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_cvd.R
\name{simulate_cvd}
\alias{simulate_cvd}
\title{Simulate color vision deficiency given a cvd transform matrix}
\usage{
simulate_cvd(col, cvd_transform)
}
\arguments{
\item{col}{A color or vector of colors e.g., "#FFA801" or "blue"}
\item{cvd_transform}{A 3x3 matrix specifying the color vision deficiency transform matrix}
}
\description{
This function takes valid R colors and transforms them according to a cvd transform matrix.
}
\examples{
simulate_cvd(c("#005000","blue","#00BB00"),
tritanomaly_cvd['6'][[1]])
}
\keyword{colorblind}
\keyword{colors}
\keyword{cvd}
| /man/simulate_cvd.Rd | permissive | emilelatour/colorblindr | R | false | true | 669 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_cvd.R
\name{simulate_cvd}
\alias{simulate_cvd}
\title{Simulate color vision deficiency given a cvd transform matrix}
\usage{
simulate_cvd(col, cvd_transform)
}
\arguments{
\item{col}{A color or vector of colors e.g., "#FFA801" or "blue"}
\item{cvd_transform}{A 3x3 matrix specifying the color vision deficiency transform matrix}
}
\description{
This function takes valid R colors and transforms them according to a cvd transform matrix.
}
\examples{
simulate_cvd(c("#005000","blue","#00BB00"),
tritanomaly_cvd['6'][[1]])
}
\keyword{colorblind}
\keyword{colors}
\keyword{cvd}
|
###################### Form Queries #######################
#' Create CONCEPT query objects from concept ids to use in an export form
#'
#' Resolves each concept id against the server's concept tree.  The root
#' concept supplies the tables; the label comes either from the root itself
#' (when the id is a root concept) or from the matching child entry of the
#' fully-loaded root object.  Root objects fetched for child lookups are
#' cached so each root is downloaded at most once per call.
#'
#' @param connection connection object used to look up concepts
#' @param concept_ids vector of concept id strings to resolve
#' @return list of CONCEPT query objects (type/ids/label/tables), one per id;
#'   stops with an error if an id (or its root) cannot be found
#' @keywords internal
concept_ids_to_concept_objs <- function(connection, concept_ids){
  concepts <- get_concepts(connection)
  # cache of fully-loaded root concept objects, keyed by root concept id
  needed_root_concept_objs <- list()
  queries <- list()
  # build one query object per concept id
  for (concept_id in concept_ids){
    # the root concept provides the tables (and the label for root ids)
    root_concept_id <- root_of_concept_id(concept_id)
    root_concept <- concepts[[root_concept_id]]
    if (is.null(root_concept)){
      stop(paste("Could not find root", root_concept_id, "for concept id", concept_id))
    }
    if (root_concept_id == concept_id){
      label <- root_concept$label
    } else {
      # child concept: fetch (and cache) the full root object to validate the
      # id and extract its label
      if (!root_concept_id %in% names(needed_root_concept_objs)){
        needed_root_concept_objs[[root_concept_id]] <- get_concept(connection, root_concept_id)
      }
      root_concept_obj <- needed_root_concept_objs[[root_concept_id]][[concept_id]]
      if (is.null(root_concept_obj)){
        stop(paste("Could not find concept id", concept_id))
      }
      label <- root_concept_obj$label
    }
    # base-R equivalents of rlist::list.append: avoids the extra dependency
    # and the copy-on-append pattern
    tables <- lapply(root_concept$tables, function(tbl) list(id = tbl$connectorId))
    queries[[length(queries) + 1L]] <- list(
      "type" = "CONCEPT",
      "ids" = concept_id,
      "label" = label,
      "tables" = tables
    )
  }
  queries
}
#' Create form query of type 'ABSOLUTE'
#'
#' Builds an export form query whose feature date restriction is an absolute
#' date range applied to a previously stored population query.
#'
#' @param query_id query id for the query that defines the population
#' @param start_date start date of date restriction for features
#' @param end_date end date of date restriction for features
#' @param queries list of queries
#' (type 'CONCEPT_QUERY' - from create_query function)
#' @param concept_ids list of concept ids to use instead of queries
#' @param connection connection object is needed when using concept_ids
#' @param resolution time resolution of output - 'COMPLETE', 'YEARS', 'QUARTERS'
#' @return absolute form query (nested list ready for serialisation)
#' @example man/examples/form_query.R
#' @export
absolute_form_query <- function(query_id, start_date, end_date, queries = NULL,
                                concept_ids = NULL, connection = NULL,
                                resolution = 'COMPLETE'){
  # exactly one of `queries` / `concept_ids` must supply the features;
  # scalar guards use && (short-circuit), not the vectorized &
  if (is.null(queries) && is.null(concept_ids)){
    stop("To create a form query, either queries or concept_ids have to be defined.")
  }
  if (is.null(connection) && !is.null(concept_ids)){
    stop("To create a form query from concept_ids, a connection object is required.")
  }
  if (!is.null(concept_ids)){
    queries <- concept_ids_to_concept_objs(connection, concept_ids)
  }
  validate_resolution(resolution)
  list(
    type = "EXPORT_FORM",
    queryGroup = query_id,
    resolution = resolution,
    timeMode = list(
      value = "ABSOLUTE",
      dateRange = list(
        min = start_date,
        max = end_date
      ),
      features = queries
    )
  )
}
#' Create form query of type 'RELATIVE'
#'
#' Builds an export form query whose before/after feature windows are defined
#' relative to an index date selected per person.
#'
#' @param query_id query_id for query that defines population
#' @param before_queries list of queries for time before index date
#' (type 'CONCEPT_QUERY' - from create_query function)
#' @param after_queries list of queries for time after index date
#' (type 'CONCEPT_QUERY' - from create_query function)
#' @param before_concept_ids list of concept ids to use instead of before_queries
#' @param after_concept_ids list of concept ids to use instead of after_queries
#' @param connection connection object is needed when using concept_ids
#' @param resolution time resolution of output - 'COMPLETE', 'YEARS', 'QUARTERS'
#' @param time_unit time unit of for before and after date range
#' - 'QUARTERS', 'DAYS'
#' @param time_count_before number of time units in date range before index date
#' @param time_count_after number of time units in date range after index date
#' @param index_selector specifies how the index date will be retrieved
#' from date range per person - 'EARLIEST', 'RANDOM', 'LATEST'
#' @param index_placement specifies if time unit of index date is counted to
#' before date range, after date range or if it lies inbetween.
#' - 'BEFORE', 'NEUTRAL', 'AFTER'
#' @return relative form query (nested list ready for serialisation)
#' @example man/examples/form_query.R
#' @export
relative_form_query <- function(query_id,
                                before_queries = NULL, after_queries = NULL,
                                before_concept_ids = NULL,
                                after_concept_ids = NULL,
                                connection = NULL,
                                resolution = 'COMPLETE',
                                time_unit = 'QUARTERS',
                                time_count_before = 1, time_count_after = 1,
                                index_selector = 'EARLIEST',
                                index_placement = 'BEFORE'){
  # scalar guards use &&/|| (short-circuit), not the vectorized &/|
  if (is.null(before_queries) && is.null(before_concept_ids)){
    stop("To create a form query, either before_queries or
    before_concept_ids have to be defined.")
  }
  if (is.null(after_queries) && is.null(after_concept_ids)){
    stop("To create a form query, either after_queries or
    after_concept_ids have to be defined.")
  }
  if (is.null(connection) &&
      (!is.null(before_concept_ids) || !is.null(after_concept_ids))){
    stop("To create a form query from concept_ids,
    a connection object is required.")
  }
  # TODO: convert before and after ids in one go to save concept loading when
  # the same root has to be loaded for both date ranges
  if (!is.null(before_concept_ids)){
    before_queries <- concept_ids_to_concept_objs(connection, before_concept_ids)
  }
  # BUG FIX: this guard previously re-tested before_concept_ids, so supplied
  # after_concept_ids were never converted to query objects.
  if (!is.null(after_concept_ids)){
    after_queries <- concept_ids_to_concept_objs(connection, after_concept_ids)
  }
  validate_resolution(resolution)
  validate_time_unit(time_unit)
  validate_index_selector(index_selector)
  validate_index_placement(index_placement)
  list(
    type = "EXPORT_FORM",
    queryGroup = query_id,
    resolution = resolution,
    timeMode = list(
      value = "RELATIVE",
      timeUnit = time_unit,
      timeCountBefore = time_count_before,
      timeCountAfter = time_count_after,
      indexSelector = index_selector,
      indexPlacement = index_placement,
      features = before_queries,
      outcomes = after_queries
    )
  )
}
| /R/form_queries.R | no_license | ingef/cqapiR | R | false | false | 6,637 | r |
###################### Form Queries #######################
#' Create CONCEPT query objects from concept ids to use in an export form
#'
#' Resolves each concept id against the server's concept tree.  The root
#' concept supplies the tables; the label comes either from the root itself
#' (when the id is a root concept) or from the matching child entry of the
#' fully-loaded root object.  Root objects fetched for child lookups are
#' cached so each root is downloaded at most once per call.
#'
#' @param connection connection object used to look up concepts
#' @param concept_ids vector of concept id strings to resolve
#' @return list of CONCEPT query objects (type/ids/label/tables), one per id;
#'   stops with an error if an id (or its root) cannot be found
#' @keywords internal
concept_ids_to_concept_objs <- function(connection, concept_ids){
  concepts <- get_concepts(connection)
  # cache of fully-loaded root concept objects, keyed by root concept id
  needed_root_concept_objs <- list()
  queries <- list()
  # build one query object per concept id
  for (concept_id in concept_ids){
    # the root concept provides the tables (and the label for root ids)
    root_concept_id <- root_of_concept_id(concept_id)
    root_concept <- concepts[[root_concept_id]]
    if (is.null(root_concept)){
      stop(paste("Could not find root", root_concept_id, "for concept id", concept_id))
    }
    if (root_concept_id == concept_id){
      label <- root_concept$label
    } else {
      # child concept: fetch (and cache) the full root object to validate the
      # id and extract its label
      if (!root_concept_id %in% names(needed_root_concept_objs)){
        needed_root_concept_objs[[root_concept_id]] <- get_concept(connection, root_concept_id)
      }
      root_concept_obj <- needed_root_concept_objs[[root_concept_id]][[concept_id]]
      if (is.null(root_concept_obj)){
        stop(paste("Could not find concept id", concept_id))
      }
      label <- root_concept_obj$label
    }
    # base-R equivalents of rlist::list.append: avoids the extra dependency
    # and the copy-on-append pattern
    tables <- lapply(root_concept$tables, function(tbl) list(id = tbl$connectorId))
    queries[[length(queries) + 1L]] <- list(
      "type" = "CONCEPT",
      "ids" = concept_id,
      "label" = label,
      "tables" = tables
    )
  }
  queries
}
#' Create form query of type 'ABSOLUTE'
#'
#' Builds an export form query whose feature date restriction is an absolute
#' date range applied to a previously stored population query.
#'
#' @param query_id query id for the query that defines the population
#' @param start_date start date of date restriction for features
#' @param end_date end date of date restriction for features
#' @param queries list of queries
#' (type 'CONCEPT_QUERY' - from create_query function)
#' @param concept_ids list of concept ids to use instead of queries
#' @param connection connection object is needed when using concept_ids
#' @param resolution time resolution of output - 'COMPLETE', 'YEARS', 'QUARTERS'
#' @return absolute form query (nested list ready for serialisation)
#' @example man/examples/form_query.R
#' @export
absolute_form_query <- function(query_id, start_date, end_date, queries = NULL,
                                concept_ids = NULL, connection = NULL,
                                resolution = 'COMPLETE'){
  # exactly one of `queries` / `concept_ids` must supply the features;
  # scalar guards use && (short-circuit), not the vectorized &
  if (is.null(queries) && is.null(concept_ids)){
    stop("To create a form query, either queries or concept_ids have to be defined.")
  }
  if (is.null(connection) && !is.null(concept_ids)){
    stop("To create a form query from concept_ids, a connection object is required.")
  }
  if (!is.null(concept_ids)){
    queries <- concept_ids_to_concept_objs(connection, concept_ids)
  }
  validate_resolution(resolution)
  list(
    type = "EXPORT_FORM",
    queryGroup = query_id,
    resolution = resolution,
    timeMode = list(
      value = "ABSOLUTE",
      dateRange = list(
        min = start_date,
        max = end_date
      ),
      features = queries
    )
  )
}
#' Create form query of type 'RELATIVE'
#'
#' Builds an export form query whose before/after feature windows are defined
#' relative to an index date selected per person.
#'
#' @param query_id query_id for query that defines population
#' @param before_queries list of queries for time before index date
#' (type 'CONCEPT_QUERY' - from create_query function)
#' @param after_queries list of queries for time after index date
#' (type 'CONCEPT_QUERY' - from create_query function)
#' @param before_concept_ids list of concept ids to use instead of before_queries
#' @param after_concept_ids list of concept ids to use instead of after_queries
#' @param connection connection object is needed when using concept_ids
#' @param resolution time resolution of output - 'COMPLETE', 'YEARS', 'QUARTERS'
#' @param time_unit time unit of for before and after date range
#' - 'QUARTERS', 'DAYS'
#' @param time_count_before number of time units in date range before index date
#' @param time_count_after number of time units in date range after index date
#' @param index_selector specifies how the index date will be retrieved
#' from date range per person - 'EARLIEST', 'RANDOM', 'LATEST'
#' @param index_placement specifies if time unit of index date is counted to
#' before date range, after date range or if it lies inbetween.
#' - 'BEFORE', 'NEUTRAL', 'AFTER'
#' @return relative form query (nested list ready for serialisation)
#' @example man/examples/form_query.R
#' @export
relative_form_query <- function(query_id,
                                before_queries = NULL, after_queries = NULL,
                                before_concept_ids = NULL,
                                after_concept_ids = NULL,
                                connection = NULL,
                                resolution = 'COMPLETE',
                                time_unit = 'QUARTERS',
                                time_count_before = 1, time_count_after = 1,
                                index_selector = 'EARLIEST',
                                index_placement = 'BEFORE'){
  # scalar guards use &&/|| (short-circuit), not the vectorized &/|
  if (is.null(before_queries) && is.null(before_concept_ids)){
    stop("To create a form query, either before_queries or
    before_concept_ids have to be defined.")
  }
  if (is.null(after_queries) && is.null(after_concept_ids)){
    stop("To create a form query, either after_queries or
    after_concept_ids have to be defined.")
  }
  if (is.null(connection) &&
      (!is.null(before_concept_ids) || !is.null(after_concept_ids))){
    stop("To create a form query from concept_ids,
    a connection object is required.")
  }
  # TODO: convert before and after ids in one go to save concept loading when
  # the same root has to be loaded for both date ranges
  if (!is.null(before_concept_ids)){
    before_queries <- concept_ids_to_concept_objs(connection, before_concept_ids)
  }
  # BUG FIX: this guard previously re-tested before_concept_ids, so supplied
  # after_concept_ids were never converted to query objects.
  if (!is.null(after_concept_ids)){
    after_queries <- concept_ids_to_concept_objs(connection, after_concept_ids)
  }
  validate_resolution(resolution)
  validate_time_unit(time_unit)
  validate_index_selector(index_selector)
  validate_index_placement(index_placement)
  list(
    type = "EXPORT_FORM",
    queryGroup = query_id,
    resolution = resolution,
    timeMode = list(
      value = "RELATIVE",
      timeUnit = time_unit,
      timeCountBefore = time_count_before,
      timeCountAfter = time_count_after,
      indexSelector = index_selector,
      indexPlacement = index_placement,
      features = before_queries,
      outcomes = after_queries
    )
  )
}
|
#' Return market profit and loss
#'
#' \code{listMarketPandL} Retrieve profit and loss for a given list of OPEN
#' markets. The values are calculated using matched bets and optionally settled
#' bets
#'
#' @seealso \code{\link{loginBF}}, which must be executed first. Do NOT use the
#' DELAY application key. The DELAY application key does not support price
#' data.
#'
#' @seealso \code{\link{listClearedOrders}} to retrieve your profit and loss for
#' CLOSED markets
#'
#' Note that \code{listMarketPandL} does not include any information about the
#' value of your bets on the markets e.g. value of profit/loss if you were to
#' cashout at current prices. It simply returns the money you'd win/lose if
#' specific selections were to win. If you wish to calculate your cashout
#' position, then we'll need to design a new function combining
#' \code{listCurrentOrders} and \code{listMarketBook} (it's on the to-do
#' list).
#'
#' @param marketIds Vector<String>. A set of market ID strings from which the
#' corresponding market profit and losses will be returned. Required.
#' No default.
#'
#' @param includeSettledBetsValue Boolean. Option to include settled bets
#' (partially settled markets only). This parameter defaults to NULL, which
#' Betfair interprets as false. Optional.
#'
#' @param includeBspBetsValue Boolean. Option to include Betfair Starting Price
#' (BSP) bets. This parameter defaults to NULL, which Betfair interprets as
#' FALSE. Optional.
#'
#' @param netOfCommissionValue Boolean. Option to return profit and loss net of
#' user's current commission rate for this market, including any special
#' tariffs. This parameter defaults to NULL, which Betfair interprets as
#' FALSE. Optional.
#'
#' @param sslVerify Boolean. This argument defaults to TRUE and is optional. In
#' some cases, where users have a self signed SSL Certificate, for example
#' they may be behind a proxy server, Betfair will fail login with "SSL
#' certificate problem: self signed certificate in certificate chain". If this
#' error occurs you may set sslVerify to FALSE. This does open a small
#' security risk of a man-in-the-middle intercepting your login credentials.
#'
#'@param suppress Boolean. By default, this parameter is set to FALSE, meaning
#' that a warning is posted when the listMarketPandL call throws an error.
#' Changing this parameter to TRUE will suppress this warning.
#'
#' @return Response from Betfair is stored in listPandL variable, which is then
#'   parsed from JSON as a data frame of at least two variables (more if the
#' optional parameters are included). The first column records the market IDs,
#' while the corresponding market P&Ls are stored within a list.
#'
#' @section Note on \code{listPandLOps} variable: The
#' \code{listPandLOps} variable is used to firstly build an R data frame
#' containing all the data to be passed to Betfair, in order for the function
#' to execute successfully. The data frame is then converted to JSON and
#' included in the HTTP POST request. If the listMarketPandL call throws an
#' error, a data frame containing error information is returned.
#'
#' @examples
#' \dontrun{
#' Return the P&L (net of commission) for the requested markets. The actual
#' market IDs are unlikely to work and are just for demonstration purposes.
#'
#' listMarketPandL(marketIds = c("1.122323121","1.123859413"),
#' netOfCommission = TRUE)
#' }
#'
listMarketPandL <-
  function(marketIds, includeSettledBetsValue = NULL,includeBspBetsValue = NULL,
           netOfCommissionValue = NULL, suppress = FALSE, sslVerify = TRUE) {
    # Build the JSON-RPC request as a nested data frame.  stringsAsFactors is
    # passed explicitly to each data.frame/as.data.frame call instead of via
    # options(), so the user's global session options are no longer clobbered
    # as a side effect of calling this function.
    listPandLOps <-
      data.frame(jsonrpc = "2.0", method = "SportsAPING/v1.0/listMarketProfitAndLoss", id = "1",
                 stringsAsFactors = FALSE)
    listPandLOps$params <- data.frame(marketIds = c(marketIds), stringsAsFactors = FALSE)
    # marketIds must serialise as a JSON array, so wrap the column in a list.
    listPandLOps$params$marketIds <- list(listPandLOps$params$marketIds)
    # Optional flags stay NULL (and are omitted from the JSON) unless supplied.
    listPandLOps$params$includeSettledBets <- includeSettledBetsValue
    listPandLOps$params$includeBspBets <- includeBspBetsValue
    listPandLOps$params$netOfCommission <- netOfCommissionValue
    listPandLOps <- listPandLOps[c("jsonrpc", "method", "params", "id")]
    listPandLOps <- jsonlite::toJSON(jsonlite::unbox(listPandLOps), pretty = TRUE)
    # Read Environment variables for authorisation details
    product <- Sys.getenv('product')
    token <- Sys.getenv('token')
    listPandL <- httr::content(
      httr::POST(url = Sys.getenv('betfair-betting'),
                 config = httr::config(ssl_verifypeer = sslVerify),
                 body = listPandLOps,
                 httr::add_headers(Accept = "application/json", `X-Application` = product, `X-Authentication` = token)
      )
    )
    # On success return the result as a data frame; on error optionally warn
    # and return the error details instead.
    if(is.null(listPandL$error))
      as.data.frame(listPandL$result, stringsAsFactors = FALSE)
    else({
      if(!suppress)
        warning("Error- See output for details")
      as.data.frame(listPandL$error, stringsAsFactors = FALSE)})
  }
| /R/listMarketPandL.R | no_license | tobiasstrebitzer/abettor | R | false | false | 4,960 | r | #' Return market profit and loss
#'
#' \code{listMarketPandL} Retrieve profit and loss for a given list of OPEN
#' markets. The values are calculated using matched bets and optionally settled
#' bets
#'
#' @seealso \code{\link{loginBF}}, which must be executed first. Do NOT use the
#' DELAY application key. The DELAY application key does not support price
#' data.
#'
#' @seealso \code{\link{listClearedOrders}} to retrieve your profit and loss for
#' CLOSED markets
#'
#' Note that \code{listMarketPandL} does not include any information about the
#' value of your bets on the markets e.g. value of profit/loss if you were to
#' cashout at current prices. It simply returns the money you'd win/lose if
#' specific selections were to win. If you wish to calculate your cashout
#' position, then we'll need to design a new function combining
#' \code{listCurrentOrders} and \code{listMarketBook} (it's on the to-do
#' list).
#'
#' @param marketIds Vector<String>. A set of market ID strings from which the
#' corresponding market profit and losses will be returned. Required.
#' No default.
#'
#' @param includeSettledBetsValue Boolean. Option to include settled bets
#' (partially settled markets only). This parameter defaults to NULL, which
#' Betfair interprets as false. Optional.
#'
#' @param includeBspBetsValue Boolean. Option to include Betfair Starting Price
#' (BSP) bets. This parameter defaults to NULL, which Betfair interprets as
#' FALSE. Optional.
#'
#' @param netOfCommissionValue Boolean. Option to return profit and loss net of
#' user's current commission rate for this market, including any special
#' tariffs. This parameter defaults to NULL, which Betfair interprets as
#' FALSE. Optional.
#'
#' @param sslVerify Boolean. This argument defaults to TRUE and is optional. In
#' some cases, where users have a self signed SSL Certificate, for example
#' they may be behind a proxy server, Betfair will fail login with "SSL
#' certificate problem: self signed certificate in certificate chain". If this
#' error occurs you may set sslVerify to FALSE. This does open a small
#' security risk of a man-in-the-middle intercepting your login credentials.
#'
#'@param suppress Boolean. By default, this parameter is set to FALSE, meaning
#' that a warning is posted when the listMarketPandL call throws an error.
#' Changing this parameter to TRUE will suppress this warning.
#'
#' @return Response from Betfair is stored in listPandL variable, which is then
#'   parsed from JSON as a data frame of at least two variables (more if the
#' optional parameters are included). The first column records the market IDs,
#' while the corresponding market P&Ls are stored within a list.
#'
#' @section Note on \code{listPandLOps} variable: The
#' \code{listPandLOps} variable is used to firstly build an R data frame
#' containing all the data to be passed to Betfair, in order for the function
#' to execute successfully. The data frame is then converted to JSON and
#' included in the HTTP POST request. If the listMarketPandL call throws an
#' error, a data frame containing error information is returned.
#'
#' @examples
#' \dontrun{
#' Return the P&L (net of commission) for the requested markets. The actual
#' market IDs are unlikely to work and are just for demonstration purposes.
#'
#' listMarketPandL(marketIds = c("1.122323121","1.123859413"),
#' netOfCommission = TRUE)
#' }
#'
listMarketPandL <-
  function(marketIds, includeSettledBetsValue = NULL,includeBspBetsValue = NULL,
           netOfCommissionValue = NULL, suppress = FALSE, sslVerify = TRUE) {
    # Build the JSON-RPC request as a nested data frame.  stringsAsFactors is
    # passed explicitly to each data.frame/as.data.frame call instead of via
    # options(), so the user's global session options are no longer clobbered
    # as a side effect of calling this function.
    listPandLOps <-
      data.frame(jsonrpc = "2.0", method = "SportsAPING/v1.0/listMarketProfitAndLoss", id = "1",
                 stringsAsFactors = FALSE)
    listPandLOps$params <- data.frame(marketIds = c(marketIds), stringsAsFactors = FALSE)
    # marketIds must serialise as a JSON array, so wrap the column in a list.
    listPandLOps$params$marketIds <- list(listPandLOps$params$marketIds)
    # Optional flags stay NULL (and are omitted from the JSON) unless supplied.
    listPandLOps$params$includeSettledBets <- includeSettledBetsValue
    listPandLOps$params$includeBspBets <- includeBspBetsValue
    listPandLOps$params$netOfCommission <- netOfCommissionValue
    listPandLOps <- listPandLOps[c("jsonrpc", "method", "params", "id")]
    listPandLOps <- jsonlite::toJSON(jsonlite::unbox(listPandLOps), pretty = TRUE)
    # Read Environment variables for authorisation details
    product <- Sys.getenv('product')
    token <- Sys.getenv('token')
    listPandL <- httr::content(
      httr::POST(url = Sys.getenv('betfair-betting'),
                 config = httr::config(ssl_verifypeer = sslVerify),
                 body = listPandLOps,
                 httr::add_headers(Accept = "application/json", `X-Application` = product, `X-Authentication` = token)
      )
    )
    # On success return the result as a data frame; on error optionally warn
    # and return the error details instead.
    if(is.null(listPandL$error))
      as.data.frame(listPandL$result, stringsAsFactors = FALSE)
    else({
      if(!suppress)
        warning("Error- See output for details")
      as.data.frame(listPandL$error, stringsAsFactors = FALSE)})
  }
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/zzz.r
\name{occ2df}
\alias{occ2df}
\title{Combine results from occ calls to a single data.frame}
\usage{
occ2df(obj, what = "data")
}
\arguments{
\item{obj}{Input from occ}
\item{what}{One of data (default) or all (with metadata)}
}
\description{
Combine results from occ calls to a single data.frame
}
\examples{
\dontrun{
spnames <- c('Accipiter striatus', 'Setophaga caerulescens', 'Carduelis tristis')
out <- occ(query=spnames, from='gbif', gbifopts=list(hasCoordinate=TRUE), limit=10)
occ2df(out)
}
}
| /man/occ2df.Rd | permissive | jhollist/spocc | R | false | false | 594 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/zzz.r
\name{occ2df}
\alias{occ2df}
\title{Combine results from occ calls to a single data.frame}
\usage{
occ2df(obj, what = "data")
}
\arguments{
\item{obj}{Input from occ}
\item{what}{One of data (default) or all (with metadata)}
}
\description{
Combine results from occ calls to a single data.frame
}
\examples{
\dontrun{
spnames <- c('Accipiter striatus', 'Setophaga caerulescens', 'Carduelis tristis')
out <- occ(query=spnames, from='gbif', gbifopts=list(hasCoordinate=TRUE), limit=10)
occ2df(out)
}
}
|
#Reads the household power data (all columns as character) for 1-2 Feb 2007
#and draws Plot 1: a histogram of Global_active_power saved to plot1.png.
plot1 <- function(directory){
  #Big data so use data.table instead of read.table as it is much faster
  library(data.table)
  #Read in Data (all 9 columns read as character; coerced to numeric below)
  DT <- fread(paste(directory,list.files(directory,pattern="household.+txt"),sep=""),colClasses=c(rep("character",9)))
  ##Subset data set as we are only interested in a 2-day period in February, 2007
  #Set key to column Date
  setkey(DT,Date)
  #Subset
  DT<-DT[c("1/2/2007","2/2/2007"),]
  #create new column datetime
  DT[,Datetime:=paste(as.Date(Date,"%d/%m/%Y"),Time)]
  #Convert columns from character to Date and numeric
  #(for some odd reason "?" can't be used as na.string in fread, so we have to coerce the values the hard way)
  DT[,c(
    "Global_active_power",
    "Global_reactive_power",
    "Voltage",
    "Global_intensity",
    "Sub_metering_1",
    "Sub_metering_2",
    "Sub_metering_3") :=
       list(
         DT[,as.numeric(Global_active_power)],
         DT[,as.numeric(Global_reactive_power)],
         DT[,as.numeric(Voltage)],
         DT[,as.numeric(Global_intensity)],
         DT[,as.numeric(Sub_metering_1)],
         DT[,as.numeric(Sub_metering_2)],
         DT[,as.numeric(Sub_metering_3)]
       )]
  #Open PNG file for output
  png(filename="plot1.png",width=480,height=480)
  #Create first plot
  DT[,hist(Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power")]
  dev.off()
} | /plot1.R | no_license | hkruegr/ExData_Plotting1 | R | false | false | 1,602 | r | plot1 <- function(directory){
#Big data so use data.table instead of read.table as it is much faster
library(data.table)
#Read in Data
DT <- fread(paste(directory,list.files(directory,pattern="household.+txt"),sep=""),colClasses=c(rep("character",9)))
##Subset data set as we are only interested in a 2-day period in February, 2007
#Set key to column Date
setkey(DT,Date)
#Subset
DT<-DT[c("1/2/2007","2/2/2007"),]
#create new column datetime
DT[,Datetime:=paste(as.Date(Date,"%d/%m/%Y"),Time)]
#Convert columns from character to Date and numeric
#(for some odd reason "?" can't be used as na.string in fread, so we have to coerce the values the hard way)
DT[,c(
"Global_active_power",
"Global_reactive_power",
"Voltage",
"Global_intensity",
"Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3") :=
list(
DT[,as.numeric(Global_active_power)],
DT[,as.numeric(Global_reactive_power)],
DT[,as.numeric(Voltage)],
DT[,as.numeric(Global_intensity)],
DT[,as.numeric(Sub_metering_1)],
DT[,as.numeric(Sub_metering_2)],
DT[,as.numeric(Sub_metering_3)]
)]
#Open PNG file for output
png(filename="plot1.png",width=480,height=480)
#Create first plot
DT[,hist(Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power")]
dev.off()
} |
\name{Orthogonal matching pursuit regression}
\alias{omp2}
\title{
Orthogonal matching pursuit regression
}
\description{
Orthogonal matching pursuit regression.
}
\usage{
omp2(y, x, xstand = TRUE, tol = qchisq(0.95, 1), type = "gamma" )
}
\arguments{
\item{y}{
The response variable, a numeric vector. For "omp" this can be either a vector with discrete
(count) data, 0 and 1, non negative values, strictly positive or a factor (categorical) variable.
}
\item{x}{
A matrix with the data, where the rows denote the observations and the columns are the variables.
}
\item{xstand}{
If this is TRUE the independent variables are standardised.
}
\item{tol}{
The tolerance value to terminate the algorithm. This is the change in the criterion value
between two successive steps. For "ompr" the default value is 2 because the default method
is "BIC". The default value is the 95\% quantile of the \eqn{\chi^2} distribution.
}
\item{type}{
This denotes the parametric model to be used each time. It depends upon the nature of y.
The possible values are "gamma", "negbin", or "multinomial".
}
}
\details{
This is the continuation of the "omp" function of the Rfast. We added some more regression models.
The "gamma" and the "multinomial" models have now been implemented in C++.
}
\value{
A list including:
\item{runtime}{
The runtime of the algorithm.
}
\item{info}{
A matrix with two columns. The selected variable(s) and the criterion value at every step.
}
}
\references{
Pati Y. C., Rezaiifar R. and Krishnaprasad P. S. (1993). Orthogonal matching pursuit: Recursive function approximation with applications to wavelet decomposition. In Signals, Systems and Computers. 1993 Conference Record of The Twenty-Seventh Asilomar Conference on. IEEE.
Mazin Abdulrasool Hameed (2012). Comparative analysis of orthogonal matching pursuit and least angle regression. MSc thesis, Michigan State University.
https://www.google.gr/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&ved=0ahUKEwik9P3Yto7XAhUiCZoKHQ8XDr8QFgglMAA&url=https%3A%2F%2Fd.lib.msu.edu%2Fetd%2F1711%2Fdatastream%2FOBJ%2Fdownload%2FComparative_analysis_of_orthogonal_matching_pursuit_and_least_angle_regression.pdf&usg=AOvVaw2fRcSemcbteyWUiAKYi-8B
Lozano A., Swirszcz G. and Abe N. (2011). Group orthogonal matching pursuit for logistic regression. In Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics.
The \eqn{\gamma}-OMP algorithm for feature selection with application to gene expression data.
IEEE/ACM Transactions on Computational Biology and Bioinformatics (Accepted for publication)
https://arxiv.org/pdf/2004.00281.pdf
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{mmpc2}, \link{pc.sel}
}
}
\examples{
x <- matrix( rnorm(100 * 50), ncol = 50 )
y <- rgamma(100, 4, 1)
a <- omp2(y, x)
a
x <- NULL
}
| /man/omp2.Rd | no_license | minghao2016/Rfast2 | R | false | false | 2,934 | rd | \name{Orthogonal matching pursuit regression}
\alias{omp2}
\title{
Orthogonal matching pursuit regression
}
\description{
Orthogonal matching pursuit regression.
}
\usage{
omp2(y, x, xstand = TRUE, tol = qchisq(0.95, 1), type = "gamma" )
}
\arguments{
\item{y}{
The response variable, a numeric vector. For "omp" this can be either a vector with discrete
(count) data, 0 and 1, non negative values, strictly positive or a factor (categorical) variable.
}
\item{x}{
A matrix with the data, where the rows denote the observations and the columns are the variables.
}
\item{xstand}{
If this is TRUE the independent variables are standardised.
}
\item{tol}{
The tolerance value to terminate the algorithm. This is the change in the criterion value
between two successive steps. For "ompr" the default value is 2 because the default method
is "BIC". The default value is the 95\% quantile of the \eqn{\chi^2} distribution.
}
\item{type}{
This denotes the parametric model to be used each time. It depends upon the nature of y.
The possible values are "gamma", "negbin", or "multinomial".
}
}
\details{
This is the continuation of the "omp" function of the Rfast. We added some more regression models.
The "gamma" and the "multinomial" models have now been implemented in C++.
}
\value{
A list including:
\item{runtime}{
The runtime of the algorithm.
}
\item{info}{
A matrix with two columns. The selected variable(s) and the criterion value at every step.
}
}
\references{
Pati Y. C., Rezaiifar R. and Krishnaprasad P. S. (1993). Orthogonal matching pursuit: Recursive function approximation with applications to wavelet decomposition. In Signals, Systems and Computers. 1993 Conference Record of The Twenty-Seventh Asilomar Conference on. IEEE.
Mazin Abdulrasool Hameed (2012). Comparative analysis of orthogonal matching pursuit and least angle regression. MSc thesis, Michigan State University.
https://www.google.gr/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&ved=0ahUKEwik9P3Yto7XAhUiCZoKHQ8XDr8QFgglMAA&url=https%3A%2F%2Fd.lib.msu.edu%2Fetd%2F1711%2Fdatastream%2FOBJ%2Fdownload%2FComparative_analysis_of_orthogonal_matching_pursuit_and_least_angle_regression.pdf&usg=AOvVaw2fRcSemcbteyWUiAKYi-8B
Lozano A., Swirszcz G. and Abe N. (2011). Group orthogonal matching pursuit for logistic regression. In Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics.
The \eqn{\gamma}-OMP algorithm for feature selection with application to gene expression data.
IEEE/ACM Transactions on Computational Biology and Bioinformatics (Accepted for publication)
https://arxiv.org/pdf/2004.00281.pdf
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{mmpc2}, \link{pc.sel}
}
}
\examples{
x <- matrix( rnorm(100 * 50), ncol = 50 )
y <- rgamma(100, 4, 1)
a <- omp2(y, x)
a
x <- NULL
}
|
# Link to puzzle:
# https://fivethirtyeight.com/features/how-many-more-palindrome-dates-will-you-see/
library(stringi)

# Every calendar day from 02/03/2020 through the end of 2099.
start <- as.Date("02/03/2020", format = "%m/%d/%Y")
end <- as.Date("12/31/2099", format = "%m/%d/%Y")
dates <- seq.Date(from = start, to = end, by = "day")

# Reformat the ISO "YYYY-MM-DD" strings as "MMDDYYYY" so palindromes can
# be detected on the US-style date spelling.
dates_string <- as.character(dates)
dates_formatted <- paste0(substr(dates_string, 6, 7), substr(dates_string, 9, 10), substr(dates_string, 1, 4))

# A date string is a palindrome when it equals its own reversal
# (stri_reverse() from "stringi" reverses a string).  Comparing directly
# yields the logical result, so no explicit if/else is needed; this also
# works element-wise on whole character vectors.
is_palindrome <- function(date_string){
  date_string == stri_reverse(date_string)
}

# vapply() is preferred over sapply() here: the result type (one logical
# per date, named by the date string) is guaranteed rather than guessed.
answer <- vapply(dates_formatted, is_palindrome, logical(1))
print(length(answer[answer == TRUE])) | /riddler_express_2020-02-07.R | no_license | austinlesh/riddler-solutions | R | false | false | 728 | r | # Link to puzzle:
# https://fivethirtyeight.com/features/how-many-more-palindrome-dates-will-you-see/
library(stringi)

# Every calendar day from 02/03/2020 through the end of 2099.
start <- as.Date("02/03/2020", format = "%m/%d/%Y")
end <- as.Date("12/31/2099", format = "%m/%d/%Y")
dates <- seq.Date(from = start, to = end, by = "day")

# Reformat the ISO "YYYY-MM-DD" strings as "MMDDYYYY" so palindromes can
# be detected on the US-style date spelling.
dates_string <- as.character(dates)
dates_formatted <- paste0(substr(dates_string, 6, 7), substr(dates_string, 9, 10), substr(dates_string, 1, 4))

# A date string is a palindrome when it equals its own reversal
# (stri_reverse() from "stringi" reverses a string).  Comparing directly
# yields the logical result, so no explicit if/else is needed; this also
# works element-wise on whole character vectors.
is_palindrome <- function(date_string){
  date_string == stri_reverse(date_string)
}

# vapply() is preferred over sapply() here: the result type (one logical
# per date, named by the date string) is guaranteed rather than guessed.
answer <- vapply(dates_formatted, is_palindrome, logical(1))
print(length(answer[answer == TRUE])) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parties.R
\name{to}
\alias{to}
\title{Add To field to message}
\usage{
to(msg, ...)
}
\arguments{
\item{msg}{A message object.}
\item{...}{Email addresses.}
}
\value{
A message object.
}
\description{
Add To field to message
}
\examples{
msg <- envelope()
to(msg, "bob@gmail.com", "alice@yahoo.com")
to(msg, c("bob@gmail.com", "alice@yahoo.com"))
}
\seealso{
\code{\link{cc}}, \code{\link{bcc}}, \code{\link{from}}, \code{\link{sender}}, \code{\link{reply}} and \code{\link{subject}}
}
| /man/to.Rd | no_license | adam-gruer/emayili | R | false | true | 565 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parties.R
\name{to}
\alias{to}
\title{Add To field to message}
\usage{
to(msg, ...)
}
\arguments{
\item{msg}{A message object.}
\item{...}{Email addresses.}
}
\value{
A message object.
}
\description{
Add To field to message
}
\examples{
msg <- envelope()
to(msg, "bob@gmail.com", "alice@yahoo.com")
to(msg, c("bob@gmail.com", "alice@yahoo.com"))
}
\seealso{
\code{\link{cc}}, \code{\link{bcc}}, \code{\link{from}}, \code{\link{sender}}, \code{\link{reply}} and \code{\link{subject}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_update_routing_profile_default_outbound_queue}
\alias{connect_update_routing_profile_default_outbound_queue}
\title{Updates the default outbound queue of a routing profile}
\usage{
connect_update_routing_profile_default_outbound_queue(InstanceId,
RoutingProfileId, DefaultOutboundQueueId)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{RoutingProfileId}{[required] The identifier of the routing profile.}
\item{DefaultOutboundQueueId}{[required] The identifier for the default outbound queue.}
}
\value{
An empty list.
}
\description{
Updates the default outbound queue of a routing profile.
}
\section{Request syntax}{
\preformatted{svc$update_routing_profile_default_outbound_queue(
InstanceId = "string",
RoutingProfileId = "string",
DefaultOutboundQueueId = "string"
)
}
}
\keyword{internal}
| /cran/paws.customer.engagement/man/connect_update_routing_profile_default_outbound_queue.Rd | permissive | TWarczak/paws | R | false | true | 970 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_update_routing_profile_default_outbound_queue}
\alias{connect_update_routing_profile_default_outbound_queue}
\title{Updates the default outbound queue of a routing profile}
\usage{
connect_update_routing_profile_default_outbound_queue(InstanceId,
RoutingProfileId, DefaultOutboundQueueId)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{RoutingProfileId}{[required] The identifier of the routing profile.}
\item{DefaultOutboundQueueId}{[required] The identifier for the default outbound queue.}
}
\value{
An empty list.
}
\description{
Updates the default outbound queue of a routing profile.
}
\section{Request syntax}{
\preformatted{svc$update_routing_profile_default_outbound_queue(
InstanceId = "string",
RoutingProfileId = "string",
DefaultOutboundQueueId = "string"
)
}
}
\keyword{internal}
|
#' Cleanly convert a rasterLayer to dataframe format
#'
#' Converts a rasterLayer to a 3-column dataframe. The first two columns
#' contain the 'x' and 'y' coordinates, the third the rasterLayer 'value'.
#' NA's are automatically removed.
#'
#' @param surface rasterLayer to be converted to a dataframe
#' @return A 3-column dataframe with coordinates and value.
#'
#' @importFrom raster as.data.frame
#' @importFrom dplyr rename
#' @importFrom magrittr %>%
#'
#' @export
surface2df <- function(surface) {
  # TRUE is written out rather than the reassignable alias T for safety.
  surface %>%
    raster::as.data.frame(
      xy = TRUE, na.rm = TRUE
    ) %>%
    dplyr::rename(value = 3)
}
| /R/surface2df.R | no_license | cjcampbell/geoshift | R | false | false | 644 | r | #' Cleanly convert a rasterLayer to dataframe format
#'
#' This function makes a clean convert of a rasterLayer to a 3-column dataframe.
#' The first two columns will contain 'x' and 'y' coordinates, the third the
#' rasterLayer 'value'. NA's are automatically removed.
#'
#' @param surface rasterLayer to be converted to a dataframe
#' @return A 3-column dataframe with coordinates and value.
#'
#' @importFrom raster as.data.frame
#' @importFrom dplyr rename
#' @importFrom magrittr %>%
#'
#' @export
surface2df <- function(surface) {
  # Convert to a 3-column (x, y, value) data frame, dropping NA cells.
  # TRUE is written out rather than the reassignable alias T for safety.
  surface %>%
    raster::as.data.frame(
      xy = TRUE, na.rm = TRUE
    ) %>%
    dplyr::rename(value = 3)
}
|
## This function calculates heights of trees given distance of each tree
## from its base and angle to its top, using the trigonometric formula
# Author: Rachel Bates (r.bates18@imperial.ac.uk)
# Version: 0.0.1
## Clear the directory ##
# NOTE(review): rm(list=ls()) erases every object in the user's workspace
# when this script is sourced; safer to rely on a fresh R session instead.
rm(list=ls())
## Load Data ##
# Expects a trees.csv with (at least) Angle.degrees and Distance.m columns,
# as used further down in this script.
Trees <- read.csv("../Data/trees.csv")
###############
#Calculates tree height from the distance to a tree's base and the angle to
#its top: height = distance * tan(angle in radians). Both arguments are
#vectorised, so whole columns can be converted in a single call.
#Fix: the old `for (species in Trees)` loop iterated over a global data
#frame, never used `species`, and returned on its first pass - it only added
#a needless dependency on a global `Trees` object, so it has been removed.
TreeHeight <- function(degrees, distance){
    radians <- degrees * pi / 180
    height <- distance * tan(radians)
    print(paste("Tree height is:", height))
    return (height) #Outputs the calculated height
}
#Creates a new column in Trees of the returned values from TreeHeight
#(the arithmetic is vectorised, so one call computes a height for every row)
Trees$Tree.height.m<-TreeHeight(Trees$Angle.degrees, Trees$Distance.m)
# Writes the dataframe to an output file
write.csv (Trees, "../Output/TreeHts.csv")
| /Week3/Code/TreeHeight.R | no_license | RLBat/CMEECourseWork | R | false | false | 825 | r | ## This function calculates heights of trees given distance of each tree
## from its base and angle to its top, using the trigonometric formula
# Author: Rachel Bates (r.bates18@imperial.ac.uk)
# Version: 0.0.1
## Clear the directory ##
rm(list=ls())
## Load Data ##
Trees <- read.csv("../Data/trees.csv")
###############
#Calculates tree height from the distance to a tree's base and the angle to
#its top: height = distance * tan(angle in radians). Both arguments are
#vectorised, so whole columns can be converted in a single call.
#Fix: the old `for (species in Trees)` loop iterated over a global data
#frame, never used `species`, and returned on its first pass - it only added
#a needless dependency on a global `Trees` object, so it has been removed.
TreeHeight <- function(degrees, distance){
    radians <- degrees * pi / 180
    height <- distance * tan(radians)
    print(paste("Tree height is:", height))
    return (height) #Outputs the calculated height
}
#Creates a new column in Trees of the returned values from TreeHeight
#(the arithmetic is vectorised, so one call computes a height for every row)
Trees$Tree.height.m<-TreeHeight(Trees$Angle.degrees, Trees$Distance.m)
# Writes the dataframe to an output file
write.csv (Trees, "../Output/TreeHts.csv")
|
# Reads the household power consumption text file (semicolon separated,
# decimal point ".") and keeps only the two days of interest: 1-2 Feb 2007.
dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subsetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Force numeric type; any entries that cannot be parsed become NA.
globalActivePower <- as.numeric(subsetData$Global_active_power)
# Plot 1: red histogram of global active power, written to plot1.png (480x480).
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off() | /Plot1.R | no_license | TermiJAG/ExData_Plotting1 | R | false | false | 410 | r | dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subsetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
globalActivePower <- as.numeric(subsetData$Global_active_power)
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off() |
# Antonini 9/7 biorthogonal filter bank.
#
# Returns analysis ("af") and synthesis ("sf") filter pairs: element [[1]]
# holds the first-stage filters, element [[2]] the filters for the remaining
# stages.  In the original source every coefficient vector was written out
# twice; here the remaining-stage filters are derived from the first-stage
# ones, since each is the same 12-tap vector shifted by a single tap
# (aa*: one zero prepended / last tap dropped; ss*: leading tap dropped /
# one zero appended).  The numeric results are identical.
AntonB <- function() {
  # First-stage analysis filters (lowpass a0, highpass a1).
  a0 <- c(0,
          0.02674875741081,
          -0.01686411844287,
          -0.07822326652899,
          0.26686411844288,
          0.60294901823636,
          0.26686411844287,
          -0.07822326652899,
          -0.01686411844287,
          0.02674875741081,
          0,
          0)
  a1 <- c(0,
          0,
          0,
          0.04563588155712,
          -0.02877176311425,
          -0.29563588155712,
          0.55754352622850,
          -0.29563588155713,
          -0.02877176311425,
          0.04563588155712,
          0,
          0)
  # First-stage synthesis filters; scaled by 2 below.
  s0 <- c(0,
          0,
          0,
          -0.04563588155712,
          -0.02877176311425,
          0.29563588155712,
          0.55754352622850,
          0.29563588155713,
          -0.02877176311425,
          -0.04563588155712,
          0,
          0)
  s1 <- c(0,
          0.02674875741081,
          0.01686411844287,
          -0.07822326652899,
          -0.26686411844288,
          0.60294901823636,
          -0.26686411844287,
          -0.07822326652899,
          0.01686411844287,
          0.02674875741081,
          0,
          0)
  s0 <- 2 * s0
  s1 <- 2 * s1
  # Remaining-stage analysis filters: the first-stage taps delayed by one
  # sample (prepend a zero, drop the trailing zero).
  aa0 <- c(0, a0[-length(a0)])
  aa1 <- c(0, a1[-length(a1)])
  # Remaining-stage synthesis filters: the (already scaled) first-stage taps
  # advanced by one sample (drop the leading zero, append a zero).
  ss0 <- c(s0[-1], 0)
  ss1 <- c(s1[-1], 0)
  list(af = list(cbind(a0, a1), cbind(aa0, aa1)),
       sf = list(cbind(s0, s1), cbind(ss0, ss1)))
}
| /R/Anton.R | no_license | andrewzm/waveslim | R | false | false | 2,490 | r | AntonB <- function() {
a0 <- c(0,
0.02674875741081,
-0.01686411844287,
-0.07822326652899,
0.26686411844288,
0.60294901823636,
0.26686411844287,
-0.07822326652899,
-0.01686411844287,
0.02674875741081,
0,
0)
a1 <- c(0,
0,
0,
0.04563588155712,
-0.02877176311425,
-0.29563588155712,
0.55754352622850,
-0.29563588155713,
-0.02877176311425,
0.04563588155712,
0,
0)
s0 <- c(0,
0,
0,
-0.04563588155712,
-0.02877176311425,
0.29563588155712,
0.55754352622850,
0.29563588155713,
-0.02877176311425,
-0.04563588155712,
0,
0)
s1 <- c(0,
0.02674875741081,
0.01686411844287,
-0.07822326652899,
-0.26686411844288,
0.60294901823636,
-0.26686411844287,
-0.07822326652899,
0.01686411844287,
0.02674875741081,
0,
0)
s0 <- 2 * s0
s1 <- 2 * s1
aa0 <- c(0,
0,
0.02674875741081,
-0.01686411844287,
-0.07822326652899,
0.26686411844288,
0.60294901823636,
0.26686411844287,
-0.07822326652899,
-0.01686411844287,
0.02674875741081,
0)
aa1 <- c(0,
0,
0,
0,
0.04563588155712,
-0.02877176311425,
-0.29563588155712,
0.55754352622850,
-0.29563588155713,
-0.02877176311425,
0.04563588155712,
0)
ss0 <- c(0,
0,
-0.04563588155712,
-0.02877176311425,
0.29563588155712,
0.55754352622850,
0.29563588155713,
-0.02877176311425,
-0.04563588155712,
0,
0,
0)
ss1 <- c(0.02674875741081,
0.01686411844287,
-0.07822326652899,
-0.26686411844288,
0.60294901823636,
-0.26686411844287,
-0.07822326652899,
0.01686411844287,
0.02674875741081,
0,
0,
0)
ss0 <- 2 * ss0
ss1 <- 2 * ss1
list(af = list(cbind(a0, a1), cbind(aa0, aa1)),
sf = list(cbind(s0, s1), cbind(ss0, ss1)))
}
|
##
## tokens_hashed tests
##
test_that("nsyllable works as expected", {
    # Hashed tokens() and classic tokenize() objects should give the same
    # syllable counts.
    txt <- c(one = "super freakily yes",
             two = "merrily all go aerodynamic")
    toksh <- tokens(txt)
    toks <- tokenize(txt)
    # NOTE(review): the third argument (the literal expected list) falls into
    # `...` of expect_equivalent() and is not itself compared - confirm the
    # expected counts are really asserted.
    expect_equivalent(nsyllable(toks), nsyllable(toksh), list(c(2, 3, 1), c(3, 1, 1, 5)))
})
test_that("nsyllable works as expected with padding = TRUE", {
    # Tokens removed with padding = TRUE leave empty pads, which should
    # count as NA syllables.
    txt <- c(one = "super freakily yes",
             two = "merrily, all go aerodynamic")
    toks <- tokens_remove(tokens(txt), c("yes", "merrily"), padding = TRUE)
    expect_equivalent(nsyllable(toks), list(c(2, 3, NA), c(NA, NA, 1, 1, 5)))
})
test_that("tokens_wordstem works as expected for tokens_hashed", {
    # Stemming a hashed tokens object should match stemming classic tokens.
    txt <- c(one = "Eating eater eaters eats ate.",
             two = "Taxing taxes taxed my tax return.")
    toks <- tokenize(char_tolower(txt), removePunct = TRUE)
    toksh <- tokens(char_tolower(txt), removePunct = TRUE)
    classic <- tokens_wordstem(toks)
    hashed <- tokens_wordstem(toksh)
    expect_equivalent(classic, as.tokenizedTexts(hashed))
})
test_that("ngrams works as expected for tokens_hashed", {
    # n-grams built from hashed and classic tokens should contain the same
    # strings; ordering may differ, so both sides are sorted before comparing.
    txt <- c(one = char_tolower("Insurgents killed in ongoing fighting."),
             two = "A B C D E")
    toks <- tokenize(txt, removePunct = TRUE)
    toksh <- tokens(txt, removePunct = TRUE)
    classic <- tokens_ngrams(toks, n = 2:3)
    hashed <- as.tokenizedTexts(tokens_ngrams(toksh, n = 2:3))
    # testthat::expect_equivalent(as.list(classic),
    #                             as.list(hashed))
    classic <- list(sort(unlist(classic$one)), sort(unlist(classic$two)))
    hashed <- list(sort(unlist(hashed$one)), sort(unlist(hashed$two)))
    expect_equivalent(lapply(classic, sort),
                      lapply(hashed, sort))
})
test_that("skipgrams works as expected for tokens_hashed", {
txt <- c(one = "insurgents killed in ongoing fighting")
toks <- tokenize(txt)
toksh <- tokens(txt)
classic <- skipgrams(toks, n = 3, skip = 0:2, concatenator = " ")
hashed <- skipgrams(toksh, n = 3, skip = 0:2, concatenator = " ")
expect_equivalent(classic, as.tokenizedTexts(hashed))
})
test_that("as.tokens tokenizedTexts works as expected", {
  txt <- c(doc1 = "The first sentence is longer than the second.",
           doc2 = "Told you so.")
  # converting a classic tokenizedTexts object must yield the same object
  # as hashing the texts directly with tokens()
  expect_equivalent(tokens(txt), as.tokens(tokenize(txt)))
})
test_that("as.tokens list version works as expected", {
txt <- c(doc1 = "The first sentence is longer than the second.",
doc2 = "Told you so.")
toksh <- tokens(txt)
toks <- tokenize(txt)
attributes(toks) <- NULL
names(toks) <- names(txt)
expect_equal(class(toks), "list")
expect_equivalent(toksh,
as.tokens(toks))
})
test_that("as.tokens list version works as expected", {
txt <- c(doc1 = "The first sentence is longer than the second.",
doc2 = "Told you so.")
tokslist <- as.list(tokens(txt))
toks <- tokens(txt)
expect_equal(as.tokens(tokslist),
toks)
})
test_that("tokens indexing works as expected", {
  toks <- tokens(c(d1 = "one two three", d2 = "four five six", d3 = "seven eight"))
  doc1_words <- c("one", "two", "three")
  doc2_words <- c("four", "five", "six")
  # $ and [[ extract a single document as a plain character vector
  expect_equal(toks$d1, doc1_words)
  expect_equal(toks[[1]], doc1_words)
  # [ subsets the tokens object itself, keeping the document name
  expect_equal(as.list(toks["d2"]), list(d2 = doc2_words))
  expect_equal(as.list(toks[2]), list(d2 = doc2_words))
  # subsetting must also recompile the "types" attribute (issue #370)
  expect_equal(attr(toks[1], "types"), doc1_words)
  expect_equal(attr(toks[2], "types"), doc2_words)
})
test_that("tokens_hashed_recompile combine duplicates is working", {
toksh <- tokens(c(one = "a b c d A B C D", two = "A B C d"))
expect_equivalent(attr(toksh, "types"),
c("a", "b", "c", "d", "A", "B", "C", "D"))
expect_equivalent(attr(tokens_tolower(toksh), "types"),
c("a", "b", "c", "d"))
attr(toksh, "types") <- char_tolower(attr(toksh, "types"))
expect_equivalent(attr(quanteda:::tokens_hashed_recompile(toksh), "types"),
c("a", "b", "c", "d"))
})
test_that("test `ngrams` with padding = FALSE: #428", {
toks <- tokens(c(doc1 = 'a b c d e f g'))
toks2 <- tokens_remove(toks, c('b', 'e'), padding = FALSE)
expect_equal(as.list(tokens_ngrams(toks2, n = 2)),
list(doc1 = c("a_c", "c_d", "d_f", "f_g")))
expect_equal(as.list(tokens_ngrams(toks2, n = 3)),
list(doc1 = c("a_c_d", "c_d_f", "d_f_g")))
expect_equal(as.list(tokens_ngrams(toks2, n = 2, skip = 2)),
list(doc1 = c("a_f", "c_g")))
})
test_that("test `ngrams` with padding = TRUE: #428", {
toks <- tokens(c(doc1 = 'a b c d e f g'))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = TRUE)
expect_equal(as.list(tokens_ngrams(toks3, n = 2)),
list(doc1 = c("c_d", "f_g")))
expect_equal(as.list(tokens_ngrams(toks3, n = 3)),
list(doc1 = character(0)))
expect_equal(as.list(tokens_ngrams(toks3, n = 2, skip = 2)),
list(doc1 = c("a_d", "c_f", "d_g")))
})
test_that("test dfm with padded tokens, padding = FALSE", {
toks <- tokens(c(doc1 = 'a b c d e f g',
doc2 = 'a b c g',
doc3 = ''))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = FALSE)
expect_equivalent(as.matrix(dfm(toks3)),
matrix(c(1, 1, 1, 1, 1,
1, 1, 0, 0, 1,
0, 0, 0, 0, 0), nrow = 3, byrow = TRUE))
})
test_that("test dfm with padded tokens, padding = TRUE", {
toks <- tokens(c(doc1 = 'a b c d e f g',
doc2 = 'a b c g',
doc3 = ''))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = TRUE)
expect_equivalent(as.matrix(dfm(toks3)),
matrix(c(2, 1, 1, 1, 1, 1,
1, 1, 1, 0, 0, 1,
0, 0, 0, 0, 0, 0), nrow = 3, byrow = TRUE))
})
# Test description typo fixed: "verious" -> "various".
test_that("test various functions with padding = FALSE", {
  toks <- tokens(c(doc1 = 'A b c d E f g',
                   doc2 = 'a b c g'))
  toks3 <- tokens_remove(toks, c('b', 'e'), padding = FALSE)
  # without pads there are 6 types; case-folding merges A/a into one, giving 5
  expect_equivalent(nfeature(toks3), 6)
  expect_equivalent(nfeature(tokens_tolower(toks3)), 5)
  expect_equivalent(nfeature(tokens_toupper(toks3)), 5)
  expect_equivalent(as.character(toks3),
                    c("A", "c", "d", "f", "g", "a", "c", "g"))
})
# Test description typo fixed: "verious" -> "various".
test_that("test various functions with padding = TRUE", {
  toks <- tokens(c(doc1 = 'A b c d E f g',
                   doc2 = 'a b c g'))
  toks3 <- tokens_remove(toks, c('b', 'e'), padding = TRUE)
  # the pad ("") counts as one extra feature type
  expect_equivalent(nfeature(toks3), 7)
  expect_equivalent(nfeature(tokens_tolower(toks3)), 6)
  expect_equivalent(nfeature(tokens_toupper(toks3)), 6)
  expect_equivalent(as.character(toks3),
                    c("A", "", "c", "d", "", "f", "g", "a", "", "c", "g"))
})
test_that("docnames works for tokens", {
expect_equal(names(data_char_ukimmig2010),
docnames(tokens(data_char_ukimmig2010)))
})
# Regression test for issue #447: a multi-word feature pattern longer than a
# document must not crash tokens_select().
test_that("longer features longer than documents do not crash (#447)", {
toks <- tokens(c(d1 = 'a b', d2 = 'a b c d e'))
feat <- 'b c d e'
# the underlying C++ bug was intermittent, so run the selection repeatedly
expect_silent(replicate(10, tokens_select(toks, feat)))
expect_equal(
as.list(tokens_select(toks, feat)),
list(d1 = character(0), d2 = c("b", "c", "d", "e"))
)
})
test_that("tokens works as expected for what = \"character\"", {
expect_equal(
as.character(tokens("one, two three.", what = "character", removeSeparators = TRUE)),
c("o", "n", "e", ",", "t", "w", "o", "t", "h", "r", "e", "e", ".")
)
expect_equal(
as.character(tokens("one, two three.", what = "character", removeSeparators = FALSE)),
c("o", "n", "e", ",", " ", "t", "w", "o", " ", "t", "h", "r", "e", "e", ".")
)
expect_equal(
as.character(tokens("one, two three.", what = "character", removePunct = TRUE)),
c("o", "n", "e", "t", "w", "o", "t", "h", "r", "e", "e")
)
})
#' # coerce an object into a tokens class
#' as.tokens(toks)
#' \dontrun{
#' tokens2 <- tokenize(head(inaugTexts, 10), removePunct=TRUE)
#' tokens2_hashed <- hashTokens(tokens2)
#'
#' profvis::profvis({
#' ngrams(tokens2_hashed, n = 2:3, skip = 1:2, concatenator = "-")
#' })
#'
#' microbenchmark::microbenchmark(
#' old=ngrams(tokens2, n = 2:3, skip = 1:2, concatenator = "-"),
#' new=ngrams(tokens2_hashed, n = 2:3, skip = 1:2, concatenator = "-"),
#' times=10, unit='relative'
#' )
#'
#'
#' Rcpp::sourceCpp('src/ngrams_hashed.cpp')
#' Rcpp::sourceCpp('src/ngrams_class.cpp')
#' Rcpp::sourceCpp('src/ngrams.cpp')
#' nm <- new(ngramMaker)
#'
#' microbenchmark::microbenchmark(
#' old=skipgramcpp(tokens2[[1]], 2:3, 1:2, '-'),
#' new=qatd_cpp_ngram_hashed_vector(tokens2_hashed[[1]], 2:3, 1:2),
#' class=nm$generate(tokens2_hashed[[1]], 2:3, 1:2),
#' times=100, unit='relative'
#' )
#'
#' microbenchmark::microbenchmark(
#' obj=nm$generate_list(tokens2_hashed, 2:3, 1:2),
#' ptr=nm$generate_list_ptr(tokens2_hashed, 2:3, 1:2),
#' times=100, unit='relative'
#' )
#'
#'
#' tokens3 <- rep(letters, 50)
#' types3 <- unique(tokens3)
#' tokens3_hashed <- match(tokens3, types3)
#' microbenchmark::microbenchmark(
#' old=skipgramcpp(tokens3, 2:3, 1:2, '-'),
#' new=qatd_cpp_ngram_hashed_vector(tokens3_hashed, 2:3, 1:2),
#' times=10, unit='relative'
#' )
#'
#' # Test with greater lexical diversity
#' tokens4 <- paste0(sample(letters, length(tokens3), replace=TRUE),
#' sample(letters, length(tokens3), replace=TRUE))
#' types4 <- unique(tokens4)
#' tokens4_hashed <- match(tokens4, types4)
#' microbenchmark::microbenchmark(
#' low=qatd_cpp_ngram_hashed_vector(tokens3_hashed, 2:3, 1:2),
#' high=qatd_cpp_ngram_hashed_vector(tokens4_hashed, 2:3, 1:2),
#' times=100, unit='relative'
#' )
#'
#'
#' # Comparison with tokenizers's skip-grams
#' tokenizers::tokenize_skip_ngrams('a b c d e', n=3, k=1)
#' # "a c e" "a b c" "b c d" "c d e"
#' tokenizers::tokenize_skip_ngrams('a b c d e', n=3, k=2)
#' # "a c e" "a b c" "b c d" "c d e"
#'
#' ngrams(tokenize('a b c d e'), n=3, skip=0:1, concatenator=' ')
#' # "a b c" "a b d" "a c d" "a c e" "b c d" "b c e" "b d e" "c d e"
#'
#'}
| /tests/testthat/test-tokens.R | no_license | anavaldi/quanteda | R | false | false | 10,524 | r | ##
## tokens_hashed tests
##
test_that("nsyllable works as expected", {
txt <- c(one = "super freakily yes",
two = "merrily all go aerodynamic")
toksh <- tokens(txt)
toks <- tokenize(txt)
expect_equivalent(nsyllable(toks), nsyllable(toksh), list(c(2, 3, 1), c(3, 1, 1, 5)))
})
test_that("nsyllable works as expected with padding = TRUE", {
txt <- c(one = "super freakily yes",
two = "merrily, all go aerodynamic")
toks <- tokens_remove(tokens(txt), c("yes", "merrily"), padding = TRUE)
expect_equivalent(nsyllable(toks), list(c(2, 3, NA), c(NA, NA, 1, 1, 5)))
})
test_that("tokens_wordstem works as expected for tokens_hashed", {
txt <- c(one = "Eating eater eaters eats ate.",
two = "Taxing taxes taxed my tax return.")
toks <- tokenize(char_tolower(txt), removePunct = TRUE)
toksh <- tokens(char_tolower(txt), removePunct = TRUE)
classic <- tokens_wordstem(toks)
hashed <- tokens_wordstem(toksh)
expect_equivalent(classic, as.tokenizedTexts(hashed))
})
test_that("ngrams works as expected for tokens_hashed", {
txt <- c(one = char_tolower("Insurgents killed in ongoing fighting."),
two = "A B C D E")
toks <- tokenize(txt, removePunct = TRUE)
toksh <- tokens(txt, removePunct = TRUE)
classic <- tokens_ngrams(toks, n = 2:3)
hashed <- as.tokenizedTexts(tokens_ngrams(toksh, n = 2:3))
# testthat::expect_equivalent(as.list(classic),
# as.list(hashed))
classic <- list(sort(unlist(classic$one)), sort(unlist(classic$two)))
hashed <- list(sort(unlist(hashed$one)), sort(unlist(hashed$two)))
expect_equivalent(lapply(classic, sort),
lapply(hashed, sort))
})
test_that("skipgrams works as expected for tokens_hashed", {
txt <- c(one = "insurgents killed in ongoing fighting")
toks <- tokenize(txt)
toksh <- tokens(txt)
classic <- skipgrams(toks, n = 3, skip = 0:2, concatenator = " ")
hashed <- skipgrams(toksh, n = 3, skip = 0:2, concatenator = " ")
expect_equivalent(classic, as.tokenizedTexts(hashed))
})
test_that("as.tokens tokenizedTexts works as expected", {
txt <- c(doc1 = "The first sentence is longer than the second.",
doc2 = "Told you so.")
toks <- tokenize(txt)
toksh <- tokens(txt)
expect_equivalent(toksh,
as.tokens(toks))
})
test_that("as.tokens list version works as expected", {
txt <- c(doc1 = "The first sentence is longer than the second.",
doc2 = "Told you so.")
toksh <- tokens(txt)
toks <- tokenize(txt)
attributes(toks) <- NULL
names(toks) <- names(txt)
expect_equal(class(toks), "list")
expect_equivalent(toksh,
as.tokens(toks))
})
test_that("as.tokens list version works as expected", {
txt <- c(doc1 = "The first sentence is longer than the second.",
doc2 = "Told you so.")
tokslist <- as.list(tokens(txt))
toks <- tokens(txt)
expect_equal(as.tokens(tokslist),
toks)
})
test_that("tokens indexing works as expected", {
toks <- tokens(c(d1 = "one two three", d2 = "four five six", d3 = "seven eight"))
expect_equal(toks$d1, c("one", "two", "three"))
expect_equal(toks[[1]], c("one", "two", "three"))
expect_equal(as.list(toks["d2"]), list(d2 = c("four", "five", "six")))
expect_equal(as.list(toks[2]), list(d2 = c("four", "five", "six")))
# issue #370
expect_equal(attr(toks[1], "types"), c("one", "two", "three"))
expect_equal(attr(toks[2], "types"), c("four", "five", "six"))
})
test_that("tokens_hashed_recompile combine duplicates is working", {
toksh <- tokens(c(one = "a b c d A B C D", two = "A B C d"))
expect_equivalent(attr(toksh, "types"),
c("a", "b", "c", "d", "A", "B", "C", "D"))
expect_equivalent(attr(tokens_tolower(toksh), "types"),
c("a", "b", "c", "d"))
attr(toksh, "types") <- char_tolower(attr(toksh, "types"))
expect_equivalent(attr(quanteda:::tokens_hashed_recompile(toksh), "types"),
c("a", "b", "c", "d"))
})
test_that("test `ngrams` with padding = FALSE: #428", {
toks <- tokens(c(doc1 = 'a b c d e f g'))
toks2 <- tokens_remove(toks, c('b', 'e'), padding = FALSE)
expect_equal(as.list(tokens_ngrams(toks2, n = 2)),
list(doc1 = c("a_c", "c_d", "d_f", "f_g")))
expect_equal(as.list(tokens_ngrams(toks2, n = 3)),
list(doc1 = c("a_c_d", "c_d_f", "d_f_g")))
expect_equal(as.list(tokens_ngrams(toks2, n = 2, skip = 2)),
list(doc1 = c("a_f", "c_g")))
})
test_that("test `ngrams` with padding = TRUE: #428", {
toks <- tokens(c(doc1 = 'a b c d e f g'))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = TRUE)
expect_equal(as.list(tokens_ngrams(toks3, n = 2)),
list(doc1 = c("c_d", "f_g")))
expect_equal(as.list(tokens_ngrams(toks3, n = 3)),
list(doc1 = character(0)))
expect_equal(as.list(tokens_ngrams(toks3, n = 2, skip = 2)),
list(doc1 = c("a_d", "c_f", "d_g")))
})
test_that("test dfm with padded tokens, padding = FALSE", {
toks <- tokens(c(doc1 = 'a b c d e f g',
doc2 = 'a b c g',
doc3 = ''))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = FALSE)
expect_equivalent(as.matrix(dfm(toks3)),
matrix(c(1, 1, 1, 1, 1,
1, 1, 0, 0, 1,
0, 0, 0, 0, 0), nrow = 3, byrow = TRUE))
})
test_that("test dfm with padded tokens, padding = TRUE", {
toks <- tokens(c(doc1 = 'a b c d e f g',
doc2 = 'a b c g',
doc3 = ''))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = TRUE)
expect_equivalent(as.matrix(dfm(toks3)),
matrix(c(2, 1, 1, 1, 1, 1,
1, 1, 1, 0, 0, 1,
0, 0, 0, 0, 0, 0), nrow = 3, byrow = TRUE))
})
test_that("test verious functions with padded tokens, padding = FALSE", {
toks <- tokens(c(doc1 = 'A b c d E f g',
doc2 = 'a b c g'))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = FALSE)
expect_equivalent(nfeature(toks3), 6)
expect_equivalent(nfeature(tokens_tolower(toks3)), 5)
expect_equivalent(nfeature(tokens_toupper(toks3)), 5)
expect_equivalent(as.character(toks3),
c("A", "c", "d", "f", "g", "a", "c", "g"))
})
test_that("test verious functions with padded tokens, padding = TRUE", {
toks <- tokens(c(doc1 = 'A b c d E f g',
doc2 = 'a b c g'))
toks3 <- tokens_remove(toks, c('b', 'e'), padding = TRUE)
expect_equivalent(nfeature(toks3), 7)
expect_equivalent(nfeature(tokens_tolower(toks3)), 6)
expect_equivalent(nfeature(tokens_toupper(toks3)), 6)
expect_equivalent(as.character(toks3),
c("A", "", "c", "d", "", "f", "g", "a", "", "c", "g"))
})
test_that("docnames works for tokens", {
expect_equal(names(data_char_ukimmig2010),
docnames(tokens(data_char_ukimmig2010)))
})
test_that("longer features longer than documents do not crash (#447)", {
toks <- tokens(c(d1 = 'a b', d2 = 'a b c d e'))
feat <- 'b c d e'
# bugs in C++ needs repeated tests
expect_silent(replicate(10, tokens_select(toks, feat)))
expect_equal(
as.list(tokens_select(toks, feat)),
list(d1 = character(0), d2 = c("b", "c", "d", "e"))
)
})
test_that("tokens works as expected for what = \"character\"", {
expect_equal(
as.character(tokens("one, two three.", what = "character", removeSeparators = TRUE)),
c("o", "n", "e", ",", "t", "w", "o", "t", "h", "r", "e", "e", ".")
)
expect_equal(
as.character(tokens("one, two three.", what = "character", removeSeparators = FALSE)),
c("o", "n", "e", ",", " ", "t", "w", "o", " ", "t", "h", "r", "e", "e", ".")
)
expect_equal(
as.character(tokens("one, two three.", what = "character", removePunct = TRUE)),
c("o", "n", "e", "t", "w", "o", "t", "h", "r", "e", "e")
)
})
#' # coerce an object into a tokens class
#' as.tokens(toks)
#' \dontrun{
#' tokens2 <- tokenize(head(inaugTexts, 10), removePunct=TRUE)
#' tokens2_hashed <- hashTokens(tokens2)
#'
#' profvis::profvis({
#' ngrams(tokens2_hashed, n = 2:3, skip = 1:2, concatenator = "-")
#' })
#'
#' microbenchmark::microbenchmark(
#' old=ngrams(tokens2, n = 2:3, skip = 1:2, concatenator = "-"),
#' new=ngrams(tokens2_hashed, n = 2:3, skip = 1:2, concatenator = "-"),
#' times=10, unit='relative'
#' )
#'
#'
#' Rcpp::sourceCpp('src/ngrams_hashed.cpp')
#' Rcpp::sourceCpp('src/ngrams_class.cpp')
#' Rcpp::sourceCpp('src/ngrams.cpp')
#' nm <- new(ngramMaker)
#'
#' microbenchmark::microbenchmark(
#' old=skipgramcpp(tokens2[[1]], 2:3, 1:2, '-'),
#' new=qatd_cpp_ngram_hashed_vector(tokens2_hashed[[1]], 2:3, 1:2),
#' class=nm$generate(tokens2_hashed[[1]], 2:3, 1:2),
#' times=100, unit='relative'
#' )
#'
#' microbenchmark::microbenchmark(
#' obj=nm$generate_list(tokens2_hashed, 2:3, 1:2),
#' ptr=nm$generate_list_ptr(tokens2_hashed, 2:3, 1:2),
#' times=100, unit='relative'
#' )
#'
#'
#' tokens3 <- rep(letters, 50)
#' types3 <- unique(tokens3)
#' tokens3_hashed <- match(tokens3, types3)
#' microbenchmark::microbenchmark(
#' old=skipgramcpp(tokens3, 2:3, 1:2, '-'),
#' new=qatd_cpp_ngram_hashed_vector(tokens3_hashed, 2:3, 1:2),
#' times=10, unit='relative'
#' )
#'
#' # Test with greater lexical diversity
#' tokens4 <- paste0(sample(letters, length(tokens3), replace=TRUE),
#' sample(letters, length(tokens3), replace=TRUE))
#' types4 <- unique(tokens4)
#' tokens4_hashed <- match(tokens4, types4)
#' microbenchmark::microbenchmark(
#' low=qatd_cpp_ngram_hashed_vector(tokens3_hashed, 2:3, 1:2),
#' high=qatd_cpp_ngram_hashed_vector(tokens4_hashed, 2:3, 1:2),
#' times=100, unit='relative'
#' )
#'
#'
#' # Comparison with tokenizers's skip-grams
#' tokenizers::tokenize_skip_ngrams('a b c d e', n=3, k=1)
#' # "a c e" "a b c" "b c d" "c d e"
#' tokenizers::tokenize_skip_ngrams('a b c d e', n=3, k=2)
#' # "a c e" "a b c" "b c d" "c d e"
#'
#' ngrams(tokenize('a b c d e'), n=3, skip=0:1, concatenator=' ')
#' # "a b c" "a b d" "a c d" "a c e" "b c d" "b c e" "b d e" "c d e"
#'
#'}
|
context("tile_plot")
test_that("tile_plot works", {
  # expected plot data: counts of mtcars rows per (cyl, am) cell
  expected <- data.frame(x = c(4, 4, 6, 6, 8, 8),
                         y = c(0, 1, 0, 1, 0, 1),
                         z = c(3, 8, 4, 3, 12, 2))
  g <- tile_plot(mtcars, "cyl", "am", "1")
  expect_equal(g[["data"]], expected)
})
| /tests/testthat/test-tile_plot.R | permissive | wkostelecki/ezplot | R | false | false | 283 | r | context("tile_plot")
test_that("tile_plot works", {
g = tile_plot(mtcars, "cyl", "am", "1")
expect_equal(g[["data"]],
data.frame(x = c(4, 4, 6, 6, 8, 8),
y = c(0, 1, 0, 1, 0, 1),
z = c(3, 8, 4, 3, 12, 2)))
})
|
# plot1.R -- Exploratory Data Analysis course assignment, plot 1.
# Downloads the UCI household power consumption data if missing, subsets two
# days in February 2007, and saves a histogram of Global Active Power as PNG.
# pre-process: download and unzip file
# Assumption: "ExData_Plotting1" folder is present
if (!file.exists("./ExData_Plotting1/exdata-data-household_power_consumption.zip")){
print("File not there, proceed to download")
# mode = "wb" ensures the zip is written as binary (needed on Windows)
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="./ExData_Plotting1/exdata-data-household_power_consumption.zip", mode="wb")
}
if (!file.exists("./ExData_Plotting1/household_power_consumption.txt")){
print("Extracted file not found, proceed with unzip")
unzip("./ExData_Plotting1/exdata-data-household_power_consumption.zip", exdir="./ExData_Plotting1")
}
print("Data preparation")
# earlier experiment kept for reference: read only the first 50 rows
#data <- read.table("./ExData_Plotting1//household_power_consumption.txt", sep=";", nrows = 50, header=TRUE)
# "?" marks missing values in this data set
BaseData <- read.table("./ExData_Plotting1/household_power_consumption.txt", sep=";", header=TRUE, stringsAsFactors = FALSE, na.strings = "?")
#Convert Date from "d/m/Y" text to Date class
BaseData$Date <- as.Date(BaseData$Date, format="%d/%m/%Y")
# keep only 2007-02-01 and 2007-02-02 (character literals coerce to Date here)
data <- subset(BaseData, subset = (Date >="2007-02-01" & Date <= "2007-02-02"))
#Clean up the large BaseData object to free memory
rm(BaseData)
#Append DateTime to new Column: combined Date + Time as POSIXct
date_time <- paste(as.Date(data$Date), data$Time)
data$DateTime <- as.POSIXct(date_time)
print("Data Preparation done")
#Prepare graph: draw histogram on screen, then copy the device to a 480x480 PNG
hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
dev.copy(png, file = "./ExData_Plotting1/plot1.png", height = 480, width = 480)
dev.off()
| /plot1.R | no_license | woeihau/ExData_Plotting1 | R | false | false | 1,507 | r | # pre-process: download and unzip file
# Assumption: "ExData_Plotting1" folder is present
if (!file.exists("./ExData_Plotting1/exdata-data-household_power_consumption.zip")){
print("File not there, proceed to download")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="./ExData_Plotting1/exdata-data-household_power_consumption.zip", mode="wb")
}
if (!file.exists("./ExData_Plotting1/household_power_consumption.txt")){
print("Extracted file not found, proceed with unzip")
unzip("./ExData_Plotting1/exdata-data-household_power_consumption.zip", exdir="./ExData_Plotting1")
}
print("Data preparation")
#data <- read.table("./ExData_Plotting1//household_power_consumption.txt", sep=";", nrows = 50, header=TRUE)
BaseData <- read.table("./ExData_Plotting1/household_power_consumption.txt", sep=";", header=TRUE, stringsAsFactors = FALSE, na.strings = "?")
#Convert Date
BaseData$Date <- as.Date(BaseData$Date, format="%d/%m/%Y")
data <- subset(BaseData, subset = (Date >="2007-02-01" & Date <= "2007-02-02"))
#Clean up BaseData
rm(BaseData)
#Append DateTime to new Column
date_time <- paste(as.Date(data$Date), data$Time)
data$DateTime <- as.POSIXct(date_time)
print("Data Preparation done")
#Prepare graph
hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
dev.copy(png, file = "./ExData_Plotting1/plot1.png", height = 480, width = 480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{filter_SPECIES}
\alias{filter_SPECIES}
\title{Add filter on specific epithet}
\usage{
filter_SPECIES(filter = list(), SPECIES)
}
\arguments{
\item{filter}{Existing filters (or blank list if not provided)}
\item{SPECIES}{List of specific epithets}
}
\description{
Add filter on specific epithet
}
| /man/filter_SPECIES.Rd | no_license | cran/genesysr | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{filter_SPECIES}
\alias{filter_SPECIES}
\title{Add filter on specific epithet}
\usage{
filter_SPECIES(filter = list(), SPECIES)
}
\arguments{
\item{filter}{Existing filters (or blank list if not provided)}
\item{SPECIES}{List of specific epithets}
}
\description{
Add filter on specific epithet
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops-function.R
\name{userfunc_clone}
\alias{userfunc_clone}
\title{Clone UserFunction}
\usage{
userfunc_clone(func, cloned_inputs)
}
\arguments{
\item{func}{- The CNTK `Function` instance on which to apply the operation}
\item{cloned_inputs}{List of cloned input variables that the cloned function should use in place of the original function's inputs}
}
\description{
Creates a clone of this user-defined function.
}
\details{
It assumes that the constructor signature of the user’s implementation of
the user function takes the inputs as individual arguments followed by the
operator name. If the signature is different, then this method needs to be
overriden.
}
| /man/userfunc_clone.Rd | permissive | Bhaskers-Blu-Org2/CNTK-R | R | false | true | 637 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops-function.R
\name{userfunc_clone}
\alias{userfunc_clone}
\title{Clone UserFunction}
\usage{
userfunc_clone(func, cloned_inputs)
}
\arguments{
\item{func}{- The CNTK `Function` instance on which to apply the operation}
\item{cloned_inputs}{List of cloned input variables that the cloned function should use in place of the original function's inputs}
}
\description{
Creates a clone of this user-defined function.
}
\details{
It assumes that the constructor signature of the user’s implementation of
the user function takes the inputs as individual arguments followed by the
operator name. If the signature is different, then this method needs to be
overriden.
}
|
library(plotly)
library(shiny)
library(shinyjs)
shinyUI(fluidPage(
useShinyjs(),
navbarPage("Bills",
tabPanel("Bill",
column(4,wellPanel( div(id="form",
helpText("Enter Bill"),dateInput("dateid", "Date:", value = Sys.Date(),format = "dd-mm-yyyy"),
textInput("Item","Item"),
selectInput("Cat", "Cat:",
c("Food"="Food","Clothing"="Clothing","Electronics"="Electronics","Kitchen"="Kitchen","HouseAssc"="HouseAssc","Travel"="Travel",
"Medicine"="Medicine","Stationery"="Stationery","Uni"="Uni","Other"="Other"
)),
textInput("Cost","Cost")
),
actionButton("Add","Add"),
actionButton("Clear","Clear")
)),
column(4,wellPanel(
helpText("Update Bill"),
tabsetPanel(
tabPanel("Update",div(id="form1",textInput("Rownum","Enter RowNum"),
selectInput("Select","Select",
c("Date"="ChangDate","Item"="ChangeItem","Cost"="ChangCost","Cat"="Changecat"
)
)
,
uiOutput("ui"),
actionButton("Update","Update"),
actionButton("Reset","Reset")
)),
tabPanel("Delete",div(id="form2",textInput("Row","Enter Rownum "),
actionButton("Delete","Delete"),
actionButton("Reset1","Reset")
))
)
))
)
,tabPanel("Stats",
sidebarLayout(
sidebarPanel(
conditionalPanel(
'input.dataview==="Table" ',
radioButtons("Grouby", " GroupBy:",
c("Month" = "month",
"Day"="day",
"Year"="year",
"Item" = "item",
"Total"="total",
"monthitem"="monthitem",
"monthcat"="monthcat",
"itemyear"="itemyear",
"month_cat_item"="month_cat_item",
"year_cat_item"="year_cat_item"
),selected="total"),
actionButton("Modify","Modify"),
downloadLink('download','download')),
conditionalPanel(
'input.dataview==="PIE"',
numericInput("YEAR","Choose Year:",min=2017,value=2017),
radioButtons("Pie","Pie:",
c("CatYear"="CatYear",
"CatMon"="CatMon"
)),
numericInput("MONTH","Choose Month:",min=1,value=1,max=12)
),
conditionalPanel('input.dataview==="Analysis"' ,
numericInput("YEAR1","Choose Year:",min=2017,value=2017),
radioButtons("Pie1","Select:",
c("CatYearA"="CatYearA",
"CatMonA"="CatMonA",
"YearA"="YearA"
)),
numericInput("MONTH1","Choose Month:",min=1,value=1,max=12),
helpText("Check the Viewer pane in Rstudio")
)
),
mainPanel(
tabsetPanel(
id='dataview',
tabPanel( "Table", DT::dataTableOutput("mytable2")),
tabPanel("PIE",plotOutput("PiePlot")),
tabPanel("Analysis",plotlyOutput("Analysys"))
)))
),
tabPanel("View",
sidebarLayout(
sidebarPanel(
helpText("you can check your wages here"),
actionButton("Refresh","Refresh")
)
,
mainPanel(
id = 'dataset',
DT::dataTableOutput("mytable1")
)
)
)
)
))
| /Ui.R | no_license | naz947/Bills-App | R | false | false | 5,378 | r | library(plotly)
library(shiny)
library(shinyjs)
shinyUI(fluidPage(
useShinyjs(),
navbarPage("Bills",
tabPanel("Bill",
column(4,wellPanel( div(id="form",
helpText("Enter Bill"),dateInput("dateid", "Date:", value = Sys.Date(),format = "dd-mm-yyyy"),
textInput("Item","Item"),
selectInput("Cat", "Cat:",
c("Food"="Food","Clothing"="Clothing","Electronics"="Electronics","Kitchen"="Kitchen","HouseAssc"="HouseAssc","Travel"="Travel",
"Medicine"="Medicine","Stationery"="Stationery","Uni"="Uni","Other"="Other"
)),
textInput("Cost","Cost")
),
actionButton("Add","Add"),
actionButton("Clear","Clear")
)),
column(4,wellPanel(
helpText("Update Bill"),
tabsetPanel(
tabPanel("Update",div(id="form1",textInput("Rownum","Enter RowNum"),
selectInput("Select","Select",
c("Date"="ChangDate","Item"="ChangeItem","Cost"="ChangCost","Cat"="Changecat"
)
)
,
uiOutput("ui"),
actionButton("Update","Update"),
actionButton("Reset","Reset")
)),
tabPanel("Delete",div(id="form2",textInput("Row","Enter Rownum "),
actionButton("Delete","Delete"),
actionButton("Reset1","Reset")
))
)
))
)
,tabPanel("Stats",
sidebarLayout(
sidebarPanel(
conditionalPanel(
'input.dataview==="Table" ',
radioButtons("Grouby", " GroupBy:",
c("Month" = "month",
"Day"="day",
"Year"="year",
"Item" = "item",
"Total"="total",
"monthitem"="monthitem",
"monthcat"="monthcat",
"itemyear"="itemyear",
"month_cat_item"="month_cat_item",
"year_cat_item"="year_cat_item"
),selected="total"),
actionButton("Modify","Modify"),
downloadLink('download','download')),
conditionalPanel(
'input.dataview==="PIE"',
numericInput("YEAR","Choose Year:",min=2017,value=2017),
radioButtons("Pie","Pie:",
c("CatYear"="CatYear",
"CatMon"="CatMon"
)),
numericInput("MONTH","Choose Month:",min=1,value=1,max=12)
),
conditionalPanel('input.dataview==="Analysis"' ,
numericInput("YEAR1","Choose Year:",min=2017,value=2017),
radioButtons("Pie1","Select:",
c("CatYearA"="CatYearA",
"CatMonA"="CatMonA",
"YearA"="YearA"
)),
numericInput("MONTH1","Choose Month:",min=1,value=1,max=12),
helpText("Check the Viewer pane in Rstudio")
)
),
mainPanel(
tabsetPanel(
id='dataview',
tabPanel( "Table", DT::dataTableOutput("mytable2")),
tabPanel("PIE",plotOutput("PiePlot")),
tabPanel("Analysis",plotlyOutput("Analysys"))
)))
),
tabPanel("View",
sidebarLayout(
sidebarPanel(
helpText("you can check your wages here"),
actionButton("Refresh","Refresh")
)
,
mainPanel(
id = 'dataset',
DT::dataTableOutput("mytable1")
)
)
)
)
))
|
#*** gspline1.R ***/
##
## AUTHOR: Arnost Komarek (my name in TeX: Arno\v{s}t Kom\'arek)
## arnost.komarek[AT]mff.cuni.cz
##
## CREATED: 22/01/2007
##
## PURPOSE: Values of the density based on the UNIVARIATE G-spline
## + random numbers generation
##
## FUNCTIONS: 22/01/2007: dgspline1
## 29/01/2007: rgspline1
##
##
#* ********************************************************************************* */
## Random numbers generation from the UNIVARIATE G-spline
##
## n ....... number of random draws (positive scalar)
## mu ...... knot locations of the G-spline basis
## sigma ... basis standard deviations (scalar is recycled; else one per knot)
## weight .. mixture weights, on the log scale when logw = TRUE
## intcpt .. location shift applied to the mixture
## scale ... scale factor (must be positive)
##
## Returns a numeric vector of n draws, computed by the compiled routine
## 'rGspline1R' from the glmmAK package.
rgspline1 <- function(n, mu, sigma, weight, intcpt = 0, scale = 1, logw = TRUE)
{
  thispackage <- "glmmAK"
  #thispackage <- NULL

  if (n <= 0) stop("Incorrect n supplied")
  nknot <- length(mu)
  if (!nknot) stop("Incorrect mu supplied")
  if (length(sigma) == 1) sigma <- rep(sigma, nknot)
  if (length(sigma) != nknot) stop("sigma and mu must be of the same length")
  if (length(weight) != nknot) stop("Incorrect weight supplied")
  if (length(intcpt) != 1) stop("Incorrect intcpt supplied")
  if (length(scale) != 1) stop("Incorrect scale supplied")
  if (scale <= 0) stop("Value of scale must be positive")

  if (logw) weight <- exp(weight)
  ## BUG FIX: validate the sign BEFORE normalization.  The original code
  ## divided by sum(weight) first, so an all-negative weight vector (with a
  ## negative sum) became all-positive and slipped past this check.
  if (any(weight < 0)) stop("weights may not be negative")
  weight <- weight / sum(weight)

  ## renamed from 'sample' to avoid shadowing base::sample
  res <- .C("rGspline1R",
            x = double(n), weight = as.double(weight), n = as.integer(n),
            knots = as.double(mu), sigma = as.double(sigma), nknots = as.integer(nknot),
            intcpt = as.double(intcpt), tau = as.double(scale), logw = as.integer(0),
            PACKAGE = thispackage)
  return(res$x)
}
## Values of the density based on the UNIVARIATE G-spline
##
## x ....... grid of values (vector, matrix or array) at which to evaluate
## mu, sigma, weight, intcpt, scale, logw ... as in rgspline1()
##
## Returns the G-spline density evaluated at x, with the dim/dimnames/names
## attributes of x carried over.  Evaluation is done by the compiled routine
## 'eval_Gspline1' from the glmmAK package.
dgspline1 <- function(x, mu, sigma, weight, intcpt = 0, scale = 1, logw = TRUE)
{
  thispackage <- "glmmAK"
  #thispackage <- NULL

  ngrid <- length(x)
  if (!ngrid) stop("Incorrect x supplied")
  nknot <- length(mu)
  if (!nknot) stop("Incorrect mu supplied")
  if (length(sigma) == 1) sigma <- rep(sigma, nknot)
  if (length(sigma) != nknot) stop("sigma and mu must be of the same length")
  if (length(weight) != nknot) stop("Incorrect weight supplied")
  if (length(intcpt) != 1) stop("Incorrect intcpt supplied")
  if (length(scale) != 1) stop("Incorrect scale supplied")
  if (scale <= 0) stop("Value of scale must be positive")

  if (logw) weight <- exp(weight)
  ## BUG FIX: validate the sign BEFORE normalization (same defect as in
  ## rgspline1): dividing an all-negative weight vector by its negative sum
  ## made it pass the non-negativity check undetected.
  if (any(weight < 0)) stop("weights may not be negative")
  weight <- weight / sum(weight)

  ## Pure-R reference implementation of what the C routine computes:
  # gx <- x
  # for (i in 1:length(x)){
  #   z <- (x[i] - intcpt)/scale
  #   gkx <- dnorm(z, mean=mu, sd=sigma)
  #   gx[i] <- sum(weight*gkx)/scale
  # }

  fit <- .C("eval_Gspline1",
            average = double(ngrid), value = double(ngrid),
            weight = as.double(weight), knots.tau = double(nknot), sigma.tau = double(nknot),
            grid = as.double(x), ngrid = as.integer(ngrid), standard = as.integer(0),
            knots = as.double(mu), sigma = as.double(sigma), nknots = as.integer(nknot),
            intcpt = as.double(intcpt), tau = as.double(scale), logw = as.integer(0),
            PACKAGE = thispackage)

  ## carry the shape/name attributes of x over to the result
  if (!is.null(dim(x))){
    attr(fit$value, "dim") <- attr(x, "dim")
    attr(fit$value, "dimnames") <- attr(x, "dimnames")
  } else {
    attr(fit$value, "names") <- attr(x, "names")
  }
  return(fit$value)
}
| /R/gspline1.R | no_license | cran/glmmAK | R | false | false | 3,385 | r | #*** gspline1.R ***/
##
## AUTHOR: Arnost Komarek (my name in TeX: Arno\v{s}t Kom\'arek)
## arnost.komarek[AT]mff.cuni.cz
##
## CREATED: 22/01/2007
##
## PURPOSE: Values of the density based on the UNIVARIATE G-spline
## + random numbers generation
##
## FUNCTIONS: 22/01/2007: dgspline1
## 29/01/2007: rgspline1
##
##
#* ********************************************************************************* */
## Random numbers generation from the UNIVARIATE G-spline
rgspline1 <- function(n, mu, sigma, weight, intcpt=0, scale=1, logw=TRUE)
{
thispackage <- "glmmAK"
#thispackage <- NULL
if (n <= 0) stop("Incorrect n supplied")
nknot <- length(mu)
if (!nknot) stop("Incorrect mu supplied")
if (length(sigma) == 1) sigma <- rep(sigma, nknot)
if (length(sigma) != nknot) stop("sigma and mu must be of the same length")
if (length(weight) != nknot) stop("Incorrect weight supplied")
if (length(intcpt) != 1) stop("Incorrect intcpt supplied")
if (length(scale) != 1) stop("Incorrect scale supplied")
if (scale <= 0) stop("Value of scale must be positive")
if (logw) weight <- exp(weight)
weight <- weight/sum(weight)
if (any(weight < 0)) stop("weights may not be negative")
sample <- .C("rGspline1R",
x=double(n), weight=as.double(weight), n=as.integer(n),
knots=as.double(mu), sigma=as.double(sigma), nknots=as.integer(nknot),
intcpt=as.double(intcpt), tau=as.double(scale), logw=as.integer(0),
PACKAGE = thispackage)
return(sample$x)
}
## Values of the density based on the UNIVARIATE G-spline
dgspline1 <- function(x, mu, sigma, weight, intcpt=0, scale=1, logw=TRUE)
{
thispackage <- "glmmAK"
#thispackage <- NULL
ngrid <- length(x)
if (!ngrid) stop("Incorrect x supplied")
nknot <- length(mu)
if (!nknot) stop("Incorrect mu supplied")
if (length(sigma) == 1) sigma <- rep(sigma, nknot)
if (length(sigma) != nknot) stop("sigma and mu must be of the same length")
if (length(weight) != nknot) stop("Incorrect weight supplied")
if (length(intcpt) != 1) stop("Incorrect intcpt supplied")
if (length(scale) != 1) stop("Incorrect scale supplied")
if (scale <= 0) stop("Value of scale must be positive")
if (logw) weight <- exp(weight)
weight <- weight/sum(weight)
if (any(weight < 0)) stop("weights may not be negative")
# gx <- x
# for (i in 1:length(x)){
# z <- (x[i] - intcpt)/scale
# gkx <- dnorm(z, mean=mu, sd=sigma)
# gx[i] <- sum(weight*gkx)/scale
# }
fit <- .C("eval_Gspline1",
average=double(ngrid), value=double(ngrid),
weight=as.double(weight), knots.tau=double(nknot), sigma.tau=double(nknot),
grid=as.double(x), ngrid=as.integer(ngrid), standard=as.integer(0),
knots=as.double(mu), sigma=as.double(sigma), nknots=as.integer(nknot),
intcpt=as.double(intcpt), tau=as.double(scale), logw=as.integer(0),
PACKAGE = thispackage)
if (!is.null(dim(x))){
attr(fit$value, "dim") <- attr(x, "dim")
attr(fit$value, "dimnames") <- attr(x, "dimnames")
}else{
attr(fit$value, "names") <- attr(x, "names")
}
return(fit$value)
}
|
## Put comments here that give an overall description of what your
## functions do
## This function creates a cacheable matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function uses the above function to get inverse of matrix wither from cache or by calculating it
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
| /cachematrix.R | no_license | madforstrength/ProgrammingAssignment2 | R | false | false | 735 | r | ## Put comments here that give an overall description of what your
## functions do
## This function creates a cacheable matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function uses the above function to get inverse of matrix wither from cache or by calculating it
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
# Constants object
getConstants = function() {
a = list()
# Board measurements
a$R1 = 6.35 # center to DB wire
a$R2 = 15.9 # center to SB wire
a$R3 = 99 # center to inner triple ring
a$R4 = 107 # center to outer triple ring
a$R5 = 162 # center to inner double ring
a$R = 170 # center to outer double ring
# Dartboard scores arrangement, clockwise starting at top center
# Standard arrangement
a$standard = c(20,1,18,4,13,6,10,15,2,17,3,19,7,16,8,11,14,9,12,5)
# Curtis arrangement
a$curtis = c(20,1,19,3,17,5,15,7,13,9,11,10,12,8,14,6,16,4,18,2)
# Linear arrangement
a$linear = c(20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1)
# What is the arrangement of scores being used?
a$S = a$standard
return(a)
}
| /R/constants.R | no_license | cran/darts | R | false | false | 812 | r | # Constants object
getConstants = function() {
a = list()
# Board measurements
a$R1 = 6.35 # center to DB wire
a$R2 = 15.9 # center to SB wire
a$R3 = 99 # center to inner triple ring
a$R4 = 107 # center to outer triple ring
a$R5 = 162 # center to inner double ring
a$R = 170 # center to outer double ring
# Dartboard scores arrangement, clockwise starting at top center
# Standard arrangement
a$standard = c(20,1,18,4,13,6,10,15,2,17,3,19,7,16,8,11,14,9,12,5)
# Curtis arrangement
a$curtis = c(20,1,19,3,17,5,15,7,13,9,11,10,12,8,14,6,16,4,18,2)
# Linear arrangement
a$linear = c(20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1)
# What is the arrangement of scores being used?
a$S = a$standard
return(a)
}
|
library(leaps)
------------------------------------------ FatigueLevel 7 niveis -------------------------------------------
dados <- read.csv("~/OneDrive/Documentos/MiEI/3.º ano/2.º semestre/Sistemas de Representação de Conhecimento e Raciocínio/Trabalhos práticos/SRCR-1617/Exercício 3/exaustao_FatigueLevel_7.csv", header= TRUE, sep = ';', dec = ',' )
reggi <- regsubsets(FatigueLevel ~ Performance.KDTMean + Performance.MAMean + Performance.MVMean+ Performance.TBCMean + Performance.DDCMean +Performance.DMSMean + Performance.AEDMean + Performance.ADMSLMean + Performance.Task,dados, method = "backward")
summary(reggi) | /Licenciatura_BSc/3º Ano/2º Semestre/Sistemas Representação Conhecimento Raciocínio/Exercício 3/antigo/Procura.R | permissive | MikeDM16/Computer-Science-UMinho | R | false | false | 632 | r | library(leaps)
------------------------------------------ FatigueLevel 7 niveis -------------------------------------------
dados <- read.csv("~/OneDrive/Documentos/MiEI/3.º ano/2.º semestre/Sistemas de Representação de Conhecimento e Raciocínio/Trabalhos práticos/SRCR-1617/Exercício 3/exaustao_FatigueLevel_7.csv", header= TRUE, sep = ';', dec = ',' )
reggi <- regsubsets(FatigueLevel ~ Performance.KDTMean + Performance.MAMean + Performance.MVMean+ Performance.TBCMean + Performance.DDCMean +Performance.DMSMean + Performance.AEDMean + Performance.ADMSLMean + Performance.Task,dados, method = "backward")
summary(reggi) |
## Performs CS CEL processing
## Performs SNP6 CEL processing.
##
## Pipeline : CEL -> OSCHP (via Affymetrix Power Tools), then L2R / BAF
## extraction, SNP normalization (rcnorm), BAF segmentation + rescaling
## (changepoint + mclust), optional Wave / GC L2R re-normalization, and
## construction of an ASCAT-like object (optionally written as RDS + plotted).
##
## Main arguments :
##  CEL          : path to a GenomeWideSNP_6 CEL file (possibly compressed).
##  samplename   : sample identifier (also used as the output sub-directory).
##  l2r.level    : "normal" (Log2Ratio track) or "weighted" (SmoothSignal track).
##  gc.renorm / gc.rda, wave.renorm / wave.rda : toggles + optional custom
##                 track RDA files for GC / Wave L2R re-normalization.
##  mingap       : minimal genomic distance (bp) defining a chromosomal gap.
##  oschp.keep   : keep the intermediate OSCHP file ?
##  apt.version / apt.build : APT binaries version and annotation build.
##  genome.pkg   : BSgenome package matching the APT build's genome.
##  return.data / write.data / plot : output control.
##  force        : overwrite an existing [samplename] output directory ?
## Returns the ASCAT-like object when return.data is TRUE; results are in any
## case written under out.dir/samplename (when write.data / plot are TRUE).
##
## BUGFIXES (review) :
##  - wave.renorm branch stored 'ren.res$mrenorm$pos' (typo, always NULL), so
##    the "wave.renorm" meta field was silently "" ; now 'ren.res$renorm$pos',
##    consistent with the gc.renorm branch.
##  - plot title had unbalanced parentheses ("median-centered))").
SNP6.Process <- function(CEL = NULL, samplename = NULL, l2r.level = "normal", gc.renorm = TRUE, gc.rda = NULL, wave.renorm = TRUE, wave.rda = NULL, mingap = 1E+06, out.dir = getwd(), oschp.keep = FALSE, force.OS = NULL, apt.version = "1.20.0", apt.build = "na35.r1", genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", return.data = FALSE, write.data = TRUE, plot = TRUE, force = FALSE) {
  
  ## Early checks : mandatory arguments and file existence
  if (is.null(CEL)) stop(tmsg("A CEL file is required !"), call. = FALSE)
  if (is.null(samplename)) stop(tmsg("A samplename is required !"), call. = FALSE)
  if (!file.exists(CEL)) stop(tmsg(paste0("Could not find CEL file ", CEL, " !")), call. = FALSE)
  if (gc.renorm) { if (!is.null(gc.rda)) { if (!file.exists(gc.rda)) stop(tmsg(paste0("Could not find gc.rda file ", gc.rda)), call. = FALSE) } }
  if (wave.renorm) { if (!is.null(wave.rda)) { if (!file.exists(wave.rda)) stop(tmsg(paste0("Could not find wave.rda file ", wave.rda)), call. = FALSE) } }
  if (is.null(genome.pkg)) stop(tmsg("A BSgenome package name is required !"), call. = FALSE)
  if (!genome.pkg %in% BSgenome::installed.genomes()) {
    if (genome.pkg %in% BSgenome::available.genomes()) {
      stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
    } else {
      stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
    }
  }
  ## Refuse to clobber an existing output directory unless force = TRUE
  if (dir.exists(samplename)) { if (!force) stop(tmsg(paste0("A [", samplename, '] dir already exists !')), call. = FALSE) else unlink(samplename, recursive = TRUE, force = FALSE) }
  ## Maps the user-facing l2r.level keyword to the OSCHP data column name
  l2r.lev.conv <- list("normal" = "Log2Ratio", "weighted" = "SmoothSignal")
  if (!(l2r.level %in% names(l2r.lev.conv))) stop(tmsg("Option 'l2r.level' should be 'normal' or 'weighted' !"), call. = FALSE)
  
  ## Handling compressed files (transparently decompresses, returns new path)
  CEL <- compressed_handler(CEL)
  
  ## Secondary checks : array type read from the CEL header must be supported
  sup.array <- c("GenomeWideSNP_6")
  arraytype.cel = affxparser::readCelHeader(filename = CEL)$chiptype
  if (!arraytype.cel %in% sup.array) stop(tmsg(paste0("Identified array type '", arraytype.cel, "' is not supported by this function !")), call. = FALSE)
  
  ## Checking APT version compatibility (warn only : program may still work)
  valid.apt.versions <- c("1.20.0")
  if (!(apt.version %in% valid.apt.versions)) warning(tmsg(paste0("APT version ", apt.version, " is not supported. Program may fail !")))
  
  ## Checking build compatibility (warn only)
  valid.builds <- c("na35.r1")
  if (!(tolower(apt.build) %in% valid.builds)) warning(tmsg(paste0("Build ", apt.build, " is not supported. Program may fail !")))
  
  ## Checking that the versioned APT wrapper package is installed, then load it
  apt.snp6.pkg.name <- paste0("apt.snp6.", apt.version)
  if (!(apt.snp6.pkg.name %in% installed.packages())) stop(tmsg(paste0("Package ", apt.snp6.pkg.name, " not found !")), call. = FALSE)
  suppressPackageStartupMessages(require(package = apt.snp6.pkg.name, character.only = TRUE))
  
  ## Processing CEL to an OSCHP file (external APT run)
  oscf <- apt.snp6.process(CEL = CEL, samplename = samplename, out.dir = out.dir, temp.files.keep = FALSE, force.OS = force.OS, apt.build = apt.build)
  
  ## Reading OSCHP
  my.oschp <- oschp.load(file = oscf)
  sex.chr <- c("chrX", "chrY")
  
  ## Processing : meta (and checks) ; ensure the snp-qc key exists even if APT
  ## did not compute it
  if (!("affymetrix-chipsummary-snp-qc" %in% names(my.oschp$Meta$analysis))) my.oschp$Meta$analysis[["affymetrix-chipsummary-snp-qc"]] <- NA
  
  ### Loading genome info
  tmsg(paste0("Loading ", genome.pkg, " ..."))
  suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
  BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
  genome2 <- metadata(BSg.obj)$genome
  cs <- chromobjector(BSg.obj)
  
  ### Getting basic meta ; the OSCHP genome build must match the BSgenome one
  genome <- getmeta("affymetrix-algorithm-param-genome-version", my.oschp$Meta$analysis)
  if (genome != genome2) stop(tmsg(paste0("Genome build name given with BSgenome package '", genome.pkg, "', (", genome2, ") is different from the genome build specified by provided APT build version '", apt.build, "' (", genome, ") !")), call. = FALSE)
  arraytype <- getmeta("affymetrix-array-type", my.oschp$Meta$analysis)
  manufacturer <- getmeta("program-company", my.oschp$Meta$analysis)
  species <- getmeta("affymetrix-algorithm-param-genome-species", my.oschp$Meta$analysis)
  ## SNP6 gender codes (1/2/0/NA) -> words -> karyotype-like labels (XY/XX/NA)
  snp6.conv <- list("1" = "male", "2" = "female", "NA" = "NA", "0" = "NA")
  gender.conv <- list("female" = "XX", "male" = "XY", "NA" = "NA")
  pgender <- gender.conv[[snp6.conv[[as.character(as.numeric(getmeta("affymetrix-chipsummary-Gender", my.oschp$Meta$analysis)))]]]]
  if (!(arraytype %in% sup.array)) stop(paste0("Unsupported array : '", arraytype, "' !"), call. = FALSE)
  
  ## Reconstructing missing meta (scanner id / scan date from the DAT header)
  ## when the OSCHP does not carry a CEL1 meta section
  if (!"CEL1" %in% names(my.oschp$Meta)) {
    datheader.split <- unlist(strsplit(x = affxparser::readCelHeader(filename = CEL)$datheader, split = "\\s+"))
    my.oschp$Meta$CEL1$acquisition <- list("affymetrix-scanner-id" = datheader.split[8], "affymetrix-scan-date" = paste0(datheader.split[6:7], collapse = " "))
    my.oschp$Meta$CEL1$array <- list("affymetrix-array-id" = NA, "affymetrix-array-barcode" = NA)
  }
  ## Basic sample-level meta carried through the whole pipeline
  meta.b <- list(
    samplename = samplename,
    source = "microarray",
    source.file = CEL,
    type = arraytype,
    manufacturer = manufacturer,
    species = species,
    genome = genome,
    genome.pkg = genome.pkg,
    predicted.gender = pgender
  )
  
  ## Extracting data : L2R (track chosen by l2r.level)
  ao.df <- dplyr::as.tbl(data.frame(my.oschp$MultiData$CopyNumber[,c(2:4)], L2R.ori = as.vector(my.oschp$MultiData$CopyNumber[[l2r.lev.conv[[l2r.level]]]])))
  ao.df$Chromosome <- as.integer(ao.df$Chromosome) ### Patching the Chromosome column : on R4+, it is read as 'raw', we need ints
  affy.meta <- my.oschp$Meta
  affy.chrom <- my.oschp$MultiData[["CopyNumber_&keyvals"]][seq.int(3, nrow(my.oschp$MultiData[["CopyNumber_&keyvals"]]), 3),1:2]
  ao.df$L2R <- ao.df$L2R.ori
  ## Chromosome display names keyed by their OSCHP numeric code
  ak <- affy.chrom$val
  names(ak) <- as.numeric(sub(":display", "", affy.chrom$key))
  ao.df$chrA <- as.vector(ak[as.character(ao.df$Chromosome)])
  ao.df$chr <- paste0("chr", ao.df$chrA)
  ao.df$chrN <- unlist(cs$chrom2chr[ao.df$chr])
  
  ## Normalizing SNPs (BAF computed from raw CEL intensities by rcnorm)
  tmsg("Normalizing SNP data (using rcnorm) ...")
  baf.df <- rcnorm::rcnorm.snp(myCEL = CEL, genome.pkg = genome.pkg, allSNPs = FALSE)
  baf.df$chr <- paste0("chr", baf.df$chrs)
  baf.df$chrN <- unlist(cs$chrom2chr[baf.df$chr])
  baf.df <- baf.df[order(baf.df$chrN, baf.df$pos),]
  baf.df <- baf.df[!is.na(baf.df$BAF),]
  gc()
  ## Join L2R, BAF and Allele Difference on probeset name
  ao.df <- suppressWarnings(Reduce(function(t1, t2) dplyr::left_join(t1, t2, by = "ProbeSetName"), list(ao.df, dplyr::as.tbl(data.frame(ProbeSetName = rownames(baf.df), BAF.ori = baf.df$BAF, BAF = baf.df$BAF)), dplyr::as.tbl(my.oschp$MultiData$CopyNumber[,c(2,9)]))))
  rm(my.oschp, baf.df)
  gc()
  
  ## Hacking Type of the 'Allele Difference' column (from array to vector)
  ao.df[['Allele Difference']] <- as.vector(ao.df[['Allele Difference']])
  ao.df <- dplyr::arrange(ao.df, chrN, Position, ProbeSetName)
  colnames(ao.df)[3] <- "pos"
  
  ## Merging L2R and BAF data : drop probes with neither value
  ao.df <- ao.df[!(is.na(ao.df$L2R) & is.na(ao.df$BAF)),]
  
  ## BAF segmentation : PELT changepoints on the Allele Difference variance,
  ## refined with chromosome ends, then per-segment BAF clustering
  minseglen <- 50
  nna <- !is.na(ao.df$BAF) & !is.na(ao.df[["Allele Difference"]])
  ao.df$mBAF <- BAF2mBAF(ao.df$BAF)
  peltres <- changepoint::cpt.var(ao.df[["Allele Difference"]][nna], penalty = "BIC", method = "PELT", minseglen = minseglen)@cpts
  ## Refining with chr ends
  kends <- vapply(unique(ao.df$chrN[nna]), function(k) { max(which(ao.df$chrN[nna] == k))}, 1L)
  peltres <- sort(unique(c(peltres, kends)))
  ## Drop breakpoints closer than minseglen to each other, keeping chr ends
  prdiff <- diff(peltres)
  toremove <- vector()
  if (any(prdiff < minseglen)) {
    for (b in which(prdiff < minseglen)) {
      if (peltres[b+1] %in% kends) toremove <- c(toremove, b) else toremove <- c(toremove, b+1)
    }
    peltres <- peltres[-toremove]
  }
  
  ## Clustering BAF segments : 4 equal-variance gaussian components per
  ## segment (1 and 4 = homozygous bands, 2 and 3 = heterozygous)
  bs.end <- peltres
  bs.start <- c(1, peltres[-length(peltres)]+1)
  ao.df$cluster <- NA
  ao.df$uni <- FALSE
  suppressPackageStartupMessages(library(mclust))
  mc.G <- 4
  mc.mN <- "E"
  smeds <- hrates <- vector()
  for(i in seq_along(bs.start)) {
    mcresBIC <- mclust::mclustBIC(data = ao.df$BAF[nna][bs.start[i]:bs.end[i]], G = mc.G, modelNames = mc.mN, verbose = FALSE)
    mcres <- mclust::Mclust(data = ao.df$BAF[nna][bs.start[i]:bs.end[i]], G = mc.G, modelNames = mc.mN, verbose = FALSE, x = mcresBIC)$classification
    ao.df$cluster[nna][bs.start[i]:bs.end[i]] <- mcres
    ## Exactly two populated clusters -> flag segment as possible unisomy
    if (length(unique(mcres)) == 2) ao.df$uni[nna][bs.start[i]:bs.end[i]] <- TRUE
    ## Per-segment heterozygous mBAF median and heterozygous rate
    smeds <- c(smeds, median(ao.df$mBAF[nna][bs.start[i]:bs.end[i]][!mcres %in% c(1,4)], na.rm = TRUE))
    hrates <- c(hrates, length(which(!mcres %in% c(1,4))) / length(mcres))
  }
  
  ## Rescaling : map each segment's homozygous band medians to [0, 1]
  tmsg("Rescaling BAF ...")
  ao.df$BAF.unscaled <- ao.df$BAF
  for(i in seq_along(bs.start)) {
    lmed <- median(ao.df$BAF.unscaled[nna][bs.start[i]:bs.end[i]][ao.df$cluster[nna][bs.start[i]:bs.end[i]] == 1], na.rm = TRUE)
    umed <- median(ao.df$BAF.unscaled[nna][bs.start[i]:bs.end[i]][ao.df$cluster[nna][bs.start[i]:bs.end[i]] == 4], na.rm = TRUE)
    ao.df$BAF[nna][bs.start[i]:bs.end[i]] <- (lmed - ao.df$BAF[nna][bs.start[i]:bs.end[i]]) / (lmed - umed)
  }
  ao.df$mBAF <- BAF2mBAF(ao.df$BAF)
  
  ## Rescue : collapse 4 clusters to homo (1) / hetero (2), then force to
  ## heterozygous segments with few but centered het calls
  ao.df$cluster2 <- ao.df$cluster
  ao.df$cluster2[ao.df$cluster2 == 4] <- 1
  ao.df$cluster2[ao.df$cluster2 > 1] <- 2
  target.hrate <- .3
  for(i in seq_along(hrates)) {
    if (hrates[i] < .2 & smeds[i] < .45) {
      ao.df$cluster2[nna][bs.start[i]:bs.end[i]] <- 2
      ao.df$uni[nna][bs.start[i]:bs.end[i]] <- TRUE
    }
  }
  
  ## L2R renormalizations ; smo = odd running-median window (~ n/550 probes)
  tmsg("Normalizing L2R data ...")
  smo <- round(nrow(ao.df) / 550)
  if(smo%%2 == 0) smo <- smo+1
  
  ### Wave re-normalization (optional)
  if (wave.renorm) {
    tmsg("Wave re-normalization ...")
    ren.res <- renorm.go(input.data = ao.df, renorm.rda = wave.rda, track.type = "Wave", smo = smo, arraytype = arraytype, genome = genome)
    ao.df <- ren.res$data
    fitted.l2r <- ren.res$renorm$l2r$l2r
    if(is.null(ren.res$renorm$pos)) {
      meta.b <- setmeta("wave.renorm", "None", meta.b)
      tmsg(" No positive fit.")
    } else {
      ## Tweaking sex chromosomes : restore their original median shift
      ## relative to autosomes, which renormalization may have distorted
      sex.idx <- ao.df$chr %in% sex.chr
      auto.ori.med <- median(ao.df$L2R[!sex.idx], na.rm = TRUE)
      auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
      if (any(sex.idx)) {
        for (k in sex.chr) {
          k.idx <- ao.df$chr == k
          if (any(k.idx)) {
            k.ori.diffmed <- median(ao.df$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
            k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
            fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
          }
        }
      }
      ## BUGFIX : was 'ren.res$mrenorm$pos' (always NULL)
      meta.b <- setmeta("wave.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
    }
    rm(ren.res)
    ao.df[["L2R.Wave"]] <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
    ao.df$L2R <- ao.df[["L2R.Wave"]]
  } else {
    meta.b <- setmeta("wave.renorm", "FALSE", meta.b)
  }
  
  ### GC re-normalization (optional) ; same sex-chromosome tweak as Wave
  if (gc.renorm) {
    tmsg("GC renormalization ...")
    ren.res <- renorm.go(input.data = ao.df, renorm.rda = gc.rda, track.type = "GC", smo = smo, arraytype = arraytype, genome = genome)
    ao.df <- ren.res$data
    fitted.l2r <- ren.res$renorm$l2r$l2r
    if(is.null(ren.res$renorm$pos)) {
      meta.b <- setmeta("gc.renorm", "None", meta.b)
      tmsg(" No positive fit.")
    } else {
      ## Tweaking sex chromosomes
      sex.idx <- ao.df$chr %in% sex.chr
      auto.ori.med <- median(ao.df$L2R[!sex.idx], na.rm = TRUE)
      auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
      if (any(sex.idx)) {
        for (k in sex.chr) {
          k.idx <- ao.df$chr == k
          if (any(k.idx)) {
            k.ori.diffmed <- median(ao.df$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
            k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
            fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
          }
        }
      }
      meta.b <- setmeta("gc.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
    }
    rm(ren.res)
    ao.df[["L2R.GC"]] <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
    ao.df$L2R <- ao.df[["L2R.GC"]]
  } else {
    meta.b <- setmeta("gc.renorm", "FALSE", meta.b)
  }
  
  ## Rough median-centering of L2R
  ao.df$L2R <- ao.df$L2R - median(ao.df$L2R, na.rm = TRUE)
  
  ## Identifying gaps (inter-probe distance >= mingap) and clustering
  ## chromosomal portions delimited by gaps / chromosome ends
  gaps <- which(diff(ao.df$pos) >= mingap)
  kends <- vapply(unique(ao.df$Chromosome), function(k) { max(which(ao.df$Chromosome == k)) }, 1L)
  kbreaks <- sort(unique(c(gaps, kends)))
  ao.df$chrgap <- rep(seq_along(kbreaks), times = c(kbreaks[1], diff(kbreaks)))
  
  ## Building ASCAT-like object
  tmsg("Building normalized object ...")
  my.ascat.obj <- list(
    data = list(
      Tumor_LogR.ori = data.frame(sample = ao.df$L2R.ori, row.names = ao.df$ProbeSetName),
      Tumor_LogR = data.frame(sample = ao.df$L2R, row.names = ao.df$ProbeSetName),
      Tumor_BAF = data.frame(sample = ao.df$BAF, row.names = ao.df$ProbeSetName),
      Tumor_AD = data.frame(sample = ao.df[["Allele Difference"]], row.names = ao.df$ProbeSetName),
      Tumor_LogR_segmented = NULL,
      Tumor_BAF_segmented = NULL,
      Germline_LogR = NULL,
      Germline_BAF = NULL,
      SNPpos = data.frame(chrs = ao.df$chr, pos = ao.df$pos, row.names = ao.df$ProbeSetName),
      ch = sapply(unique(ao.df$chr), function(x) { which(ao.df$chr == x) }),
      chr = sapply(unique(ao.df$chrgap), function(x) { which(ao.df$chrgap == x) }),
      chrs = unique(ao.df$chr),
      samples = samplename,
      gender = as.vector(meta.b$predicted.gender),
      sexchromosomes = sex.chr,
      failedarrays = NULL
    ),
    germline = list(
      ## TRUE = homozygous (cluster2 == 1), FALSE = heterozygous
      germlinegenotypes = matrix(as.logical(abs(ao.df$cluster2 - 2L)), ncol = 1),
      failedarrays = NULL
    )
  )
  colnames(my.ascat.obj$germline$germlinegenotypes) <- colnames(my.ascat.obj$data$Tumor_LogR) <- colnames(my.ascat.obj$data$Tumor_LogR.ori) <- colnames(my.ascat.obj$data$Tumor_BAF) <- samplename
  my.ascat.obj$data$Tumor_BAF.unscaled = data.frame(sample = ao.df$BAF.unscaled, row.names = ao.df$ProbeSetName)
  colnames(my.ascat.obj$data$Tumor_BAF.unscaled) <- samplename
  my.ascat.obj$data$Tumor_BAF.unisomy = data.frame(sample = ao.df$uni, row.names = ao.df$ProbeSetName)
  colnames(my.ascat.obj$data$Tumor_BAF.unisomy) <- samplename
  rownames(my.ascat.obj$germline$germlinegenotypes) <- ao.df$ProbeSetName
  ## Absolute genomic positions (per-chromosome offsets added) for plotting
  genopos <- ao.df$pos + cs$chromosomes$chr.length.toadd[ao.df$chrN]
  rm(ao.df)
  gc()
  
  ## Adding meta
  my.ascat.obj$meta = list(
    basic = meta.b,
    affy = affy.meta
  )
  
  ## Adding CEL intensities (as integers to halve the object size)
  my.ascat.obj$CEL = list(
    CEL1 = affxparser::readCel(filename = CEL)
  )
  my.ascat.obj$CEL$CEL1$intensities <- as.integer(my.ascat.obj$CEL$CEL1$intensities)
  
  if(write.data) saveRDS(my.ascat.obj, paste0(out.dir, "/", samplename, "/", samplename, "_", arraytype, "_", genome, "_processed.RDS"), compress = "bzip2")
  
  ## PLOT : raw L2R, normalized L2R and BAF profiles as a 3-panel PNG
  if (plot) {
    tmsg("Plotting ...")
    kend <- genopos[vapply(my.ascat.obj$data$ch, max, 1L)]
    l2r.notna <- which(!is.na(my.ascat.obj$data$Tumor_LogR[,1]))
    l2r.rm <- runmed(my.ascat.obj$data$Tumor_LogR[,1][l2r.notna], smo)
    l2r.ori.rm <- runmed(my.ascat.obj$data$Tumor_LogR.ori[,1][l2r.notna], smo)
    png(paste0(out.dir, "/", samplename, "/", samplename, "_", arraytype, "_", genome, "_rawplot.png"), 1600, 1050)
    par(mfrow = c(3,1))
    plot(genopos, my.ascat.obj$data$Tumor_LogR.ori[,1], pch = ".", cex = 3, col = "grey75", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " ", arraytype, " raw L2R profile (median-centered) / ", round(sum(abs(diff(l2r.ori.rm))), digits = 3)), xlab = "Genomic position", ylab = "L2R")
    lines(genopos[l2r.notna], l2r.ori.rm, col = 1)
    abline(v = kend, col = 4, lty = 3, lwd = 2)
    abline(h = 0, col = 2, lty = 2, lwd = 2)
    ## BUGFIX : title had unbalanced parentheses ("median-centered))")
    plot(genopos, my.ascat.obj$data$Tumor_LogR[,1], pch = ".", cex = 3, col = "grey75", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " ", arraytype, " L2R profile (", l2r.level, ", median-centered) / ", round(sum(abs(diff(l2r.rm))), digits = 3)), xlab = "Genomic position", ylab = "L2R")
    lines(genopos[l2r.notna], l2r.rm, col = 1)
    abline(v = kend, col = 4, lty = 3, lwd = 2)
    abline(h = 0, col = 2, lty = 2, lwd = 2)
    plot(genopos, my.ascat.obj$data$Tumor_BAF[,1], pch = ".", cex = 3, col = "grey80", xaxs = "i", yaxs = "i", ylim = c(0,1), main = paste0(samplename, " ", arraytype, " BAF profile"), xlab = "Genomic position", ylab = "BAF")
    points(genopos[my.ascat.obj$germline$germlinegenotypes[,1] == 0], my.ascat.obj$data$Tumor_BAF[my.ascat.obj$germline$germlinegenotypes[,1] == 0,1], pch = ".", cex = 3, col = "grey25")
    abline(v = kend, col = 4, lty = 3, lwd = 2)
    abline(h = .5, col = 2, lty = 2, lwd = 2)
    dev.off()
  }
  
  ## Cleaning
  if(!oschp.keep) {
    tmsg("Removing temporary OSCHP file ...")
    file.remove(oscf)
  }
  tmsg("Done.")
  gc()
  if(return.data) return(my.ascat.obj)
}
## Batch wrapper for SNP6.Process : processes multiple CEL files in parallel.
##
##  CEL.list.file : tab-separated file with header "cel_files\tSampleName",
##                  one CEL file path + unique sample name per row.
##  nthread       : number of parallel workers (capped at the sample count
##                  and, if NULL, set to available logical cores - 1).
##  cluster.type  : cluster type passed to parallel::makeCluster ("PSOCK", ...).
##  ...           : additional arguments forwarded to SNP6.Process().
##
## BUGFIX (review) : the header check compared head.ok against
## colnames(CEL.list.file) — the colnames of a character scalar, i.e. NULL —
## so all(NULL == head.ok) was vacuously TRUE and malformed headers were
## never detected. It now checks the parsed table's colnames.
SNP6.Process.Batch <- function(CEL.list.file = NULL, nthread = 1, cluster.type = "PSOCK", ...) {
  ## Checking the CEL.list.file
  if (is.null(CEL.list.file)) stop("A CEL.list.file is required !", call. = FALSE)
  if (!file.exists(CEL.list.file)) stop("Could not find CEL.list.file !", call. = FALSE)
  message("Reading and checking CEL.list.file ...")
  myCELs <- read.table(file = CEL.list.file, header = TRUE, sep="\t", check.names = FALSE, as.is = TRUE)
  head.ok <- c("cel_files", "SampleName")
  head.chk <- identical(colnames(myCELs), head.ok)
  if (!head.chk) {
    message("Invalid header in CEL.list.file !")
    message(paste0("EXPECTED : ", paste(head.ok, collapse = ", ")))
    message(paste0("FOUND : ", paste(colnames(myCELs), collapse = ", ")))
    stop("Invalid header.", call. = FALSE)
  }
  ## SampleNames must be unique (they name the output directories)
  sn.chk <- duplicated(myCELs$SampleName)
  if (any(sn.chk)) {
    message("CEL.list.file contains duplicated SampleNames !")
    message(paste(myCELs$SampleName[which(duplicated(myCELs$SampleName))], collapse = ", "))
    stop("Duplicated SampleNames.", call. = FALSE)
  }
  ## All listed CEL files must exist before launching any worker
  fecheck <- !vapply(myCELs$cel_files, file.exists, TRUE)
  fecheck.pos <- which(fecheck)
  if (length(fecheck.pos) > 0) stop(paste0("\n", "CEL file could not be found : ", myCELs$cel_files[fecheck.pos], collapse = ""), call. = FALSE)
  message(paste0("Found ", nrow(myCELs), " samples to process."))
  ## Adjusting cores/threads
  message("Adjusting number of threads if needed ...")
  avail.cores <- parallel::detectCores(logical = TRUE)
  if (is.null(nthread)) { nthread <- avail.cores - 1; message(paste0("Reset nthread to ", nthread)) }
  if (nrow(myCELs) < nthread) { nthread <- nrow(myCELs); message(paste0("Reset nthread to ", nthread)) }
  if (avail.cores <= nthread) message(paste0(" WARNING : nthread set to ", nthread, " while available logical threads number is ", avail.cores, " !"))
  ## Building cluster ; bitmapType is captured so workers can replicate it
  current.bitmapType <- getOption("bitmapType")
  `%dopar%` <- foreach::"%dopar%"
  `%do%` <- foreach::"%do%"
  cl <- parallel::makeCluster(spec = nthread, type = cluster.type, outfile = "")
  doParallel::registerDoParallel(cl)
  p <- 0
  s6res <- foreach::foreach(p = seq_len(nrow(myCELs)), .inorder = FALSE, .errorhandling = "stop") %dopar% {
    EaCoN.set.bitmapType(type = current.bitmapType)
    SNP6.Process(CEL = myCELs$cel_files[p], samplename = myCELs$SampleName[p], return.data = FALSE, ...)
  }
  ## Stopping cluster
  message("Stopping cluster ...")
  parallel::stopCluster(cl)
  message("Done.")
}
| /R/apt_snp6_process.R | permissive | gustaveroussy/EaCoN | R | false | false | 21,263 | r | ## Performs CS CEL processing
SNP6.Process <- function(CEL = NULL, samplename = NULL, l2r.level = "normal", gc.renorm = TRUE, gc.rda = NULL, wave.renorm = TRUE, wave.rda = NULL, mingap = 1E+06, out.dir = getwd(), oschp.keep = FALSE, force.OS = NULL, apt.version = "1.20.0", apt.build = "na35.r1", genome.pkg = "BSgenome.Hsapiens.UCSC.hg19", return.data = FALSE, write.data = TRUE, plot = TRUE, force = FALSE) {
# setwd("/home/job/WORKSPACE/EaCoN_tests/SNP6")
# CEL <- "GSM820994.CEL.bz2"
# samplename <- "BITES_TEST"
# l2r.level <- "normal"
# wave.renorm <- TRUE
# wave.rda <- NULL
# gc.renorm <- TRUE
# gc.rda <- NULL
# mingap <- 1E+06
# out.dir <- getwd()
# oschp.keep <- TRUE
# force.OS <- NULL
# apt.version <- "1.20.0"
# apt.build <- "na35.r1"
# genome.pkg <- "BSgenome.Hsapiens.UCSC.hg19"
# return.data <- FALSE
# write.data <- TRUE
# plot <- TRUE
# force <- FALSE
# require(foreach)
# source("~/git_gustaveroussy/EaCoN/R/mini_functions.R")
# source("~/git_gustaveroussy/EaCoN/R/renorm_functions.R")
## Early checks
if (is.null(CEL)) stop(tmsg("A CEL file is required !"), call. = FALSE)
if (is.null(samplename)) stop(tmsg("A samplename is required !"), call. = FALSE)
if (!file.exists(CEL)) stop(tmsg(paste0("Could not find CEL file ", CEL, " !")), call. = FALSE)
if (gc.renorm) { if (!is.null(gc.rda)) { if (!file.exists(gc.rda)) stop(tmsg(paste0("Could not find gc.rda file ", gc.rda)), call. = FALSE) } }
if (wave.renorm) { if (!is.null(wave.rda)) { if (!file.exists(wave.rda)) stop(tmsg(paste0("Could not find wave.rda file ", wave.rda)), call. = FALSE) } }
if (is.null(genome.pkg)) stop(tmsg("A BSgenome package name is required !"), call. = FALSE)
if (!genome.pkg %in% BSgenome::installed.genomes()) {
if (genome.pkg %in% BSgenome::available.genomes()) {
stop(tmsg(paste0("BSgenome ", genome.pkg, " available but not installed. Please install it !")), call. = FALSE)
} else {
stop(tmsg(paste0("BSgenome ", genome.pkg, " not available in valid BSgenomes and not installed ... Please check your genome name or install your custom BSgenome !")), call. = FALSE)
}
}
if (dir.exists(samplename)) { if (!force) stop(tmsg(paste0("A [", samplename, '] dir already exists !')), call. = FALSE) else unlink(samplename, recursive = TRUE, force = FALSE) }
l2r.lev.conv <- list("normal" = "Log2Ratio", "weighted" = "SmoothSignal")
if (!(l2r.level %in% names(l2r.lev.conv))) stop(tmsg("Option 'l2r.level' should be 'normal' or 'weighted' !"), call. = FALSE)
## Handling compressed files
CEL <- compressed_handler(CEL)
## Secondary checks
sup.array <- c("GenomeWideSNP_6")
arraytype.cel = affxparser::readCelHeader(filename = CEL)$chiptype
if (!arraytype.cel %in% sup.array) stop(tmsg(paste0("Identified array type '", arraytype.cel, "' is not supported by this function !")), call. = FALSE)
## Checking APT version compatibility
valid.apt.versions <- c("1.20.0")
if (!(apt.version %in% valid.apt.versions)) warning(tmsg(paste0("APT version ", apt.version, " is not supported. Program may fail !")))
## Checking build compatibility
valid.builds <- c("na35.r1")
if (!(tolower(apt.build) %in% valid.builds)) warning(tmsg(paste0("Build ", apt.build, " is not supported. Program may fail !")))
## Checking apt-copynumber-cyto-ssa package loc
apt.snp6.pkg.name <- paste0("apt.snp6.", apt.version)
if (!(apt.snp6.pkg.name %in% installed.packages())) stop(tmsg(paste0("Package ", apt.snp6.pkg.name, " not found !")), call. = FALSE)
suppressPackageStartupMessages(require(package = apt.snp6.pkg.name, character.only = TRUE))
## Processing CEL to an OSCHP file
oscf <- apt.snp6.process(CEL = CEL, samplename = samplename, out.dir = out.dir, temp.files.keep = FALSE, force.OS = force.OS, apt.build = apt.build)
## Reading OSCHP
my.oschp <- oschp.load(file = oscf)
sex.chr <- c("chrX", "chrY")
## Processing : meta (and checks)
if (!("affymetrix-chipsummary-snp-qc" %in% names(my.oschp$Meta$analysis))) my.oschp$Meta$analysis[["affymetrix-chipsummary-snp-qc"]] <- NA
### Loading genome info
tmsg(paste0("Loading ", genome.pkg, " ..."))
suppressPackageStartupMessages(require(genome.pkg, character.only = TRUE))
BSg.obj <- getExportedValue(genome.pkg, genome.pkg)
# genome2 <- BSgenome::providerVersion(BSg.obj)
genome2 <- metadata(BSg.obj)$genome
cs <- chromobjector(BSg.obj)
### Getting basic meta
genome <- getmeta("affymetrix-algorithm-param-genome-version", my.oschp$Meta$analysis)
if (genome != genome2) stop(tmsg(paste0("Genome build name given with BSgenome package '", genome.pkg, "', (", genome2, ") is different from the genome build specified by provided APT build version '", apt.build, "' (", genome, ") !")), call. = FALSE)
arraytype <- getmeta("affymetrix-array-type", my.oschp$Meta$analysis)
manufacturer <- getmeta("program-company", my.oschp$Meta$analysis)
species <- getmeta("affymetrix-algorithm-param-genome-species", my.oschp$Meta$analysis)
snp6.conv <- list("1" = "male", "2" = "female", "NA" = "NA", "0" = "NA")
gender.conv <- list("female" = "XX", "male" = "XY", "NA" = "NA")
pgender <- gender.conv[[snp6.conv[[as.character(as.numeric(getmeta("affymetrix-chipsummary-Gender", my.oschp$Meta$analysis)))]]]]
if (!(arraytype %in% sup.array)) stop(paste0("Unsupported array : '", arraytype, "' !"), call. = FALSE)
## Reconstructing missing meta
if (!"CEL1" %in% names(my.oschp$Meta)) {
datheader.split <- unlist(strsplit(x = affxparser::readCelHeader(filename = CEL)$datheader, split = "\\s+"))
my.oschp$Meta$CEL1$acquisition <- list("affymetrix-scanner-id" = datheader.split[8], "affymetrix-scan-date" = paste0(datheader.split[6:7], collapse = " "))
my.oschp$Meta$CEL1$array <- list("affymetrix-array-id" = NA, "affymetrix-array-barcode" = NA)
}
# meta.a2 <- list("affymetrix-scanner-id" = scanner.id, "affymetrix-scan-date" = scan.date)
# meta.a2 <- list("affymetrix-scanner-id" = scanner.id, "affymetrix-scan-date" = scan.date)
# meta.a3 <- list("affymetrix-array-id" = NA, "affymetrix-array-barcode" = NA)
# meta.a3 <- list("affymetrix-array-id" = NA, "affymetrix-array-barcode" = NA)
meta.b <- list(
samplename = samplename,
source = "microarray",
source.file = CEL,
type = arraytype,
manufacturer = manufacturer,
species = species,
genome = genome,
genome.pkg = genome.pkg,
predicted.gender = pgender
)
## Extracting data : L2R
ao.df <- dplyr::as.tbl(data.frame(my.oschp$MultiData$CopyNumber[,c(2:4)], L2R.ori = as.vector(my.oschp$MultiData$CopyNumber[[l2r.lev.conv[[l2r.level]]]])))
ao.df$Chromosome <- as.integer(ao.df$Chromosome) ### Patching the Chromosome column : on R4+, it is read as 'raw', we need ints
affy.meta <- my.oschp$Meta
affy.chrom <- my.oschp$MultiData[["CopyNumber_&keyvals"]][seq.int(3, nrow(my.oschp$MultiData[["CopyNumber_&keyvals"]]), 3),1:2]
ao.df$L2R <- ao.df$L2R.ori
ak <- affy.chrom$val
names(ak) <- as.numeric(sub(":display", "", affy.chrom$key))
ao.df$chrA <- as.vector(ak[as.character(ao.df$Chromosome)])
ao.df$chr <- paste0("chr", ao.df$chrA)
ao.df$chrN <- unlist(cs$chrom2chr[ao.df$chr])
## Normalizing SNPs
tmsg("Normalizing SNP data (using rcnorm) ...")
baf.df <- rcnorm::rcnorm.snp(myCEL = CEL, genome.pkg = genome.pkg, allSNPs = FALSE)
baf.df$chr <- paste0("chr", baf.df$chrs)
baf.df$chrN <- unlist(cs$chrom2chr[baf.df$chr])
baf.df <- baf.df[order(baf.df$chrN, baf.df$pos),]
baf.df <- baf.df[!is.na(baf.df$BAF),]
gc()
ao.df <- suppressWarnings(Reduce(function(t1, t2) dplyr::left_join(t1, t2, by = "ProbeSetName"), list(ao.df, dplyr::as.tbl(data.frame(ProbeSetName = rownames(baf.df), BAF.ori = baf.df$BAF, BAF = baf.df$BAF)), dplyr::as.tbl(my.oschp$MultiData$CopyNumber[,c(2,9)]))))
rm(my.oschp, baf.df)
gc()
## Hacking Type of the 'Allele Difference' column (from array to vector)
ao.df[['Allele Difference']] <- as.vector(ao.df[['Allele Difference']])
ao.df <- dplyr::arrange(ao.df, chrN, Position, ProbeSetName)
colnames(ao.df)[3] <- "pos"
## Merging L2R and BAF data
ao.df <- ao.df[!(is.na(ao.df$L2R) & is.na(ao.df$BAF)),]
######################
######################
##### WAR ZONE ! #####
######################
######################
########################
## Segmentation-based ##
########################
minseglen <- 50
# nna <- !is.na(ao.df$BAF)
nna <- !is.na(ao.df$BAF) & !is.na(ao.df[["Allele Difference"]])
ao.df$mBAF <- BAF2mBAF(ao.df$BAF)
# str(peltres <- changepoint::cpt.meanvar(ao.df$mBAF[nna], penalty = "MBIC", method = "PELT", minseglen = minseglen)@cpts)
peltres <- changepoint::cpt.var(ao.df[["Allele Difference"]][nna], penalty = "BIC", method = "PELT", minseglen = minseglen)@cpts
## Refining with chr ends
kends <- vapply(unique(ao.df$chrN[nna]), function(k) { max(which(ao.df$chrN[nna] == k))}, 1L)
peltres <- sort(unique(c(peltres, kends)))
prdiff <- diff(peltres)
toremove <- vector()
if (any(prdiff < 50)) {
for (b in which(prdiff < 50)) {
if (peltres[b+1] %in% kends) toremove <- c(toremove, b) else toremove <- c(toremove, b+1)
}
peltres <- peltres[-toremove]
}
## Clustering BAF segments
bs.end <- peltres
bs.start <- c(1, peltres[-length(peltres)]+1)
ao.df$cluster <- NA
ao.df$uni <- FALSE
suppressPackageStartupMessages(library(mclust))
mc.G <- 4
mc.mN <- "E"
smeds <- hrates <- vector()
for(i in seq_along(bs.start)) {
mcresBIC <- mclust::mclustBIC(data = ao.df$BAF[nna][bs.start[i]:bs.end[i]], G = mc.G, modelNames = mc.mN, verbose = FALSE)
mcres <- mclust::Mclust(data = ao.df$BAF[nna][bs.start[i]:bs.end[i]], G = mc.G, modelNames = mc.mN, verbose = FALSE, x = mcresBIC)$classification
ao.df$cluster[nna][bs.start[i]:bs.end[i]] <- mcres
if (length(unique(mcres)) == 2) ao.df$uni[nna][bs.start[i]:bs.end[i]] <- TRUE
smeds <- c(smeds, median(ao.df$mBAF[nna][bs.start[i]:bs.end[i]][!mcres %in% c(1,4)], na.rm = TRUE))
hrates <- c(hrates, length(which(!mcres %in% c(1,4))) / length(mcres))
}
## Rescaling
tmsg("Rescaling BAF ...")
ao.df$BAF.unscaled <- ao.df$BAF
for(i in seq_along(bs.start)) {
lmed <- median(ao.df$BAF.unscaled[nna][bs.start[i]:bs.end[i]][ao.df$cluster[nna][bs.start[i]:bs.end[i]] == 1], na.rm = TRUE)
umed <- median(ao.df$BAF.unscaled[nna][bs.start[i]:bs.end[i]][ao.df$cluster[nna][bs.start[i]:bs.end[i]] == 4], na.rm = TRUE)
ao.df$BAF[nna][bs.start[i]:bs.end[i]] <- (lmed - ao.df$BAF[nna][bs.start[i]:bs.end[i]]) / (lmed - umed)
}
ao.df$mBAF <- BAF2mBAF(ao.df$BAF)
## Rescue
ao.df$cluster2 <- ao.df$cluster
ao.df$cluster2[ao.df$cluster2 == 4] <- 1
ao.df$cluster2[ao.df$cluster2 > 1] <- 2
target.hrate <- .3
for(i in seq_along(hrates)) {
if (hrates[i] < .2 & smeds[i] < .45) {
ao.df$cluster2[nna][bs.start[i]:bs.end[i]] <- 2
ao.df$uni[nna][bs.start[i]:bs.end[i]] <- TRUE
}
}
## L2R renormalizations
tmsg("Normalizing L2R data ...")
smo <- round(nrow(ao.df) / 550)
if(smo%%2 == 0) smo <- smo+1
### Wave
if (wave.renorm) {
tmsg("Wave re-normalization ...")
ren.res <- renorm.go(input.data = ao.df, renorm.rda = wave.rda, track.type = "Wave", smo = smo, arraytype = arraytype, genome = genome)
ao.df <- ren.res$data
fitted.l2r <- ren.res$renorm$l2r$l2r
if(is.null(ren.res$renorm$pos)) {
meta.b <- setmeta("wave.renorm", "None", meta.b)
tmsg(" No positive fit.")
} else {
## Tweaking sex chromosomes
sex.idx <- ao.df$chr %in% sex.chr
auto.ori.med <- median(ao.df$L2R[!sex.idx], na.rm = TRUE)
auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
if (any(sex.idx)) {
for (k in sex.chr) {
k.idx <- ao.df$chr == k
if (any(k.idx)) {
k.ori.diffmed <- median(ao.df$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
}
}
}
meta.b <- setmeta("wave.renorm", paste0(ren.res$mrenorm$pos, collapse = ","), meta.b)
}
rm(ren.res)
ao.df[["L2R.Wave"]] <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
ao.df$L2R <- ao.df[["L2R.Wave"]]
} else {
meta.b <- setmeta("wave.renorm", "FALSE", meta.b)
}
### GC
if (gc.renorm) {
tmsg("GC renormalization ...")
ren.res <- renorm.go(input.data = ao.df, renorm.rda = gc.rda, track.type = "GC", smo = smo, arraytype = arraytype, genome = genome)
ao.df <- ren.res$data
fitted.l2r <- ren.res$renorm$l2r$l2r
if(is.null(ren.res$renorm$pos)) {
meta.b <- setmeta("gc.renorm", "None", meta.b)
tmsg(" No positive fit.")
} else {
## Tweaking sex chromosomes
sex.idx <- ao.df$chr %in% sex.chr
auto.ori.med <- median(ao.df$L2R[!sex.idx], na.rm = TRUE)
auto.rn.med <- median(fitted.l2r[!sex.idx], na.rm = TRUE)
if (any(sex.idx)) {
for (k in sex.chr) {
k.idx <- ao.df$chr == k
if (any(k.idx)) {
k.ori.diffmed <- median(ao.df$L2R.ori[k.idx], na.rm = TRUE) - auto.ori.med
k.rn.diffmed <- median(fitted.l2r[k.idx], na.rm = TRUE) - auto.rn.med
fitted.l2r[k.idx] <- fitted.l2r[k.idx] - k.rn.diffmed + k.ori.diffmed
}
}
}
meta.b <- setmeta("gc.renorm", paste0(ren.res$renorm$pos, collapse = ","), meta.b)
}
rm(ren.res)
ao.df[["L2R.GC"]] <- fitted.l2r - median(fitted.l2r, na.rm = TRUE)
ao.df$L2R <- ao.df[["L2R.GC"]]
} else {
meta.b <- setmeta("gc.renorm", "FALSE", meta.b)
}
## Rough median-centering of L2R
ao.df$L2R <- ao.df$L2R - median(ao.df$L2R, na.rm = TRUE)
## Identifying gaps and clustering chromosomal portions
gaps <- which(diff(ao.df$pos) >= mingap)
kends <- vapply(unique(ao.df$Chromosome), function(k) { max(which(ao.df$Chromosome == k)) }, 1L)
kbreaks <- sort(unique(c(gaps, kends)))
ao.df$chrgap <- rep(seq_along(kbreaks), times = c(kbreaks[1], diff(kbreaks)))
## Building ASCAT-like object
tmsg("Building normalized object ...")
my.ascat.obj <- list(
data = list(
Tumor_LogR.ori = data.frame(sample = ao.df$L2R.ori, row.names = ao.df$ProbeSetName),
Tumor_LogR = data.frame(sample = ao.df$L2R, row.names = ao.df$ProbeSetName),
Tumor_BAF = data.frame(sample = ao.df$BAF, row.names = ao.df$ProbeSetName),
Tumor_AD = data.frame(sample = ao.df[["Allele Difference"]], row.names = ao.df$ProbeSetName),
Tumor_LogR_segmented = NULL,
Tumor_BAF_segmented = NULL,
Germline_LogR = NULL,
Germline_BAF = NULL,
SNPpos = data.frame(chrs = ao.df$chr, pos = ao.df$pos, row.names = ao.df$ProbeSetName),
ch = sapply(unique(ao.df$chr), function(x) { which(ao.df$chr == x) }),
chr = sapply(unique(ao.df$chrgap), function(x) { which(ao.df$chrgap == x) }),
chrs = unique(ao.df$chr),
samples = samplename,
gender = as.vector(meta.b$predicted.gender),
sexchromosomes = sex.chr,
failedarrays = NULL
),
germline = list(
germlinegenotypes = matrix(as.logical(abs(ao.df$cluster2 - 2L)), ncol = 1),
failedarrays = NULL
)
)
colnames(my.ascat.obj$germline$germlinegenotypes) <- colnames(my.ascat.obj$data$Tumor_LogR) <- colnames(my.ascat.obj$data$Tumor_LogR.ori) <- colnames(my.ascat.obj$data$Tumor_BAF) <- samplename
my.ascat.obj$data$Tumor_BAF.unscaled = data.frame(sample = ao.df$BAF.unscaled, row.names = ao.df$ProbeSetName)
colnames(my.ascat.obj$data$Tumor_BAF.unscaled) <- samplename
my.ascat.obj$data$Tumor_BAF.unisomy = data.frame(sample = ao.df$uni, row.names = ao.df$ProbeSetName)
colnames(my.ascat.obj$data$Tumor_BAF.unisomy) <- samplename
rownames(my.ascat.obj$germline$germlinegenotypes) <- ao.df$ProbeSetName
genopos <- ao.df$pos + cs$chromosomes$chr.length.toadd[ao.df$chrN]
rm(ao.df)
gc()
## Adding meta
my.ascat.obj$meta = list(
basic = meta.b,
affy = affy.meta
)
## Adding CEL intensities
my.ascat.obj$CEL = list(
CEL1 = affxparser::readCel(filename = CEL)
)
my.ascat.obj$CEL$CEL1$intensities <- as.integer(my.ascat.obj$CEL$CEL1$intensities)
if(write.data) saveRDS(my.ascat.obj, paste0(out.dir, "/", samplename, "/", samplename, "_", arraytype, "_", genome, "_processed.RDS"), compress = "bzip2")
## PLOT
if (plot) {
tmsg("Plotting ...")
kend <- genopos[vapply(my.ascat.obj$data$ch, max, 1L)]
l2r.notna <- which(!is.na(my.ascat.obj$data$Tumor_LogR[,1]))
l2r.rm <- runmed(my.ascat.obj$data$Tumor_LogR[,1][l2r.notna], smo)
l2r.ori.rm <- runmed(my.ascat.obj$data$Tumor_LogR.ori[,1][l2r.notna], smo)
png(paste0(out.dir, "/", samplename, "/", samplename, "_", arraytype, "_", genome, "_rawplot.png"), 1600, 1050)
par(mfrow = c(3,1))
plot(genopos, my.ascat.obj$data$Tumor_LogR.ori[,1], pch = ".", cex = 3, col = "grey75", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " ", arraytype, " raw L2R profile (median-centered) / ", round(sum(abs(diff(l2r.ori.rm))), digits = 3)), xlab = "Genomic position", ylab = "L2R")
lines(genopos[l2r.notna], l2r.ori.rm, col = 1)
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = 0, col = 2, lty = 2, lwd = 2)
plot(genopos, my.ascat.obj$data$Tumor_LogR[,1], pch = ".", cex = 3, col = "grey75", xaxs = "i", yaxs = "i", ylim = c(-2,2), main = paste0(samplename, " ", arraytype, " L2R profile (", l2r.level, ", median-centered)) / ", round(sum(abs(diff(l2r.rm))), digits = 3)), xlab = "Genomic position", ylab = "L2R")
lines(genopos[l2r.notna], l2r.rm, col = 1)
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = 0, col = 2, lty = 2, lwd = 2)
plot(genopos, my.ascat.obj$data$Tumor_BAF[,1], pch = ".", cex = 3, col = "grey80", xaxs = "i", yaxs = "i", ylim = c(0,1), main = paste0(samplename, " ", arraytype, " BAF profile"), xlab = "Genomic position", ylab = "BAF")
points(genopos[my.ascat.obj$germline$germlinegenotypes[,1] == 0], my.ascat.obj$data$Tumor_BAF[my.ascat.obj$germline$germlinegenotypes[,1] == 0,1], pch = ".", cex = 3, col = "grey25")
# plot(ao.df$genopos, my.ascat.obj$data$Tumor_BAF[,1], pch = ".", cex = 3, col = ao.df$ForcedCall-5, xaxs = "i", yaxs = "i", ylim = c(0,1), main = paste0(samplename, " ", arraytype, " BAF profile"), xlab = "Genomic position", ylab = "BAF")
abline(v = kend, col = 4, lty = 3, lwd = 2)
abline(h = .5, col = 2, lty = 2, lwd = 2)
dev.off()
}
## Cleaning
if(!oschp.keep) {
tmsg("Removing temporary OSCHP file ...")
file.remove(oscf)
}
tmsg("Done.")
gc()
if(return.data) return(my.ascat.obj)
}
#' Process a batch of Affymetrix SNP6 CEL files with SNP6.Process(), in parallel.
#'
#' @param CEL.list.file Path to a tab-separated file whose header is exactly
#'   "cel_files<TAB>SampleName", one CEL file / sample name pair per row.
#' @param nthread Number of parallel workers (NULL = available logical
#'   cores - 1; always capped at the number of samples).
#' @param cluster.type Cluster type passed to parallel::makeCluster()
#'   (e.g. "PSOCK" or "FORK").
#' @param ... Further arguments forwarded to SNP6.Process().
SNP6.Process.Batch <- function(CEL.list.file = NULL, nthread = 1, cluster.type = "PSOCK", ...) {
  ## Checking the CEL.list.file
  if (is.null(CEL.list.file)) stop("A CEL.list.file is required !", call. = FALSE)
  if (!file.exists(CEL.list.file)) stop("Could not find CEL.list.file !", call. = FALSE)
  message("Reading and checking CEL.list.file ...")
  myCELs <- read.table(file = CEL.list.file, header = TRUE, sep = "\t", check.names = FALSE, as.is = TRUE)
  head.ok <- c("cel_files", "SampleName")
  ## BUGFIX: validate the header of the parsed table, not of the file-path
  ## string. colnames() of a character scalar is NULL, so the original
  ## all(colnames(CEL.list.file) == head.ok) reduced to all(logical(0)),
  ## which is TRUE -- a malformed header was never detected.
  head.chk <- identical(colnames(myCELs), head.ok)
  if (!head.chk) {
    message("Invalid header in CEL.list.file !")
    message(paste0("EXPECTED : ", paste(head.ok, collapse = ", ")))
    message(paste0("FOUND : ", paste(colnames(myCELs), collapse = ", ")))
    stop("Invalid header.", call. = FALSE)
  }
  ## Sample names must be unique (downstream outputs are keyed by SampleName).
  sn.chk <- duplicated(myCELs$SampleName)
  if (any(sn.chk)) {
    message("CEL.list.file contains duplicated SampleNames !")
    message(paste(unique(myCELs$SampleName[sn.chk]), collapse = ", "))
    stop("Duplicated SampleNames.", call. = FALSE)
  }
  ## Fail fast if any listed CEL file is missing, before the expensive batch starts.
  fecheck <- !vapply(myCELs$cel_files, file.exists, TRUE)
  fecheck.pos <- which(fecheck)
  if (length(fecheck.pos) > 0) stop(paste0("\n", "CEL file could not be found : ", myCELs$cel_files[fecheck.pos], collapse = ""), call. = FALSE)
  message(paste0("Found ", nrow(myCELs), " samples to process."))
  ## Adjusting cores/threads
  message("Adjusting number of threads if needed ...")
  avail.cores <- parallel::detectCores(logical = TRUE)
  if (is.null(nthread)) { nthread <- avail.cores - 1; message(paste0("Reset nthread to ", nthread)) }
  if (nrow(myCELs) < nthread) { nthread <- nrow(myCELs); message(paste0("Reset nthread to ", nthread)) }
  if (avail.cores <= nthread) message(paste0(" WARNING : nthread set to ", nthread, " while available logical threads number is ", avail.cores, " !"))
  ## Building cluster. Workers inherit the master's bitmapType so that PNG
  ## plotting inside SNP6.Process() behaves identically on all nodes.
  current.bitmapType <- getOption("bitmapType")
  `%dopar%` <- foreach::"%dopar%"
  cl <- parallel::makeCluster(spec = nthread, type = cluster.type, outfile = "")
  doParallel::registerDoParallel(cl)
  ## FIX: guarantee cluster teardown even when a worker errors out
  ## (.errorhandling = "stop" aborts the foreach loop, which previously
  ## leaked the cluster processes).
  on.exit({
    message("Stopping cluster ...")
    parallel::stopCluster(cl)
  }, add = TRUE)
  p <- 0
  s6res <- foreach::foreach(p = seq_len(nrow(myCELs)), .inorder = FALSE, .errorhandling = "stop") %dopar% {
    EaCoN.set.bitmapType(type = current.bitmapType)
    SNP6.Process(CEL = myCELs$cel_files[p], samplename = myCELs$SampleName[p], return.data = FALSE, ...)
  }
  message("Done.")
}
|
## These functions written in partial fulfillment of Coursera Data Science: R Programming
## github- HemantSingh0502
## This function creates a special "matrix" object that can cache its inverse
## Build a cache-aware wrapper around a matrix: the returned list of closures
## stores the matrix together with a memoized inverse. The inverse itself is
## computed externally (see cacheSolve()) and pushed in via $setinverse().
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # memoized inverse; NULL means "not computed yet"
  set <- function(y) {
    ## Replace the wrapped matrix in the constructor environment and
    ## invalidate any stale cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  ## Expose the four closures by name so callers can reach them with `$`.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve will retrieve the inverse from the cache
## Return the inverse of the special "matrix" produced by makeCacheMatrix(),
## computing it at most once: later calls reuse the cached value until the
## underlying matrix is replaced via $set() (which resets the cache).
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    ## Cache hit: skip the (potentially expensive) solve() call.
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: invert the wrapped matrix, memoize, and return.
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
| /cachematrix.R | no_license | HemantSingh0502/ProgrammingAssignment2 | R | false | false | 1,738 | r | ## These functions written in partial fulfillment of Coursera Data Science: R Programming
## github- HemantSingh0502
## This function creates a special "matrix" object that can cache its inverse
## Constructor for a "cache matrix": a list of closures wrapping a matrix
## and lazily caching its inverse (the inverse is computed by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) { ## x: the matrix to wrap (defaults to an empty matrix)
  inv <- NULL ## cached inverse; NULL means "not computed yet"
  set <- function(y) { ## replace the wrapped matrix ...
    x <<- y ## ... in the enclosing (constructor) environment
    inv <<- NULL ## ... and invalidate the now-stale cached inverse
  }
  get <- function() x ## accessor: return the wrapped matrix
  setinverse <- function(inverse) inv <<- inverse ## store a computed inverse in the cache
  getinverse <- function() inv ## accessor: return the cached inverse (or NULL)
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) ## named list so callers
  ## can reach each closure with the $ operator
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve will retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (an object created by
  ## makeCacheMatrix()). Extra arguments in '...' are forwarded to solve().
  inv <- x$getinverse() ## probe the cache first
  if(!is.null(inv)) {
    ## Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    return(inv)
  }
  ## Cache miss: fetch the raw matrix, invert it, and memoize the result
  ## so subsequent calls are free.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of PredictionComparison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
library("testthat")

## FIX: seed the RNG so the simulated predictions (and hence the statistics
## compared against Hmisc / nricens below) are reproducible across runs;
## previously every run used different random data.
set.seed(42)

## Two independent random "risk scores" and a binary outcome for 1000 subjects.
x1 <- runif(1000)
x2 <- runif(1000)
y <- sample(0:1, 1000, TRUE)

## Minimal prediction objects in the shape PredictionComparison expects:
## a data.frame with row/subject identifiers, a cohort start date, the
## predicted value and the observed outcome.
model1 <- list(prediction = data.frame(rowId = 1:1000,
                                       subjectId = 1:1000,
                                       cohortStartDate = rep('2010-01-01', 1000),
                                       value = x1,
                                       outcomeCount = y))
model2 <- list(prediction = data.frame(rowId = 1:1000,
                                       subjectId = 1:1000,
                                       cohortStartDate = rep('2010-01-01', 1000),
                                       value = x2,
                                       outcomeCount = y))

context("Performance Measures")
test_that("IDI", {
  ## Our Integrated Discrimination Improvement and its z statistic must match
  ## the reference implementation in Hmisc::improveProb on the same data.
  ## FIX: compute each side once and reuse it -- the original called both
  ## PredictionComparison::IDI() and Hmisc::improveProb() twice.
  ours <- PredictionComparison::IDI(model2, model1)
  existing <- Hmisc::improveProb(x1, x2, y)
  tolerance <- 0.001
  testthat::expect_equal(ours$IDI, as.numeric(existing$idi), tolerance = tolerance)
  testthat::expect_equal(ours$z, as.numeric(existing$z.idi), tolerance = tolerance)
})
test_that("NRI", {
  ## Binary Net Reclassification Improvement at a single 0.5 cut-point,
  ## checked against the reference implementation in nricens::nribin.
  ours <- PredictionComparison::NRI(model2, model1, thresholds = 0.5)[[1]]$nri
  reference <- nricens::nribin(event = y, p.std = x1, p.new = x2, cut = 0.5)$nri[1, 1]
  testthat::expect_equal(ours, reference, tolerance = 0.001)
})
test_that("NRI2", {
  ## Category-free NRI and its z statistic, checked against the reference
  ## values computed by Hmisc::improveProb on the same simulated data.
  ours <- PredictionComparison::NRI2(model2, model1)
  reference <- Hmisc::improveProb(x1, x2, y)
  tol <- 0.001
  testthat::expect_equal(ours$nri, as.numeric(reference$nri), tolerance = tol)
  testthat::expect_equal(ours$z, as.numeric(reference$z.nri), tolerance = tol)
})
| /tests/testthat/test-metric.R | permissive | OHDSI/PredictionComparison | R | false | false | 2,485 | r | # Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of PredictionComparison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Regression tests: PredictionComparison's IDI/NRI implementations are
## checked against the reference implementations in Hmisc and nricens.
library("testthat")
## NOTE(review): no set.seed() here, so the simulated data differ on every
## run. The comparisons below are still paired (both sides see the same
## x1/x2/y), but consider seeding for reproducibility.
x1 <- runif(1000)
x2 <- runif(1000)
y <- sample(0:1, 1000, TRUE)
## Minimal prediction objects in the structure PredictionComparison expects:
## identifiers, a cohort start date, the predicted value and the outcome.
model1 <- list(prediction=data.frame(rowId=1:1000,
                                     subjectId=1:1000,
                                     cohortStartDate=rep('2010-01-01',1000),
                                     value=x1,
                                     outcomeCount=y))
model2 <- list(prediction=data.frame(rowId=1:1000,
                                     subjectId=1:1000,
                                     cohortStartDate=rep('2010-01-01',1000),
                                     value=x2,
                                     outcomeCount=y))
context("Performance Measures")
test_that("IDI", {
  ## Integrated Discrimination Improvement (and its z) vs Hmisc::improveProb.
  ourIDI <- PredictionComparison::IDI(model2, model1)$IDI
  existingIDI <- Hmisc::improveProb(x1, x2, y)$idi
  tolerance <- 0.001
  testthat::expect_equal(ourIDI, as.numeric(existingIDI), tolerance = tolerance)
  ourZ <- PredictionComparison::IDI(model2, model1)$z
  existingZ <- Hmisc::improveProb(x1, x2, y)$z.idi
  testthat::expect_equal(ourZ, as.numeric(existingZ), tolerance = tolerance)
})
test_that("NRI", {
  ## Binary NRI at a 0.5 cut-point vs nricens::nribin.
  temp <- PredictionComparison::NRI(model2, model1, thresholds = 0.5)
  ourNRI <- temp[[1]]$nri
  temp2 <- nricens::nribin(event = y, p.std = x1, p.new = x2, cut = 0.5)
  existingNRI <- temp2$nri[1,1]
  tolerance <- 0.001
  testthat::expect_equal(ourNRI, existingNRI, tolerance = tolerance)
})
test_that("NRI2", {
  ## Category-free NRI and its z statistic vs Hmisc::improveProb.
  temp <- PredictionComparison::NRI2(model2, model1)
  ourNRI <- temp$nri
  ourNRIZ <- temp$z
  temp2 <- Hmisc::improveProb(x1, x2, y)
  existingNRI <- temp2$nri
  existingNRIZ <- temp2$z.nri
  tolerance <- 0.001
  testthat::expect_equal(ourNRI, as.numeric(existingNRI), tolerance = tolerance)
  testthat::expect_equal(ourNRIZ, as.numeric(existingNRIZ), tolerance = tolerance)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.