content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fwrite_fwf.R
\name{fwrite_fwf}
\alias{fwrite_fwf}
\alias{fwrite_fwf,data.frame,character,StfwfSchema-method}
\title{Fast write a fixed-width file.}
\usage{
fwrite_fwf(
data,
filename,
StfwfSchema,
validate = FALSE,
justify = "left",
...
)
\S4method{fwrite_fwf}{data.frame,character,StfwfSchema}(
data,
filename,
StfwfSchema,
validate = FALSE,
justify = "right",
...
)
}
\arguments{
\item{data}{\linkS4class{data.table} with the data to write.}
\item{filename}{Character vector of length 1 with the name of the file to write.}
\item{StfwfSchema}{Object of class \linkS4class{StfwfSchema} with the schema of the file.}
\item{validate}{Logical vector of length 1 with default value \code{FALSE} to indicate whether to
validate the content of \code{data} before writing.}
\item{justify}{Character vector of length 1 with default value \code{left} to indicate whether to
justify strings to the left or to the right.}
\item{...}{Other parameters from \code{\link[data.table]{fwrite}}.}
}
\value{
Returns an invisible \code{NULL}. The dataset is written in file \code{filename}.
}
\description{
\code{fwrite_fwf} takes as basic input a \linkS4class{data.table} and the schema for
the fwf file to write, concatenates columns accordingly and uses \code{\link[data.table]{fwrite}}
to write the file on disk.
}
\examples{
\dontrun{
# file will be written to working directory
path <- system.file('extdata', package = 'fastReadfwf')
stSchema <- fastReadfwf::xlsxToSchema(file.path(path, 'SchemaSNHS.xlsx'), 'stSchema')
data(MicroDataSNHS)
fwrite_fwf(MicroDataSNHS, file.path(getwd(), 'MicroDataSNHS'), stSchema, justify = 'right')
}
}
\seealso{
\code{\link[data.table]{fwrite}} \code{\link{fread_fwf}}
}
| /man/fwrite_fwf.Rd | no_license | david-salgado/fastReadfwf | R | false | true | 1,802 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fwrite_fwf.R
\name{fwrite_fwf}
\alias{fwrite_fwf}
\alias{fwrite_fwf,data.frame,character,StfwfSchema-method}
\title{Fast write a fixed-width file.}
\usage{
fwrite_fwf(
data,
filename,
StfwfSchema,
validate = FALSE,
justify = "left",
...
)
\S4method{fwrite_fwf}{data.frame,character,StfwfSchema}(
data,
filename,
StfwfSchema,
validate = FALSE,
justify = "right",
...
)
}
\arguments{
\item{data}{\linkS4class{data.table} with the data to write.}
\item{filename}{Character vector of length 1 with the name of the file to write.}
\item{StfwfSchema}{Object of class \linkS4class{StfwfSchema} with the schema of the file.}
\item{validate}{Logical vector of length 1 with default value \code{FALSE} to indicate whether to
validate the content of \code{data} before writing.}
\item{justify}{Character vector of length 1 with default value \code{left} to indicate whether to
justify strings to the left or to the right.}
\item{...}{Other parameters from \code{\link[data.table]{fwrite}}.}
}
\value{
Returns an invisible \code{NULL}. The dataset is written in file \code{filename}.
}
\description{
\code{fwrite_fwf} takes as basic input a \linkS4class{data.table} and the schema for
the fwf file to write, concatenates columns accordingly and uses \code{\link[data.table]{fwrite}}
to write the file on disk.
}
\examples{
\dontrun{
# file will be written to working directory
path <- system.file('extdata', package = 'fastReadfwf')
stSchema <- fastReadfwf::xlsxToSchema(file.path(path, 'SchemaSNHS.xlsx'), 'stSchema')
data(MicroDataSNHS)
fwrite_fwf(MicroDataSNHS, file.path(getwd(), 'MicroDataSNHS'), stSchema, justify = 'right')
}
}
\seealso{
\code{\link[data.table]{fwrite}} \code{\link{fread_fwf}}
}
|
library(qqvases)
### Name: qq_plot
### Title: Interactive QQ Plot
### Aliases: qq_plot
### ** Examples
## Not run:
##D qq_plot(rnorm(50))
##D
##D qq_plot(rnorm(50), step=0.05, breaks="Sturges")
##D
##D if(require("MASS", quietly=TRUE)){ qq_plot(geyser$waiting) }
## End(Not run)
| /data/genthat_extracted_code/qqvases/examples/qq_plot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 289 | r | library(qqvases)
### Name: qq_plot
### Title: Interactive QQ Plot
### Aliases: qq_plot
### ** Examples
## Not run:
##D qq_plot(rnorm(50))
##D
##D qq_plot(rnorm(50), step=0.05, breaks="Sturges")
##D
##D if(require("MASS", quietly=TRUE)){ qq_plot(geyser$waiting) }
## End(Not run)
|
gg_circle <- function(r, xc, yc, color="black", fill=NA, ...) {
x <- xc + r*cos(seq(0, pi, length.out=100))
ymax <- yc + r*sin(seq(0, pi, length.out=100))
ymin <- yc + r*sin(seq(0, -pi, length.out=100))
annotate("ribbon", x=x, ymin=ymin, ymax=ymax, color=color, fill=fill, ...)
}
square <- ggplot(data.frame(x=0:10, y=0:10), aes(x=x, y=y)) + theme_light()+
scale_x_continuous(breaks=seq(0, 10, 1),minor_breaks = NULL) +
scale_y_continuous(breaks=seq(0, 10, 1),minor_breaks = NULL)
square + gg_circle(r=4.5, xc=5, yc=5, alpha=0.1)
square + gg_circle(r=0.25, xc=0.5, yc=0.5, color="blue", fill="yellow", alpha=0.1)
| /Useful/PlotCircle.R | no_license | hkshu001/R_Learning | R | false | false | 645 | r | gg_circle <- function(r, xc, yc, color="black", fill=NA, ...) {
x <- xc + r*cos(seq(0, pi, length.out=100))
ymax <- yc + r*sin(seq(0, pi, length.out=100))
ymin <- yc + r*sin(seq(0, -pi, length.out=100))
annotate("ribbon", x=x, ymin=ymin, ymax=ymax, color=color, fill=fill, ...)
}
square <- ggplot(data.frame(x=0:10, y=0:10), aes(x=x, y=y)) + theme_light()+
scale_x_continuous(breaks=seq(0, 10, 1),minor_breaks = NULL) +
scale_y_continuous(breaks=seq(0, 10, 1),minor_breaks = NULL)
square + gg_circle(r=4.5, xc=5, yc=5, alpha=0.1)
square + gg_circle(r=0.25, xc=0.5, yc=0.5, color="blue", fill="yellow", alpha=0.1)
|
####This script serves to get overrepresented GO terms from DE (DESeq2) results
###reading options
options(stringsAsFactors = F)
### graphical options
#options(device='x11')
#grDevices::X11.options(type='cairo')
options(bitmapType='cairo')
###libraries
require(plyr)
require(reshape2)
require(ggplot2)
library(RColorBrewer)
#source("http://bioconductor.org/biocLite.R")
#biocLite("topGO")
library(topGO)
###just in case: color palette
#scale_color_manual(values=c("#000000", "#E69F00", "#56B4E9", "#009E73",
#"#F0E442", "#0072B2", "#D55E00", "#CC79A7"))
#black orange sky_blue green yellow blue vermillion reddish_purple
###############################################################################
###Functions
###This function is for list of genes => enriched GO terms. Supports custom FC cutoff.
##testing set
#filename = "EcyT12_annot.csv"; sep = ","; upORdown = "up"; gocat = "BP"
#logFCthreshold = 1; padj.threshold = 0.001; writeGenes = T
#fa_filename = "~/Documents/transcriptome_annotation/EcyBCdTP1_cor_AnnotationTable.txt"
## GOenrichment: GO term over-representation test for one DE contrast.
##
## Reads a DESeq2 result table (`filename`, row names = sequence names),
## joins it with a functional annotation table (`FA_filename`, must carry
## the GO.* columns), takes the DE gene list from `DEfilename` (column
## "Gene", split by the sign of column "salmon.DESeq2.FC"), and runs
## topGO's elim + Fisher enrichment test on the chosen ontology.
##
## Arguments:
##   filename       DE result table to annotate (read with `sep`).
##   sep            field separator of `filename`.
##   upORdown       "up" or "down": which side of the DE list to test.
##   gocat          GO category: "BP", "MF" or "CC".
##   DEfilename     csv with the DE genes ("Gene", "salmon.DESeq2.FC").
##   FA_filename    annotation table with the GO columns.
##   logFCthreshold, padj.threshold
##                  NOTE(review): only used to name the output directory;
##                  no filtering is actually performed with them here.
##   writeGenes     if TRUE, append the DE genes behind each enriched term.
##
## Side effects: writes two csv files next to `filename` (all terms with
## p < 0.01, and the terms with p < 0.001). Returns the p < 0.01 table.
GOenrichment <- function(filename, sep = ",", upORdown = "up", gocat = "BP",
                         DEfilename = "",
                         FA_filename = "", # in case GO annotations aren't included
                         logFCthreshold = 1,
                         padj.threshold = 0.05,
                         writeGenes = TRUE) {
  full_list <- read.csv(filename, sep = sep)
  ## DESeq2 tables carry the sequence name as row names; expose it as a
  ## column so we can merge with the annotation table.
  full_list$Sequence.Name <- row.names(full_list)
  annotation <- read.delim(FA_filename)
  ## inner join: keep only sequences present in both tables
  withAnnotation <- merge(x = annotation, y = full_list,
                          all.x = FALSE, all.y = FALSE, by = "Sequence.Name")
  ## DE gene set: up- or downregulated genes, by the sign of the fold change
  defile <- read.csv(DEfilename)
  if (upORdown == "up") {
    de <- defile[defile$salmon.DESeq2.FC > 0, "Gene"]
  }
  if (upORdown == "down") {
    de <- defile[defile$salmon.DESeq2.FC < 0, "Gene"]
  }
  ## pick the annotation column for the requested GO category
  if (gocat == "BP") BP <- withAnnotation[, c("Sequence.Name", "GO.Biological.Process")]
  if (gocat == "MF") BP <- withAnnotation[, c("Sequence.Name", "GO.Molecular.Function")]
  if (gocat == "CC") BP <- withAnnotation[, c("Sequence.Name", "GO.Cellular.Component")]
  ## drop sequences without any GO annotation ("-" placeholder)
  if (gocat == "BP") BPGO <- BP[BP$GO.Biological.Process != "-", ]
  if (gocat == "MF") BPGO <- BP[BP$GO.Molecular.Function != "-", ]
  if (gocat == "CC") BPGO <- BP[BP$GO.Cellular.Component != "-", ]
  ## split the "GO:NNNNNNN name | GO:NNNNNNN name" strings into per-gene vectors
  if (gocat == "BP") GOs <- strsplit(BPGO$GO.Biological.Process, split = "| ", fixed = TRUE)
  if (gocat == "MF") GOs <- strsplit(BPGO$GO.Molecular.Function, split = "| ", fixed = TRUE)
  if (gocat == "CC") GOs <- strsplit(BPGO$GO.Cellular.Component, split = "| ", fixed = TRUE)
  names(GOs) <- BPGO$Sequence.Name
  ## keep only the GO id, dropping the human-readable term name
  GOsTop <- lapply(X = GOs, function(x) gsub(" .*", "", x))
  ## 0/1 factor marking which annotated genes are DE (topGO's expected input)
  DElist <- factor(as.integer(names(GOsTop) %in% de))
  names(DElist) <- names(GOsTop)
  ## build the topGOdata object and run the elim/Fisher enrichment test
  GOdata <- new("topGOdata", ontology = gocat, allGenes = DElist,
                annot = annFUN.gene2GO, gene2GO = GOsTop)
  f <- runTest(GOdata, algorithm = "elim", statistic = "fisher")
  ## from the topGO manual: elim p-values can be interpreted as corrected /
  ## not affected by multiple testing
  signif_at_0.01 <- sum(f@score < 0.01)
  allRes <- GenTable(object = GOdata, f, topNodes = signif_at_0.01, numChar = 100)
  if (writeGenes && nrow(allRes) > 0) {
    ## append the annotated names of the DE genes behind each enriched term
    allRes$Genes <- NA
    for (i in seq_along(allRes$Genes)) { # was 1:length(...): unsafe when empty
      temp <- genesInTerm(GOdata, allRes$GO.ID[i])[[1]]
      tempde <- temp[temp %in% de]
      namestempde <- withAnnotation[withAnnotation$Sequence.Name %in% tempde, "best.hit.to.nr"]
      allRes$Genes[i] <- paste(namestempde, collapse = ", ")
    }
  }
  ## output
  ## NOTE(review): the directory is created but never used -- the csv files
  ## below are written next to `filename`, not into dir.name.
  dir.name <- paste0("GO_FC", logFCthreshold, "_padj_", padj.threshold, "_", gocat)
  if (!dir.name %in% dir()) dir.create(dir.name)
  ## full table of terms significant at p < 0.01
  names(allRes)[6] <- "p-value"
  write.csv(allRes, paste0(filename, "GO", gocat, upORdown, "_all", ".csv"))
  ## stricter table at p < 0.001; seq_len() (not 1:n) so that zero
  ## significant terms yields an empty table instead of a spurious row
  ## (the original `allRes[1:signif_at_0.001, ]` kept row 1 when n == 0)
  signif_at_0.001 <- sum(f@score < 0.001)
  Res <- allRes[seq_len(signif_at_0.001), ]
  ## NOTE(review): this name omits gocat, so BP/MF/CC runs overwrite each
  ## other's p < 0.001 file -- probably should include gocat as above.
  write.csv(Res, paste0(filename, "GO", upORdown, ".csv"))
  return(allRes)
}
## Working directory with the DE tables; all GOenrichment() calls below use
## paths relative to it.
## NOTE(review): a hard-coded absolute setwd() makes the script
## machine-specific; consider relative paths instead.
setwd("/home/drozdovapb/Research/Projects/DE/texts/Paper1_stresses/acetone_phenanthrene_story/CBPD_submit/1-R1/new_DE_comparison/")
## Earlier batch-processing attempt, kept commented out for reference.
#file1 <- "PB03_vs_B03.Ecycommon_DE.csv"
#for (file in dir()) {
# if(grepl("csv", file)) {
# # ## Biological process, logFC cutoff of 3
# # GOenrichment(filename = file, writeGenes = T)
# # GOenrichment(filename = file, upORdown = "down", writeGenes = T)
# # # ## Molecular function, logFC cutoff of 3
# # GOenrichment(filename = file, gocat = "MF", writeGenes = T)
# # GOenrichment(filename = file, upORdown = "down", gocat = "MF", writeGenes = T)
# # ## Biological process, logFC 1
# GOenrichment(filename = file1, gocat = "BP", logFCthreshold = 1, writeGenes = T)
# }}
#filename <- "../new_DE_comparison/salmon/Ecy.isoform.counts.matrix.B24h_vs_PB24.DESeq2.DE_results"
#sep = "\t"
## Functional annotation table for the first species (Ecy); reassigned
## before each species' block of calls below.
FA_filename = "~/Research/Projects/DE/annotation/EcyBCdTP1_cor_AnnotationTable.txt"
## ---- Biological Process (BP) enrichment: all species and contrasts ----
## Contrast naming: PB_vs_B = solvent (acetone) vs control; Ph_vs_PB =
## phenanthrene vs solvent; 03/24 = hours of exposure.
## NOTE(review): in every "phenanthrene" pair below the two calls are
## byte-identical (both upORdown = "down"); by analogy with the solvent
## pairs (one "up" + one "down" call) the first call of each pair was
## presumably meant to be upORdown = "up" -- confirm with the authors.
##Ecy, 3h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "up", gocat = "BP", sep = "\t", writeGenes = T,
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
##Ecy, 3h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
##Ecy, 24h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "up", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
##Ecy, 24h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
## switch to the Eve annotation table
FA_filename = "~/Research/Projects/DE/annotation/EveBCdTP1_cor_AnnotationTable.txt"
##Eve, 3h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "up", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
##Eve, 3h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
##Eve, 24h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "up", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
##Eve, 24h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
## switch to the Gla annotation table
FA_filename = "~/Research/Projects/DE/annotation/GlaBCdTP1_cor_AnnotationTable.txt"
##Gla, 3h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "up", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
##Gla, 3h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
##Gla, 24h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "up", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
##Gla, 24h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "BP", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv")
##MF
################
##CC
################
##CC
## ---- Cellular Component (CC) enrichment: same species/contrast grid as
## the BP section above. ----
## NOTE(review): the same apparent copy-paste issue as in the BP section:
## every "phenanthrene" pair is two identical upORdown = "down" calls;
## the first of each pair was presumably meant to be "up" -- confirm.
FA_filename = "~/Research/Projects/DE/annotation/EcyBCdTP1_cor_AnnotationTable.txt"
##Ecy, 3h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "up", gocat = "CC", sep = "\t", writeGenes = T,
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
##Ecy, 3h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
##Ecy, 24h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "up", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
##Ecy, 24h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
## switch to the Eve annotation table
FA_filename = "~/Research/Projects/DE/annotation/EveBCdTP1_cor_AnnotationTable.txt"
##Eve, 3h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "up", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
##Eve, 3h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
##Eve, 24h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "up", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
##Eve, 24h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
## switch to the Gla annotation table
FA_filename = "~/Research/Projects/DE/annotation/GlaBCdTP1_cor_AnnotationTable.txt"
##Gla, 3h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "up", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
##Gla, 3h, phenanthrene
## NOTE(review): duplicated "down" call -- see note above.
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
##Gla, 24h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "up", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
##Gla, 24h, phenanthrene
## NOTE(review): duplicated "down" pair; the second call of this pair
## follows immediately below this block.
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
upORdown = "down", gocat = "CC", sep = "\t",
FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv") | /CBPD_phenanthrene_scripts/GO_enrichment.R | no_license | drozdovapb/EveEcyGlaDE | R | false | false | 18,031 | r | ####This script serves to get overrepresented GO terms from DE (DESeq2) results
###reading options
options(stringsAsFactors = F)
### graphical options
#options(device='x11')
#grDevices::X11.options(type='cairo')
options(bitmapType='cairo')
###libraries
require(plyr)
require(reshape2)
require(ggplot2)
library(RColorBrewer)
#source("http://bioconductor.org/biocLite.R")
#biocLite("topGO")
library(topGO)
###just in case: color palette
#scale_color_manual(values=c("#000000", "#E69F00", "#56B4E9", "#009E73",
#"#F0E442", "#0072B2", "#D55E00", "#CC79A7"))
#black orange sky_blue green yellow blue vermillion reddish_purple
###############################################################################
###Functions
###This function is for list of genes => enriched GO terms. Supports custom FC cutoff.
##testing set
#filename = "EcyT12_annot.csv"; sep = ","; upORdown = "up"; gocat = "BP"
#logFCthreshold = 1; padj.threshold = 0.001; writeGenes = T
#fa_filename = "~/Documents/transcriptome_annotation/EcyBCdTP1_cor_AnnotationTable.txt"
## GOenrichment: GO term over-representation test for one DE contrast.
##
## Reads a DESeq2 result table (`filename`, row names = sequence names),
## joins it with a functional annotation table (`FA_filename`, must carry
## the GO.* columns), takes the DE gene list from `DEfilename` (column
## "Gene", split by the sign of column "salmon.DESeq2.FC"), and runs
## topGO's elim + Fisher enrichment test on the chosen ontology.
##
## Arguments:
##   filename       DE result table to annotate (read with `sep`).
##   sep            field separator of `filename`.
##   upORdown       "up" or "down": which side of the DE list to test.
##   gocat          GO category: "BP", "MF" or "CC".
##   DEfilename     csv with the DE genes ("Gene", "salmon.DESeq2.FC").
##   FA_filename    annotation table with the GO columns.
##   logFCthreshold, padj.threshold
##                  NOTE(review): only used to name the output directory;
##                  no filtering is actually performed with them here.
##   writeGenes     if TRUE, append the DE genes behind each enriched term.
##
## Side effects: writes two csv files next to `filename` (all terms with
## p < 0.01, and the terms with p < 0.001). Returns the p < 0.01 table.
GOenrichment <- function(filename, sep = ",", upORdown = "up", gocat = "BP",
                         DEfilename = "",
                         FA_filename = "", # in case GO annotations aren't included
                         logFCthreshold = 1,
                         padj.threshold = 0.05,
                         writeGenes = TRUE) {
  full_list <- read.csv(filename, sep = sep)
  ## DESeq2 tables carry the sequence name as row names; expose it as a
  ## column so we can merge with the annotation table.
  full_list$Sequence.Name <- row.names(full_list)
  annotation <- read.delim(FA_filename)
  ## inner join: keep only sequences present in both tables
  withAnnotation <- merge(x = annotation, y = full_list,
                          all.x = FALSE, all.y = FALSE, by = "Sequence.Name")
  ## DE gene set: up- or downregulated genes, by the sign of the fold change
  defile <- read.csv(DEfilename)
  if (upORdown == "up") {
    de <- defile[defile$salmon.DESeq2.FC > 0, "Gene"]
  }
  if (upORdown == "down") {
    de <- defile[defile$salmon.DESeq2.FC < 0, "Gene"]
  }
  ## pick the annotation column for the requested GO category
  if (gocat == "BP") BP <- withAnnotation[, c("Sequence.Name", "GO.Biological.Process")]
  if (gocat == "MF") BP <- withAnnotation[, c("Sequence.Name", "GO.Molecular.Function")]
  if (gocat == "CC") BP <- withAnnotation[, c("Sequence.Name", "GO.Cellular.Component")]
  ## drop sequences without any GO annotation ("-" placeholder)
  if (gocat == "BP") BPGO <- BP[BP$GO.Biological.Process != "-", ]
  if (gocat == "MF") BPGO <- BP[BP$GO.Molecular.Function != "-", ]
  if (gocat == "CC") BPGO <- BP[BP$GO.Cellular.Component != "-", ]
  ## split the "GO:NNNNNNN name | GO:NNNNNNN name" strings into per-gene vectors
  if (gocat == "BP") GOs <- strsplit(BPGO$GO.Biological.Process, split = "| ", fixed = TRUE)
  if (gocat == "MF") GOs <- strsplit(BPGO$GO.Molecular.Function, split = "| ", fixed = TRUE)
  if (gocat == "CC") GOs <- strsplit(BPGO$GO.Cellular.Component, split = "| ", fixed = TRUE)
  names(GOs) <- BPGO$Sequence.Name
  ## keep only the GO id, dropping the human-readable term name
  GOsTop <- lapply(X = GOs, function(x) gsub(" .*", "", x))
  ## 0/1 factor marking which annotated genes are DE (topGO's expected input)
  DElist <- factor(as.integer(names(GOsTop) %in% de))
  names(DElist) <- names(GOsTop)
  ## build the topGOdata object and run the elim/Fisher enrichment test
  GOdata <- new("topGOdata", ontology = gocat, allGenes = DElist,
                annot = annFUN.gene2GO, gene2GO = GOsTop)
  f <- runTest(GOdata, algorithm = "elim", statistic = "fisher")
  ## from the topGO manual: elim p-values can be interpreted as corrected /
  ## not affected by multiple testing
  signif_at_0.01 <- sum(f@score < 0.01)
  allRes <- GenTable(object = GOdata, f, topNodes = signif_at_0.01, numChar = 100)
  if (writeGenes && nrow(allRes) > 0) {
    ## append the annotated names of the DE genes behind each enriched term
    allRes$Genes <- NA
    for (i in seq_along(allRes$Genes)) { # was 1:length(...): unsafe when empty
      temp <- genesInTerm(GOdata, allRes$GO.ID[i])[[1]]
      tempde <- temp[temp %in% de]
      namestempde <- withAnnotation[withAnnotation$Sequence.Name %in% tempde, "best.hit.to.nr"]
      allRes$Genes[i] <- paste(namestempde, collapse = ", ")
    }
  }
  ## output
  ## NOTE(review): the directory is created but never used -- the csv files
  ## below are written next to `filename`, not into dir.name.
  dir.name <- paste0("GO_FC", logFCthreshold, "_padj_", padj.threshold, "_", gocat)
  if (!dir.name %in% dir()) dir.create(dir.name)
  ## full table of terms significant at p < 0.01
  names(allRes)[6] <- "p-value"
  write.csv(allRes, paste0(filename, "GO", gocat, upORdown, "_all", ".csv"))
  ## stricter table at p < 0.001; seq_len() (not 1:n) so that zero
  ## significant terms yields an empty table instead of a spurious row
  ## (the original `allRes[1:signif_at_0.001, ]` kept row 1 when n == 0)
  signif_at_0.001 <- sum(f@score < 0.001)
  Res <- allRes[seq_len(signif_at_0.001), ]
  ## NOTE(review): this name omits gocat, so BP/MF/CC runs overwrite each
  ## other's p < 0.001 file -- probably should include gocat as above.
  write.csv(Res, paste0(filename, "GO", upORdown, ".csv"))
  return(allRes)
}
# --- Working directory and legacy exploratory code -----------------------
# NOTE(review): hard-coded absolute path only works on the original author's
# machine; a project-relative path (or here::here()) would be more portable.
setwd("/home/drozdovapb/Research/Projects/DE/texts/Paper1_stresses/acetone_phenanthrene_story/CBPD_submit/1-R1/new_DE_comparison/")
# The commented block below is an earlier attempt to loop GOenrichment over
# every csv in the directory; superseded by the explicit calls further down.
#file1 <- "PB03_vs_B03.Ecycommon_DE.csv"
#for (file in dir()) {
# if(grepl("csv", file)) {
# # ## Biological process, logFC cutoff of 3
# # GOenrichment(filename = file, writeGenes = T)
# # GOenrichment(filename = file, upORdown = "down", writeGenes = T)
# # # ## Molecular function, logFC cutoff of 3
# # GOenrichment(filename = file, gocat = "MF", writeGenes = T)
# # GOenrichment(filename = file, upORdown = "down", gocat = "MF", writeGenes = T)
# # ## Biological process, logFC 1
# GOenrichment(filename = file1, gocat = "BP", logFCthreshold = 1, writeGenes = T)
# }}
#filename <- "../new_DE_comparison/salmon/Ecy.isoform.counts.matrix.B24h_vs_PB24.DESeq2.DE_results"
#sep = "\t"
# Functional-annotation table for the Ecy assembly, used by the
# GOenrichment calls that follow.
FA_filename = "~/Research/Projects/DE/annotation/EcyBCdTP1_cor_AnnotationTable.txt"
## GO enrichment, Biological Process (BP), per species / time point / contrast.
## Solvent contrasts are run as an up/down pair.
## NOTE(review): every phenanthrene contrast below runs upORdown = "down"
## TWICE — by analogy with the solvent pairs, the first call of each pair was
## probably meant to be "up". Confirm before relying on the "up" outputs.
## (writeGenes = T on the first call: prefer TRUE over T; left as-is here.)
##Ecy, 3h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "up", gocat = "BP", sep = "\t", writeGenes = T,
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
##Ecy, 3h, phenanthrene
## NOTE(review): duplicated "down" pair — "up" run appears to be missing
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
##Ecy, 24h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "up", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
##Ecy, 24h, phenanthrene
## NOTE(review): duplicated "down" pair — "up" run appears to be missing
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
# Annotation table for the Eve assembly
FA_filename = "~/Research/Projects/DE/annotation/EveBCdTP1_cor_AnnotationTable.txt"
##Eve, 3h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "up", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
##Eve, 3h, phenanthrene
## NOTE(review): duplicated "down" pair — "up" run appears to be missing
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
##Eve, 24h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "up", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
##Eve, 24h, phenanthrene
## NOTE(review): duplicated "down" pair — "up" run appears to be missing
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
# Annotation table for the Gla assembly
FA_filename = "~/Research/Projects/DE/annotation/GlaBCdTP1_cor_AnnotationTable.txt"
##Gla, 3h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "up", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
##Gla, 3h, phenanthrene
## NOTE(review): duplicated "down" pair — "up" run appears to be missing
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
##Gla, 24h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "up", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
##Gla, 24h, phenanthrene
## NOTE(review): duplicated "down" pair — "up" run appears to be missing
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "BP", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv")
## GO enrichment, Cellular Component (CC), per species / time point / contrast.
## NOTE(review): the ##MF header below has no calls under it — Molecular
## Function analyses were apparently never run in this script; confirm.
## NOTE(review): as in the BP section, each phenanthrene contrast runs
## upORdown = "down" twice; the first of each pair was probably meant as "up".
##MF
################
##CC
################
##CC
# Annotation table for the Ecy assembly
FA_filename = "~/Research/Projects/DE/annotation/EcyBCdTP1_cor_AnnotationTable.txt"
##Ecy, 3h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "up", gocat = "CC", sep = "\t", writeGenes = T,
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Ecycommon_DE.csv")
##Ecy, 3h, phenanthrene
## NOTE(review): duplicated "down" pair
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Ecycommon_DE.csv")
##Ecy, 24h, solvent
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "up", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Ecycommon_DE.csv")
##Ecy, 24h, phenanthrene
## NOTE(review): duplicated "down" pair
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
GOenrichment(filename = "salmon/Ecy.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Ecycommon_DE.csv")
# Annotation table for the Eve assembly
FA_filename = "~/Research/Projects/DE/annotation/EveBCdTP1_cor_AnnotationTable.txt"
##Eve, 3h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "up", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Evecommon_DE.csv")
##Eve, 3h, phenanthrene
## NOTE(review): duplicated "down" pair
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Evecommon_DE.csv")
##Eve, 24h, solvent
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "up", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Evecommon_DE.csv")
##Eve, 24h, phenanthrene
## NOTE(review): duplicated "down" pair
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
GOenrichment(filename = "salmon/Eve.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Evecommon_DE.csv")
# Annotation table for the Gla assembly
FA_filename = "~/Research/Projects/DE/annotation/GlaBCdTP1_cor_AnnotationTable.txt"
##Gla, 3h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "up", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB03_vs_B03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB03_vs_B03.Glacommon_DE.csv")
##Gla, 3h, phenanthrene
## NOTE(review): duplicated "down" pair
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph03_vs_PB03.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph03_vs_PB03.Glacommon_DE.csv")
##Gla, 24h, solvent
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "up", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.PB24_vs_B24h.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "PB24_vs_B24h.Glacommon_DE.csv")
##Gla, 24h, phenanthrene
## NOTE(review): duplicated "down" pair
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv")
GOenrichment(filename = "salmon/Gla.isoform.counts.matrix.Ph24_vs_PB24.DESeq2.DE_results",
             upORdown = "down", gocat = "CC", sep = "\t",
# NOTE(review): the trailing " |" on the next line is dataset-extraction
# residue (a column separator), not R code.
             FA_filename = FA_filename, DEfilename = "Ph24_vs_PB24.Glacommon_DE.csv") |
#' findmass
#'
#' see if any features match a given mass, and whether they are plausibly M0
#' @details a convenience function to perform a targeted search of all features for a mass of interest. Also performs a crude plausibility check as to whether the matched feature could be M0, based on the assumption of approximately 1 carbon per 17 m/z units and natural isotopic abundance of 1.1% 13C. Note that this function returns the cluster to which the feature is assigned, but that the M0_plausibility is independent of cluster membership.
#'
#' @param ramclustObj R object: the ramclustR object to explore
#' @param mz numeric: mz value to search for
#' @param mztol numeric: absolute mass tolerance around mz
#' @param rttol numeric: when examining isotope patterns, feature retention time tolerance around features matching mz +- mztol
#' @param zmax integer: maximum charge state to consider. default is 6.
#' @param m.check logical: check whether the matching masses are plausibly M0. That is, we look for ions 1 proton mass (from charge state 1:zmax) below the target m/z at the same time that have intensities consistent with target ion being a non-M0 isotope.
#' @return returns a table to the console listing masses which match, their retention time and intensity, and whether it appears to be plausible as M0
#' @keywords 'ramclustR' 'RAMClustR', 'ramclustR', 'metabolomics', 'mass spectrometry', 'clustering', 'feature', 'xcms', 'MSFinder'
#' @author Corey Broeckling
#' @export
findmass <- function (ramclustObj = NULL,
                      mz = NULL,
                      mztol = 0.02,
                      rttol = 2,
                      zmax = 6,
                      m.check = TRUE)
{
  proton <- 1.007276  # proton mass, Da

  if (is.null(mz)) {
    stop("must set 'mz'", "\n")
  }
  if (is.null(mztol)) {
    stop("must set 'mztol'", "\n")
  }

  # indices of all features whose m/z falls within mztol of the target
  tar <- which(abs(ramclustObj$fmz - mz) <= mztol)

  if (length(tar) == 0) {
    # no hits: return a zero-row data.frame with the expected columns
    out <- data.frame(featn = NA, featclus = NA,
                      mz = NA, rt = NA,
                      int = NA, M0_plausible = NA)
    out <- out[0, ]
  } else {
    out <- data.frame(featn = tar, featclus = ramclustObj$featclus[tar],
                      mz = ramclustObj$fmz[tar], rt = ramclustObj$frt[tar],
                      int = ramclustObj$msint[tar],
                      M0_plausible = rep(NA, length(tar)))
    if (m.check) {
      for (i in seq_along(tar)) {
        # collect co-eluting ions one proton spacing below the target m/z
        # for each charge state; the spacing shrinks as proton/z.
        # bug fix: the charge-state loop variable j was previously unused in
        # the mass offset, so only z = 1 was ever tested despite zmax.
        check <- integer(0)
        for (j in seq_len(zmax)) {
          check1 <- which((abs(ramclustObj$fmz - (mz - proton / j)) <= mztol) &
                            (abs(ramclustObj$frt - ramclustObj$frt[tar[i]]) <= rttol))
          check <- unique(c(check, check1))
        }
        if (length(check) > 0) {
          # expected intensity of the putative M0, assuming ~1 carbon per
          # 17 m/z units and 1.1% natural 13C abundance; accept 0.5x-2x
          negrange <- c(0.5, 2) * (ramclustObj$msint[tar[i]] /
                                     ((ramclustObj$fmz[tar[i]] / 17) * 0.011))
          # any candidate inside that window means the target is likely a
          # higher isotopologue, i.e. not plausibly M0
          out[i, "M0_plausible"] <- !any(ramclustObj$msint[check] > negrange[1] &
                                           ramclustObj$msint[check] < negrange[2])
        } else {
          out[i, "M0_plausible"] <- TRUE
        }
      }
    }
  }
  return(out)
}
| /R/findmass.R | no_license | inambioinfo/RAMClustR | R | false | false | 3,313 | r | #' findmass
#'
#' see if any features match a given mass, and whether they are plausibly M0
#' @details a convenience function to perform a targeted search of all feaures for a mass of interest. Also performs a crude plausibility check as to whether the matched feature could be M0, based on the assumption of approximately 1 carbon per 17 m/z units and natural isottopic abundance of 1.1% 13C. Note that this function returns the cluster to which the feature is assigned, but that the M0_plausibility is independent of cluster membership.
#'
#' @param ramclustObj R object: the ramclustR object to explore
#' @param mz numeric: mz value to search for
#' @param mztol numeric: absolute mass tolerance around mz
#' @param rttol numeric: when examining isotope patterns, feaure retention time tolerance around features matching mz +- mztol
#' @param zmax integer: maximum charge state to consider. default is 6.
#' @param m.check logical: check whether the matching masses are plausibly M0. That is, we look for ions 1 proton mass (from charge state 1:zmax) below the target m/z at the same time that have intensities consistent with target ion being a non-M0 isotope.
#' @return returns a table to the console listing masses which match, their retention time and intensity, and whether it appears to be plausible as M0
#' @keywords 'ramclustR' 'RAMClustR', 'ramclustR', 'metabolomics', 'mass spectrometry', 'clustering', 'feature', 'xcms', 'MSFinder'
#' @author Corey Broeckling
#' @export
findmass <- function (ramclustObj = NULL,
                      mz = NULL,
                      mztol = 0.02,
                      rttol = 2,
                      zmax = 6,
                      m.check = TRUE)
{
  # Report every feature whose m/z lies within mztol of the requested mass,
  # optionally flagging whether each hit is plausible as a monoisotopic (M0)
  # ion given co-eluting lower-mass candidates.
  if (is.null(mz)) {
    stop("must set 'mz'", "\n")
  }
  if (is.null(mztol)) {
    stop("must set 'mztol'", "\n")
  }

  hits <- which(abs(ramclustObj$fmz - mz) <= mztol)

  if (length(hits) == 0) {
    # nothing matched: hand back a zero-row frame with the usual columns
    template <- data.frame(featn = NA, featclus = NA,
                           mz = NA, rt = NA,
                           int = NA, M0_plausible = NA)
    return(template[0, ])
  }

  out <- data.frame(featn = hits,
                    featclus = ramclustObj$featclus[hits],
                    mz = ramclustObj$fmz[hits],
                    rt = ramclustObj$frt[hits],
                    int = ramclustObj$msint[hits],
                    M0_plausible = rep(NA, length(hits)))

  if (m.check) {
    for (i in seq_along(hits)) {
      rt.i <- ramclustObj$frt[hits[i]]
      lower <- integer(0)
      for (j in seq_len(zmax)) {
        # NOTE(review): the candidate mass is mz - 1.007276 on every pass;
        # j never enters the offset, so all zmax iterations test the same
        # window — confirm whether 1.007276/j was intended.
        cand <- which(abs(ramclustObj$fmz - mz + 1.007276) <= mztol &
                        abs(ramclustObj$frt - rt.i) <= rttol)
        lower <- unique(c(lower, cand))
      }
      if (length(lower) > 0) {
        # intensity the true M0 would need for the target to be an isotope
        # peak (~1 carbon per 17 m/z, 1.1% 13C), within a 0.5x-2x window
        expected <- ramclustObj$msint[hits[i]] /
          ((ramclustObj$fmz[hits[i]] / 17) * 0.011)
        window <- c(0.5, 2) * expected
        inside <- ramclustObj$msint[lower] > window[1] &
          ramclustObj$msint[lower] < window[2]
        out[i, "M0_plausible"] <- !any(inside)
      } else {
        out[i, "M0_plausible"] <- TRUE
      }
    }
  }
  out
}
|
# Example extracted from the XLConnect help page for col2idx():
# converting Excel column names to numeric indices.
library(XLConnect)
### Name: col2idx
### Title: Converting Excel column names to indices
### Aliases: col2idx
### Keywords: utilities
### ** Examples
# convert Excel-style column letters to their numeric positions
col2idx(c("A", "BTG"))
| /data/genthat_extracted_code/XLConnect/examples/col2idx.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 180 | r | library(XLConnect)
### Name: col2idx
### Title: Converting Excel column names to indices
### Aliases: col2idx
### Keywords: utilities
### ** Examples
col2idx(c("A", "BTG"))
|
# --- Lab hand-out: downloading and inspecting spatial data ----------------
# Teaching template: lines containing ____ are fill-in-the-blank exercises
# and are NOT valid R until completed by the student.
# NOTE(review): rm(list = ls()) and setwd() only make sense in the author's
# own session; kept as-is because this is a teaching script.
rm(list = ls())
setwd("~/Desktop/Lab R-Geography ")
#library(sf)
library(tmap)
library(dplyr)
library(rgdal) #to import shapefiles
library(broom) #to convert shapefiles into the data frame structure we need
library(haven)
library(ggmap)
#EXAMPLE I: Downloading a file
# GADM level-1 administrative boundaries for Italy, as a SpatialPolygonsDataFrame
download.file("http://biogeo.ucdavis.edu/data/gadm2.8/rds/ITA_adm1.rds",
"ITA_adm1.rds", mode = "wb")
italy_sp = readRDS("ITA_adm1.rds")
#EXERCISE I: Try downloading another country of your choice
download.file("http://biogeo.ucdavis.edu/data/gadm2.8/rds/FRA_adm1.rds",
"FRA_adm1.rds", mode = "wb")
france_sp = readRDS("FRA_adm1.rds")
#EXERCISE II
# Print france_sp
# (blank to complete)
__(___)
# Call summary() on france_sp
___(___)
#check the structure of the data
___@___
# Call plot() on france_sp
plot(____)
# Call str on france_sp
str(france_sp)
#EXERCISE IV
#Try running the following code
str(france_sp, max.level = 2)
str(france_sp@polygons, max.level=2)
# Taking a deeper look:
tenth <- france_sp@polygons[[10]]
str(tenth, max.level = 2)
#EXERCISE V
# Call head() and str() on the data slot of countries_spdf
# NOTE(review): the comment above says countries_spdf but the code uses france_sp
head(france_sp@data)
str(france_sp@data)
# Pull out the name column using $
france_sp$NAME_1
# Pull out the subregion column using [[
france_sp[["NAME_1"]]
# Subsetting
# (blank: fill in a region name from france_sp$NAME_1)
france_centre<- france_sp$NAME_1 == "_____"
##########################################################
# Let's work with tmap:
library(tmap)
#Let's download the sp file
# Eurostat NUTS 2013 boundaries, downloaded to a temp file and unzipped
temp <- tempfile(fileext = ".zip")
download.file("http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/NUTS_2013_01M_SH.zip", temp)
unzip(temp)
#load the data and filter
# NOTE(review): readOGR() comes from rgdal, which has been retired;
# sf::st_read() is the current replacement — left unchanged here.
EU_NUTS = readOGR(dsn = "./NUTS_2013_01M_SH/data", layer = "NUTS_RG_01M_2013")
#Explore the spatialdataframe:
summary(EU_NUTS)
# plot EU_NUTS
plot(EU_NUTS)
#Subset our map
eu_nuts2 <- subset(EU_NUTS, STAT_LEVL_ == 2) # set NUTS level
eu_nuts2
# income is a dataframe that contains income data at NUTS-2 level
income<- read_dta("basedatos.dta")
income$region
#Merge eurostat geometries to the income table on the NUTS-2 code
nuts_merge<- merge(eu_nuts2, income, by.x="NUTS_ID", by.y="region")
#Call Summary
summary(nuts_merge)
# Choose a variable with col mapped to it
tm_shape(nuts_merge) +
tm_fill(col = "GDPpc2017")
#lets try going back to Madrid
# Since the data is not very good lets move on to Madrid again:
sf_madrid <- readOGR("municipios_y_distritos_madrid.shp")
sf_madrid$municipio_
#We want to keep only municipios in Madrid city
# District names contain octal escapes (e.g. "\303\255" is the UTF-8 byte
# pair for an accented i), so they match the shapefile's encoded names.
madrid_city<- c( "Madrid-Retiro",
"Madrid-Salamanca",
"Madrid-Centro",
"Madrid-Arganzuela",
"Madrid-Chamart\303\255n",
"Madrid-Tetu\303\241n",
"Madrid-Chamber\303\255",
"Madrid-Fuencarral-El Pardo",
"Madrid-Moncloa-Aravaca",
"Madrid-Latina",
"Madrid-Carabanchel",
"Madrid-Usera",
"Madrid-Puente de Vallecas",
"Madrid-San Blas-Canillejas",
"Madrid-Barajas",
"Madrid-Moratalaz",
"Madrid-Ciudad Lineal",
"Madrid-Hortaleza",
"Madrid-Villaverde",
"Madrid-Villa de Vallecas",
"Madrid-Vic\303\241lvaro")
# keep only the polygons whose district name is in the Madrid-city list
madrid_city_map <- sf_madrid[sf_madrid$municipio_ %in% madrid_city, ]
#Now we have our sp of Madrid city only
madrid_city_map
plot(madrid_city_map)
# Load back new data from betting project:
betting<- read_dta("mc_income.dta")
betting$distrito
# Map numeric district codes 1-21 onto the district names used by the
# shapefile. (A named lookup vector would be more compact, but the explicit
# case_when ladder is kept for teaching purposes.)
betting$municipality<-case_when(
betting$distrito==1 ~ "Madrid-Arganzuela",
betting$distrito==2 ~ "Madrid-Barajas",
betting$distrito==3 ~ "Madrid-Carabanchel",
betting$distrito==4 ~ "Madrid-Centro",
betting$distrito==5 ~ "Madrid-Chamart\303\255n",
betting$distrito==6 ~ "Madrid-Chamber\303\255",
betting$distrito==7 ~ "Madrid-Ciudad Lineal",
betting$distrito==8 ~ "Madrid-Fuencarral-El Pardo",
betting$distrito==9 ~ "Madrid-Hortaleza",
betting$distrito==10 ~ "Madrid-Latina",
betting$distrito==11 ~ "Madrid-Moncloa-Aravaca",
betting$distrito==12 ~ "Madrid-Moratalaz",
betting$distrito==13 ~ "Madrid-Puente de Vallecas",
betting$distrito==14 ~ "Madrid-Retiro",
betting$distrito==15 ~ "Madrid-Salamanca",
betting$distrito==16 ~ "Madrid-San Blas-Canillejas",
betting$distrito==17 ~ "Madrid-Tetu\303\241n",
betting$distrito==18 ~ "Madrid-Usera",
betting$distrito==19 ~ "Madrid-Vic\303\241lvaro",
betting$distrito==20 ~ "Madrid-Villa de Vallecas",
betting$distrito==21 ~ "Madrid-Villaverde")
# Check for duplicates
any(duplicated(betting$municipality))
any(duplicated(madrid_city_map$municipio_))
# Mergin the two datasets:
# (fill-in-the-blank: merge madrid_city_map with betting on the district name)
madrid_merge<- merge(____, ____, by.x="_____", by.y="______")
# fast way:
spplot(madrid_merge, "renta")
spplot(madrid_merge, "renta", main = "Income")
#THIS DOESNT WORK
#ggmap(madrid_map) +
# geom_sf(data = madrid_merge ,aes(fill=renta), inherit.aes = FALSE) #error
library(sp)
library(tmap)
# Plot from last exercise
tm_shape(madrid_merge) +
tm_fill(col = "renta")
# Save a static version "population.png"
tmap_save(filename = "madrid_income.png")
# Add style argument to the tm_fill() call
# (blanks: pick a variable for col and a classification style)
tm_shape(france_sp) +
tm_fill(col = "", style="") +
# Add a tm_borders() layer
tm_borders(col="")
# New plot, with tm_bubbles() instead of tm_fill()
# NOTE(review): countries_spdf is never created in this script (the
# EXERCISE V comment also mentions it) — presumably carried over from an
# external course example; this call will fail as written.
tm_shape(countries_spdf) +
tm_bubbles(size = "population", style="quantile") +
# Add a tm_borders() layer
tm_borders(col="burlywood4")
# Save a static version "population.png"
# (blank: supply a file name before the .png extension)
tmap_save(filename = ".png")
| /exercies_polygons.R | no_license | marespadafor/drawingmaps | R | false | false | 6,065 | r | rm(list = ls())
setwd("~/Desktop/Lab R-Geography ")
#library(sf)
library(tmap)
library(dplyr)
library(rgdal) #to import shapefiles
library(broom) #to convert shapefiles into the data frame structure we need
library(haven)
library(ggmap)
#EXAMPLE I: Downloading a file
download.file("http://biogeo.ucdavis.edu/data/gadm2.8/rds/ITA_adm1.rds",
"ITA_adm1.rds", mode = "wb")
italy_sp = readRDS("ITA_adm1.rds")
#EXERCISE I: Try downloading another country of your choice
download.file("http://biogeo.ucdavis.edu/data/gadm2.8/rds/FRA_adm1.rds",
"FRA_adm1.rds", mode = "wb")
france_sp = readRDS("FRA_adm1.rds")
#EXERCISE II
# Print france_sp
__(___)
# Call summary() on france_sp
___(___)
#check the structure of the data
___@___
# Call plot() on france_sp
plot(____)
# Call str on france_sp
str(france_sp)
#EXERCISE IV
#Try running the following code
str(france_sp, max.level = 2)
str(france_sp@polygons, max.level=2)
# Taking a deeper look:
tenth <- france_sp@polygons[[10]]
str(tenth, max.level = 2)
#EXERCISE V
# Call head() and str() on the data slot of countries_spdf
head(france_sp@data)
str(france_sp@data)
# Pull out the name column using $
france_sp$NAME_1
# Pull out the subregion column using [[
france_sp[["NAME_1"]]
# Subsetting
france_centre<- france_sp$NAME_1 == "_____"
##########################################################
# Let's work with tmap:
library(tmap)
#Let's download the sp file
temp <- tempfile(fileext = ".zip")
download.file("http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/NUTS_2013_01M_SH.zip", temp)
unzip(temp)
#load the data and filter
EU_NUTS = readOGR(dsn = "./NUTS_2013_01M_SH/data", layer = "NUTS_RG_01M_2013")
#Explore the spatialdataframe:
summary(EU_NUTS)
# plot EU_NUTS
plot(EU_NUTS)
#Subset our map
eu_nuts2 <- subset(EU_NUTS, STAT_LEVL_ == 2) # set NUTS level
eu_nuts2
# inomce is a dataframe that contains income data NUTS-2
income<- read_dta("basedatos.dta")
income$region
#Merge eurost to EU_NUTS
nuts_merge<- merge(eu_nuts2, income, by.x="NUTS_ID", by.y="region")
#Call Summary
summary(nuts_merge)
# Choose a variable with col mapped to it
tm_shape(nuts_merge) +
tm_fill(col = "GDPpc2017")
#lets try going back to Madrid
# Since the data is not very good lets move on to Madrid again:
sf_madrid <- readOGR("municipios_y_distritos_madrid.shp")
sf_madrid$municipio_
#We want to keep only municipios in Madrid city
madrid_city<- c( "Madrid-Retiro",
"Madrid-Salamanca",
"Madrid-Centro",
"Madrid-Arganzuela",
"Madrid-Chamart\303\255n",
"Madrid-Tetu\303\241n",
"Madrid-Chamber\303\255",
"Madrid-Fuencarral-El Pardo",
"Madrid-Moncloa-Aravaca",
"Madrid-Latina",
"Madrid-Carabanchel",
"Madrid-Usera",
"Madrid-Puente de Vallecas",
"Madrid-San Blas-Canillejas",
"Madrid-Barajas",
"Madrid-Moratalaz",
"Madrid-Ciudad Lineal",
"Madrid-Hortaleza",
"Madrid-Villaverde",
"Madrid-Villa de Vallecas",
"Madrid-Vic\303\241lvaro")
madrid_city_map <- sf_madrid[sf_madrid$municipio_ %in% madrid_city, ]
#Now we have our sp of Madrid city only
madrid_city_map
plot(madrid_city_map)
# Load back new data from betting project:
betting<- read_dta("mc_income.dta")
betting$distrito
betting$municipality<-case_when(
betting$distrito==1 ~ "Madrid-Arganzuela",
betting$distrito==2 ~ "Madrid-Barajas",
betting$distrito==3 ~ "Madrid-Carabanchel",
betting$distrito==4 ~ "Madrid-Centro",
betting$distrito==5 ~ "Madrid-Chamart\303\255n",
betting$distrito==6 ~ "Madrid-Chamber\303\255",
betting$distrito==7 ~ "Madrid-Ciudad Lineal",
betting$distrito==8 ~ "Madrid-Fuencarral-El Pardo",
betting$distrito==9 ~ "Madrid-Hortaleza",
betting$distrito==10 ~ "Madrid-Latina",
betting$distrito==11 ~ "Madrid-Moncloa-Aravaca",
betting$distrito==12 ~ "Madrid-Moratalaz",
betting$distrito==13 ~ "Madrid-Puente de Vallecas",
betting$distrito==14 ~ "Madrid-Retiro",
betting$distrito==15 ~ "Madrid-Salamanca",
betting$distrito==16 ~ "Madrid-San Blas-Canillejas",
betting$distrito==17 ~ "Madrid-Tetu\303\241n",
betting$distrito==18 ~ "Madrid-Usera",
betting$distrito==19 ~ "Madrid-Vic\303\241lvaro",
betting$distrito==20 ~ "Madrid-Villa de Vallecas",
betting$distrito==21 ~ "Madrid-Villaverde")
# Check for duplicates
any(duplicated(betting$municipality))
any(duplicated(madrid_city_map$municipio_))
# Mergin the two datasets:
madrid_merge<- merge(____, ____, by.x="_____", by.y="______")
# fast way:
spplot(madrid_merge, "renta")
spplot(madrid_merge, "renta", main = "Income")
#THIS DOESNT WORK
#ggmap(madrid_map) +
# geom_sf(data = madrid_merge ,aes(fill=renta), inherit.aes = FALSE) #error
library(sp)
library(tmap)
# Plot from last exercise
tm_shape(madrid_merge) +
tm_fill(col = "renta")
# Save a static version "population.png"
tmap_save(filename = "madrid_income.png")
# Add style argument to the tm_fill() call
tm_shape(france_sp) +
tm_fill(col = "", style="") +
# Add a tm_borders() layer
tm_borders(col="")
# New plot, with tm_bubbles() instead of tm_fill()
tm_shape(countries_spdf) +
tm_bubbles(size = "population", style="quantile") +
# Add a tm_borders() layer
tm_borders(col="burlywood4")
# Save a static version "population.png"
tmap_save(filename = ".png")
|
# Permutation test for SVM classification performance: the observed value
# (Accuracyreal, computed upstream) is compared against a null distribution
# built by refitting a tuned SVM on Npermutation label-shuffled copies of
# the training set. Requires trainset, testset, Npermutation, bestk,
# Acc_sampled and Accuracyreal to exist already.
# NOTE(review): tune.svm/svm/classAgreement presumably come from e1071 and
# %>%/select from dplyr — confirm the packages are attached upstream.
trainsetp=trainset
b=Sys.time()
for (i in 1:Npermutation){
#permute trainset:
#taking all permutation is impossible when we get more than 5 animals per group, so we sample one possibility
trainsetp$groupingvar=sample (trainset$groupingvar)
#create svm model: we tune only in one kernel:
obj <- tune.svm(groupingvar~., data = trainsetp, gamma = 4^(-5:5), cost = 4^(-5:5),
                tune.control(sampling = "cross"),kernel = bestk[[1]])
svm.model <- svm(groupingvar ~ ., data = trainsetp, cost = obj$best.parameters$cost, gamma = obj$best.parameters$gamma, kernel = bestk[[1]])
svm.pred <- predict(svm.model, testset %>% select(-groupingvar))
SVMprediction_res =table(pred = svm.pred, true = testset$groupingvar)
#SVMprediction = as.data.frame(SVMprediction_res)
#Accuracy of grouping and plot
# NOTE(review): the statistic collected is Cohen's kappa (temp$kappa), not raw
# accuracy, despite the vector name Acc_sampled — confirm Accuracyreal is on
# the same scale.
temp =classAgreement (SVMprediction_res)
# growing the vector inside the loop; preallocating would be faster for
# large Npermutation, but is harmless here
Acc_sampled = c(Acc_sampled, temp$kappa)
}
print("time to perform the analysis:")
print(Sys.time()-b)
# null distribution of permuted statistics, with the observed value in red
hist(Acc_sampled, breaks=c(-10:10)/10)
abline(v = Accuracyreal, col="Red")
# Exports `binconf`
# NOTE(review): binconf is from Hmisc — confirm it is attached upstream.
k <- sum(Acc_sampled >= Accuracyreal) # one-tailed test
print("P value for a one-tailed test:")
print(zapsmall(binconf(k, length(Acc_sampled), method='exact'))) # 95% CI by default
save.image(file= "thisisatest.rdata")
| /analysis/Rcode/multidimensional_analysis_perm_svm.R | permissive | dscun/HCS_analysis | R | false | false | 1,374 | r |
trainsetp=trainset
b=Sys.time()
for (i in 1:Npermutation){
#permute trainset:
#taking all permutation is impossible when we get more than 5 animals per group, so we sample one possibility
trainsetp$groupingvar=sample (trainset$groupingvar)
#create svm model: we tune only in one kernel:
obj <- tune.svm(groupingvar~., data = trainsetp, gamma = 4^(-5:5), cost = 4^(-5:5),
tune.control(sampling = "cross"),kernel = bestk[[1]])
svm.model <- svm(groupingvar ~ ., data = trainsetp, cost = obj$best.parameters$cost, gamma = obj$best.parameters$gamma, kernel = bestk[[1]])
svm.pred <- predict(svm.model, testset %>% select(-groupingvar))
SVMprediction_res =table(pred = svm.pred, true = testset$groupingvar)
#SVMprediction = as.data.frame(SVMprediction_res)
#Accuracy of grouping and plot
temp =classAgreement (SVMprediction_res)
Acc_sampled = c(Acc_sampled, temp$kappa)
}
print("time to perform the analysis:")
print(Sys.time()-b)
hist(Acc_sampled, breaks=c(-10:10)/10)
abline(v = Accuracyreal, col="Red")
# Exports `binconf`
k <- sum(Acc_sampled >= Accuracyreal) # one-tailed test
print("P value for a one-tailed test:")
print(zapsmall(binconf(k, length(Acc_sampled), method='exact'))) # 95% CI by default
save.image(file= "thisisatest.rdata")
|
## Models with simulated data
joint_model_cov <- function(structured_data, unstructured_data, dat1, biasfield, resolution = c(10,10), biascov){
#packages
library(INLA)
library(reshape2)
library(rgeos)
library(fields)
max_x <- max(biasfield$x)
max_y <- max(biasfield$y)
#preparation - mesh construction - use the loc.domain argument
mesh <- inla.mesh.2d(loc.domain = biasfield[,c(1,2)],max.edge=c(20,40),cutoff=2, offset = c(5,20))
#plot the mesh to see what it looks like
#plot(mesh)
##set the spde representation to be the mesh just created
spde <- inla.spde2.matern(mesh)
#make A matrix for structured data
structured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(structured_data[,2:3]))
#make A matrix for unstructured data
unstructured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(unstructured_data[,1:2]))
# Joint model
# One spatial field
# Uses Simpson approach for PP data
# Binomial model for PA data
# Using cloglog
# create integration stack
loc.d <- t(matrix(c(0,0,max_x,0,max_x,max_y,0,max_y,0,0), 2))
#make dual mesh
dd <- deldir::deldir(mesh$loc[, 1], mesh$loc[, 2])
tiles <- deldir::tile.list(dd)
#make domain into spatial polygon
domainSP <- SpatialPolygons(list(Polygons(
list(Polygon(loc.d)), '0')))
#intersection between domain and dual mesh
poly.gpc <- as(domainSP@polygons[[1]]@Polygons[[1]]@coords, "gpc.poly")
# w now contains area of voronoi polygons
w <- sapply(tiles, function(p) rgeos::area.poly(rgeos::intersect(as(cbind(p$x, p$y), "gpc.poly"), poly.gpc)))
#check some have 0 weight
table(w>0)
##plot stuff
# par(mfrow=c(1,1))
# plot(mesh$loc, asp=1, col=(w==0)+1, pch=19, xlab='', ylab='')
# plot(dd, add=TRUE)
# lines(loc.d, col=3)
nv <- mesh$n
n <- nrow(unstructured_data)
#change data to include 0s for nodes and 1s for presences
y.pp <- rep(0:1, c(nv, n))
#add expectation vector (area for integration points/nodes and 0 for presences)
e.pp <- c(w, rep(0, n))
#diagonal matrix for integration point A matrix
imat <- Diagonal(nv, rep(1, nv))
A.pp <- rBind(imat, unstructured_data_A)
#get covariate for integration points
covariate = dat1$gridcov[Reduce('cbind', nearest.pixel(mesh$loc[,1], mesh$loc[,2], im(dat1$gridcov)))]
biascovariate = biascov[Reduce('cbind', nearest.pixel(mesh$loc[,1], mesh$loc[,2], im(biascov)))]
#unstructured data stack with integration points
stk_unstructured_data <- inla.stack(data=list(y=cbind(y.pp, NA), e = e.pp),
effects=list(list(data.frame(interceptB=rep(1,nv+n), env = c(covariate, unstructured_data$env), bias = c(biascovariate, unstructured_data$bias))), list(uns_field=1:spde$n.spde)),
A=list(1,A.pp),
tag="unstructured_data")
#stack for structured data
#note intercept with different name
stk_structured_data <- inla.stack(data=list(y=cbind(NA, structured_data$presence), Ntrials = rep(1, nrow(structured_data))),
effects=list(list(data.frame(interceptA=rep(1,length(structured_data$x)), env = structured_data$env)), list(str_field=1:spde$n.spde)),
A=list(1,structured_data_A),
tag="structured_data")
##NOTE: doesn't use the copy function initially
stk <- inla.stack(stk_unstructured_data, stk_structured_data)
# join.stack <- stk
#
source("Create prediction stack.R")
join.stack <- create_prediction_stack(stk, resolution, biasfield = biasfield, dat1 = dat1, mesh, spde)
formulaJ = y ~ interceptA + interceptB + env + bias + f(uns_field, model = spde) + f(str_field, copy = "uns_field", fixed = TRUE) -1
result <- inla(formulaJ,family=c("poisson", "binomial"),
data=inla.stack.data(join.stack),
control.predictor=list(A=inla.stack.A(join.stack), compute=TRUE),
control.family = list(list(link = "log"),
list(link = "cloglog")),
E = inla.stack.data(join.stack)$e,
Ntrials = inla.stack.data(join.stack)$Ntrials,
control.compute = list(cpo=TRUE, waic= TRUE, dic = TRUE)
)
##project the mesh onto the initial simulated grid 100x100 cells in dimension
proj1<-inla.mesh.projector(mesh,ylim=c(1,max_y),xlim=c(1,max_x),dims=c(max_x,max_y))
##pull out the mean of the random field for the NPMS model
xmean1 <- inla.mesh.project(proj1, result$summary.random$uns_field$mean)
##plot the estimated random field
# plot with the original
library(fields)
# some of the commands below were giving warnings as not graphical parameters - I have fixed what I can
# scales and col.region did nothing on my version
png("joint model with bias covariate.png", height = 1000, width = 2500, pointsize = 30)
par(mfrow=c(1,3))
image.plot(1:max_x,1:max_y,xmean1, col=tim.colors(),xlab='', ylab='',main="mean of r.f",asp=1)
image.plot(list(x=dat1$Lam$xcol*100, y=dat1$Lam$yrow*100, z=t(dat1$rf.s)), main='Truth', asp=1) # make sure scale = same
points(structured_data[structured_data[,4] %in% 0,2:3], pch=16, col='white') #absences
points(structured_data[structured_data[,4] %in% 1,2:3], pch=16, col='black')
##plot the standard deviation of random field
xsd1 <- inla.mesh.project(proj1, result$summary.random$uns_field$sd)
image.plot(1:max_x,1:max_y,xsd1, col=tim.colors(),xlab='', ylab='', main="sd of r.f",asp=1)
dev.off()
result$summary.fixed
return(list(join.stack = join.stack, result = result))
}
| /Run models joint covariate for bias.R | no_license | NERC-CEH/IOFFsimwork | R | false | false | 5,761 | r | ## Models with simulated data
joint_model_cov <- function(structured_data, unstructured_data, dat1, biasfield, resolution = c(10,10), biascov){
  # Joint INLA model for structured presence/absence (PA) and unstructured
  # presence-only (PO) data sharing ONE spatial random field, with an
  # environmental covariate in both likelihoods and a bias covariate in the
  # PO likelihood only. The PO data use the Simpson (dual-mesh) integration
  # scheme; the PA data use a binomial likelihood with a cloglog link.
  #
  # Arguments:
  #   structured_data   - data.frame; columns 2:3 hold x,y coordinates,
  #                       column 4 / $presence the 0/1 response, plus $env.
  #   unstructured_data - data.frame; columns 1:2 hold x,y coordinates,
  #                       plus $env and $bias.
  #   dat1              - simulation object providing $gridcov (covariate
  #                       grid), $Lam and $rf.s (used for the "Truth" panel).
  #   biasfield         - data.frame of grid locations (columns 1:2 = x,y).
  #   resolution        - passed through to create_prediction_stack().
  #   biascov           - bias covariate surface (coercible via spatstat im()).
  #
  # Returns: list(join.stack = <inla.stack incl. prediction stack>,
  #               result = <fitted inla object>).
  # Side effects: sources "Create prediction stack.R" and writes
  # "joint model with bias covariate.png" into the working directory.
  #packages
  library(INLA)
  library(reshape2)
  library(rgeos)
  library(fields)
  max_x <- max(biasfield$x)
  max_y <- max(biasfield$y)
  # preparation - mesh construction - use the loc.domain argument
  mesh <- inla.mesh.2d(loc.domain = biasfield[,c(1,2)], max.edge = c(20,40), cutoff = 2, offset = c(5,20))
  # plot(mesh)  # uncomment to inspect the mesh
  # SPDE (Matern) representation on the mesh just created
  spde <- inla.spde2.matern(mesh)
  # projector (A) matrices mapping mesh nodes to the observation locations
  structured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(structured_data[,2:3]))
  unstructured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(unstructured_data[,1:2]))
  # --- integration stack for the point-process part (Simpson approach) ---
  # rectangular study domain as a closed polygon
  loc.d <- t(matrix(c(0,0,max_x,0,max_x,max_y,0,max_y,0,0), 2))
  # dual (Voronoi) mesh around the triangulation nodes
  dd <- deldir::deldir(mesh$loc[, 1], mesh$loc[, 2])
  tiles <- deldir::tile.list(dd)
  # domain as a SpatialPolygons object
  domainSP <- SpatialPolygons(list(Polygons(
    list(Polygon(loc.d)), '0')))
  # intersect each Voronoi cell with the domain; w holds the cell areas used
  # as integration weights (0 for nodes falling outside the domain)
  poly.gpc <- as(domainSP@polygons[[1]]@Polygons[[1]]@coords, "gpc.poly")
  w <- sapply(tiles, function(p) rgeos::area.poly(rgeos::intersect(as(cbind(p$x, p$y), "gpc.poly"), poly.gpc)))
  table(w > 0)  # sanity check: some weights are expected to be 0
  # par(mfrow=c(1,1))
  # plot(mesh$loc, asp=1, col=(w==0)+1, pch=19, xlab='', ylab='')
  # plot(dd, add=TRUE)
  # lines(loc.d, col=3)
  nv <- mesh$n                  # number of mesh nodes (integration points)
  n <- nrow(unstructured_data)  # number of PO presences
  # response: 0 for integration nodes, 1 for presences
  y.pp <- rep(0:1, c(nv, n))
  # expectation vector: cell area for integration nodes, 0 for presences
  e.pp <- c(w, rep(0, n))
  # A matrix for the PP part: identity block for the nodes, projector for the
  # presences. FIX: Matrix::rBind() was deprecated and later removed; base
  # rbind() dispatches on Matrix classes since R 3.2.0.
  imat <- Diagonal(nv, rep(1, nv))
  A.pp <- rbind(imat, unstructured_data_A)
  # covariate values at the integration points (nearest grid pixel)
  covariate <- dat1$gridcov[Reduce('cbind', nearest.pixel(mesh$loc[,1], mesh$loc[,2], im(dat1$gridcov)))]
  biascovariate <- biascov[Reduce('cbind', nearest.pixel(mesh$loc[,1], mesh$loc[,2], im(biascov)))]
  # unstructured (PO) data stack, integration points included; the first
  # column of the two-column response belongs to the Poisson likelihood
  stk_unstructured_data <- inla.stack(data=list(y=cbind(y.pp, NA), e = e.pp),
                                      effects=list(list(data.frame(interceptB=rep(1,nv+n), env = c(covariate, unstructured_data$env), bias = c(biascovariate, unstructured_data$bias))), list(uns_field=1:spde$n.spde)),
                                      A=list(1,A.pp),
                                      tag="unstructured_data")
  # structured (PA) data stack; note the separately named intercept
  stk_structured_data <- inla.stack(data=list(y=cbind(NA, structured_data$presence), Ntrials = rep(1, nrow(structured_data))),
                                    effects=list(list(data.frame(interceptA=rep(1,length(structured_data$x)), env = structured_data$env)), list(str_field=1:spde$n.spde)),
                                    A=list(1,structured_data_A),
                                    tag="structured_data")
  stk <- inla.stack(stk_unstructured_data, stk_structured_data)
  # append prediction locations (helper defines create_prediction_stack)
  source("Create prediction stack.R")
  join.stack <- create_prediction_stack(stk, resolution, biasfield = biasfield, dat1 = dat1, mesh, spde)
  # shared field: str_field is a fixed copy of uns_field
  formulaJ <- y ~ interceptA + interceptB + env + bias + f(uns_field, model = spde) + f(str_field, copy = "uns_field", fixed = TRUE) -1
  result <- inla(formulaJ, family=c("poisson", "binomial"),
                 data=inla.stack.data(join.stack),
                 control.predictor=list(A=inla.stack.A(join.stack), compute=TRUE),
                 control.family = list(list(link = "log"),
                                       list(link = "cloglog")),
                 E = inla.stack.data(join.stack)$e,
                 Ntrials = inla.stack.data(join.stack)$Ntrials,
                 control.compute = list(cpo=TRUE, waic= TRUE, dic = TRUE)
  )
  # project the fitted field back onto the max_x x max_y simulation grid
  proj1 <- inla.mesh.projector(mesh, ylim=c(1,max_y), xlim=c(1,max_x), dims=c(max_x,max_y))
  xmean1 <- inla.mesh.project(proj1, result$summary.random$uns_field$mean)
  # diagnostic figure: estimated field mean, simulated truth, field sd
  # (fields is already attached above; the original re-attached it here)
  png("joint model with bias covariate.png", height = 1000, width = 2500, pointsize = 30)
  par(mfrow=c(1,3))
  image.plot(1:max_x, 1:max_y, xmean1, col=tim.colors(), xlab='', ylab='', main="mean of r.f", asp=1)
  image.plot(list(x=dat1$Lam$xcol*100, y=dat1$Lam$yrow*100, z=t(dat1$rf.s)), main='Truth', asp=1) # make sure scale = same
  points(structured_data[structured_data[,4] %in% 0,2:3], pch=16, col='white') # absences
  points(structured_data[structured_data[,4] %in% 1,2:3], pch=16, col='black') # presences
  xsd1 <- inla.mesh.project(proj1, result$summary.random$uns_field$sd)
  image.plot(1:max_x, 1:max_y, xsd1, col=tim.colors(), xlab='', ylab='', main="sd of r.f", asp=1)
  dev.off()
  result$summary.fixed  # value discarded inside a function; kept from the original
  return(list(join.stack = join.stack, result = result))
}
|
# Fixture generation: record the uniform draws base R produces under the
# Knuth-TAOCP (1997) generator so a port can be checked against them.
RNGkind("Knuth-TAOCP")  # select the Knuth TAOCP generator for runif()
set.seed(0)
runif(10)  # expected values are listed in the comment block below
### -> [
## 0.627400767058134412, 0.354186671786010432, 0.989893430843949762,
## 0.862408143468201493, 0.662299204617739057, 0.077800422906875638,
## 0.145743910223245676, 0.811320250853896474, 0.346456802450120616,
## 0.791849832050502633]
#
set.seed(1234)
runif(500)            # advance the stream by 500 draws
.Random.seed[2:102]   # dump the generator's internal state (expected values below)
#
## [1] 589114580 124868320 1023486867 888263572 1008557248
# [6] 139281324 20941710 761831889 304787211 61486457
# [11] 198656608 183885823 548183715 499640547 17082978
# [16] 412708784 414486889 325217044 699238979 321061394
# [21] 535584337 653850800 284471156 273614104 795707198
# [26] 782735090 828099772 354681544 784150177 901050950
# [31] 897434290 1052357202 736123264 780988038 1015727324
# [36] 516269193 661625711 533287921 731937269 598625428
# [41] 942076003 340891630 556261874 789057357 644757378
# [46] 136419675 53508638 612435549 938819010 734832779
# [51] 709866531 987863323 786713782 846327859 861245975
# [56] 1033677522 535542334 284580720 377803680 976541065
# [61] 290901891 816561037 130015121 877890156 104648344
# [66] 422316372 1022823627 136991533 738784288 817022663
# [71] 743988492 1042418271 891445386 987660572 552247221
# [76] 893066571 182855384 854980879 220208018 785000732
# [81] 674209255 350092667 574276366 146856646 678029035
# [86] 1020724152 226873477 963833136 1027610664 166396118
# [91] 63110549 151167337 119845999 24427520 229205377
# [96] 831118551 551494892 107462480 70641556
# 10 more
runif(10)
# [1] 0.772171009331941938 0.829357400536537503 0.739039418287575578
# [4] 0.107159891165792984 0.057164810597896604 0.056055479682981996
# [7] 0.587192881852388604 0.256809514947235695 0.269237011671066395
# [10] 0.628123477101326322 | /src/lib/rng/knuth-taocp/__test__/fixture-generation/fixture-generaton.R | permissive | R-js/libRmath.js | R | false | false | 1,865 | r | RNGkind("Knuth-TAOCP")
# Assumes the Knuth-TAOCP RNG has already been selected via RNGkind() above;
# these draws pin down the stream for seed 0.
set.seed(0)
runif(10)
### -> [
## 0.627400767058134412, 0.354186671786010432, 0.989893430843949762,
## 0.862408143468201493, 0.662299204617739057, 0.077800422906875638,
## 0.145743910223245676, 0.811320250853896474, 0.346456802450120616,
## 0.791849832050502633]
#
set.seed(1234)
runif(500)
.Random.seed[2:102]
#
## [1] 589114580 124868320 1023486867 888263572 1008557248
# [6] 139281324 20941710 761831889 304787211 61486457
# [11] 198656608 183885823 548183715 499640547 17082978
# [16] 412708784 414486889 325217044 699238979 321061394
# [21] 535584337 653850800 284471156 273614104 795707198
# [26] 782735090 828099772 354681544 784150177 901050950
# [31] 897434290 1052357202 736123264 780988038 1015727324
# [36] 516269193 661625711 533287921 731937269 598625428
# [41] 942076003 340891630 556261874 789057357 644757378
# [46] 136419675 53508638 612435549 938819010 734832779
# [51] 709866531 987863323 786713782 846327859 861245975
# [56] 1033677522 535542334 284580720 377803680 976541065
# [61] 290901891 816561037 130015121 877890156 104648344
# [66] 422316372 1022823627 136991533 738784288 817022663
# [71] 743988492 1042418271 891445386 987660572 552247221
# [76] 893066571 182855384 854980879 220208018 785000732
# [81] 674209255 350092667 574276366 146856646 678029035
# [86] 1020724152 226873477 963833136 1027610664 166396118
# [91] 63110549 151167337 119845999 24427520 229205377
# [96] 831118551 551494892 107462480 70641556
# 10 more
runif(10)
# [1] 0.772171009331941938 0.829357400536537503 0.739039418287575578
# [4] 0.107159891165792984 0.057164810597896604 0.056055479682981996
# [7] 0.587192881852388604 0.256809514947235695 0.269237011671066395
# [10] 0.628123477101326322 |
library(shiny)
library(shinythemes)
library(plotly)
library(viridis)

# Greenhouse-gas emissions data; '.' encodes missing values in the CSV.
ghg <- read.csv('data/ghg.csv', na.strings = '.')
names(ghg)[1] <- 'ccode'  # first column holds the country codes used by plotly

# FIX: since R 4.0 read.csv() returns character (not factor) columns by
# default, so levels(ghg$gas) would be NULL and the drop-downs would come up
# empty. sort(unique(...)) yields the same choices for factor or character.
gas_choices <- sort(unique(as.character(ghg$gas)))
sector_choices <- sort(unique(as.character(ghg$sector)))

ui <- fluidPage(theme = shinytheme('cerulean'),
  titlePanel(h1("Coursera_JH Developing Data Products")),
  sidebarLayout(
    sidebarPanel(helpText('Create choropleth maps with information about MTCO2 emissions.'),
      selectInput('gas', p('Select gas'), choices = gas_choices,
        selected = 'KYOTOGHG'
      ),
      selectInput('sector', p('Select sector'), choices = sector_choices,
        selected = 'Total'
      ),
      sliderInput('year', p('Select year'),
        min = min(ghg$year), max = max(ghg$year), value = 2016, step = 1, sep = ''),
      submitButton(text = 'Apply changes')
    ),
    mainPanel(h2('Final assignment'),
      h5('Oscar BA. 2/7/2020'),
      p('This is a shiny application developed for the "Developing Data Products" course
      by Johns Hopkins University in Coursera.'),
      h3('Documentation'),
      p('To run the app, select the gas, sector and year you want to visualize. Then, click on "Apply changes" to
      plot the desired data in the map.'),
      div('The necessary packages are', code('shiny'), code('shinythemes'), code('plotly'), 'and', code('viridis')),
      h4('Greenhouse Gases Emissions'),
      div('Human activity has worsened the environmental quality. This app shows
      how have greenhouse gases emissions evolved through time. Data has been downloaded
      from Gutschow et al (2019) at:', a('https://dataservices.gfz-potsdam.de/pik/showshort.php?id=escidoc:3842934'),
      ' and is already available in the app "data" folder.\n'),
      p('"KYOTOGHG" includes all the greenhouse gases established in the Kyoto Protocol. The sector "Total" inlcudes all
      except for Land Use, Land Use Change and Forestry (LULUCF)'),
      plotlyOutput('map'),
      p('Source: own elaboration with data from Gutschow et al (2019)'),
      # FIX: the original had a trailing comma after this div(), which raises
      # an "argument is empty" error when R evaluates the mainPanel() call.
      div('Gutschow, J., Jeffery, L., Gieseke, R., & Gunther, A. (2019). The PRIMAP-hist national
      historical emissions time series (1850-2017) [Data set]. GFZ Data Services. DOI:', a('https://doi.org/10.5880/PIK.2019.018'))
    )
  )
)

server <- function(input, output) {
  # Rows matching the selected sector/gas/year; complete.cases() drops rows
  # with any missing values before they reach the map.
  d_sub <- reactive({
    ghg[ghg$sector == input$sector & ghg$gas == input$gas & complete.cases(ghg) & ghg$year == input$year, ]
  })
  output$map <- renderPlotly({
    plot_ly(d_sub(), type = 'choropleth',
      locations = d_sub()$ccode, z = d_sub()$mtco2, text = d_sub()$country,
      colors = inferno(20, begin = 1, end = 0)
    ) %>% layout(title = 'Annual MTCO2 emissions')
  })
}

# Run the application
shinyApp(ui = ui, server = server)
| /app.R | no_license | oscarba07/Coursera_JH_DDP | R | false | false | 3,440 | r | library(shiny)
library(shinythemes)
library(plotly)
library(viridis)

# Greenhouse-gas emissions data; '.' encodes missing values in the CSV.
ghg <- read.csv('data/ghg.csv', na.strings = '.')
names(ghg)[1] <- 'ccode'  # first column holds the country codes used by plotly

# FIX: since R 4.0 read.csv() returns character (not factor) columns by
# default, so levels(ghg$gas) would be NULL and the drop-downs would come up
# empty. sort(unique(...)) yields the same choices for factor or character.
gas_choices <- sort(unique(as.character(ghg$gas)))
sector_choices <- sort(unique(as.character(ghg$sector)))

ui <- fluidPage(theme = shinytheme('cerulean'),
  titlePanel(h1("Coursera_JH Developing Data Products")),
  sidebarLayout(
    sidebarPanel(helpText('Create choropleth maps with information about MTCO2 emissions.'),
      selectInput('gas', p('Select gas'), choices = gas_choices,
        selected = 'KYOTOGHG'
      ),
      selectInput('sector', p('Select sector'), choices = sector_choices,
        selected = 'Total'
      ),
      sliderInput('year', p('Select year'),
        min = min(ghg$year), max = max(ghg$year), value = 2016, step = 1, sep = ''),
      submitButton(text = 'Apply changes')
    ),
    mainPanel(h2('Final assignment'),
      h5('Oscar BA. 2/7/2020'),
      p('This is a shiny application developed for the "Developing Data Products" course
      by Johns Hopkins University in Coursera.'),
      h3('Documentation'),
      p('To run the app, select the gas, sector and year you want to visualize. Then, click on "Apply changes" to
      plot the desired data in the map.'),
      div('The necessary packages are', code('shiny'), code('shinythemes'), code('plotly'), 'and', code('viridis')),
      h4('Greenhouse Gases Emissions'),
      div('Human activity has worsened the environmental quality. This app shows
      how have greenhouse gases emissions evolved through time. Data has been downloaded
      from Gutschow et al (2019) at:', a('https://dataservices.gfz-potsdam.de/pik/showshort.php?id=escidoc:3842934'),
      ' and is already available in the app "data" folder.\n'),
      p('"KYOTOGHG" includes all the greenhouse gases established in the Kyoto Protocol. The sector "Total" inlcudes all
      except for Land Use, Land Use Change and Forestry (LULUCF)'),
      plotlyOutput('map'),
      p('Source: own elaboration with data from Gutschow et al (2019)'),
      # FIX: the original had a trailing comma after this div(), which raises
      # an "argument is empty" error when R evaluates the mainPanel() call.
      div('Gutschow, J., Jeffery, L., Gieseke, R., & Gunther, A. (2019). The PRIMAP-hist national
      historical emissions time series (1850-2017) [Data set]. GFZ Data Services. DOI:', a('https://doi.org/10.5880/PIK.2019.018'))
    )
  )
)

server <- function(input, output) {
  # Rows matching the selected sector/gas/year; complete.cases() drops rows
  # with any missing values before they reach the map.
  d_sub <- reactive({
    ghg[ghg$sector == input$sector & ghg$gas == input$gas & complete.cases(ghg) & ghg$year == input$year, ]
  })
  output$map <- renderPlotly({
    plot_ly(d_sub(), type = 'choropleth',
      locations = d_sub()$ccode, z = d_sub()$mtco2, text = d_sub()$country,
      colors = inferno(20, begin = 1, end = 0)
    ) %>% layout(title = 'Annual MTCO2 emissions')
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
## plot1.R - histogram of Global Active Power for 2007-02-01/02 of the UCI
## "Individual household electric power consumption" data set.
rm(list=ls())  # deliberate: this script starts from a clean workspace
############################## Download in R #####################################
# Date and Time are read as factors; the 7 measurement columns as numeric.
classes <- c("factor", "factor", rep("numeric", times = 7))
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
# FIX: spell out colClasses (the original relied on partial matching of
# 'colClass') and header = TRUE (T is a reassignable shorthand).
# "?" marks missing values in this data set.
dat <- read.table(unz(temp, "household_power_consumption.txt"), sep = ";",
                  header = TRUE, colClasses = classes, na.strings = "?")
unlink(temp)
rm(list = c("temp", "classes"))
############################## Data frame preparation #####################################
# Convert the Date column from factor to Date
dat$Date <- as.Date(dat$Date, format = "%d/%m/%Y")
# Keep only the two target days
dat <- dat[dat$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Combine date and time into a single POSIXct timestamp
dat$datetime <- as.POSIXct(paste(as.character(dat$Date), as.character(dat$Time)))
# Sort chronologically
dat <- dat[order(dat$datetime), ]
############################## PLOTS #####################################
## The first plot
png("plot1.png", width = 480, height = 480, units = "px")
par(mfrow = c(1, 1))
# FIX: use the full column name; dat$Global_acti relied on $ partial matching.
hist(dat$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off() | /plot1.R | no_license | vladmalkov/ExData_Plotting1 | R | false | false | 1,181 | r | rm(list=ls())
############################## Download in R #####################################
# Date and Time are read as factors; the 7 measurement columns as numeric.
classes<-c("factor","factor",rep("numeric",times=7))
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
# NOTE(review): 'colClass' works only via partial argument matching of
# 'colClasses', and header=T uses the reassignable shorthand for TRUE --
# consider spelling both out. "?" marks missing values in this data set.
dat <- read.table(unz(temp, "household_power_consumption.txt"), sep=";",header=T,colClass=classes,na.strings="?")
unlink(temp)
rm(list=c("temp","classes"))
############################## Data frame preparation #####################################
#Converting Date variable into Date format
dat$Date=as.Date(dat$Date,format="%d/%m/%Y")
#Subsetting only desired dates
dat<-dat[dat$Date %in% as.Date(c("2007-02-01","2007-02-02") ),]
#New variable combining complete date and time
dat$datetime<-as.POSIXct(paste(as.character(dat$Date),as.character(dat$Time)))
# Sorting by datetime variable
dat<-dat[order(dat$datetime),]
############################## PLOTS #####################################
## The first plot
png("plot1.png", width = 480, height = 480, units = "px")
par(mfrow=c(1,1))
# NOTE(review): dat$Global_acti resolves to Global_active_power only through
# $ partial matching on data frames -- prefer the full column name.
hist(dat$Global_acti,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
## These functions will cache the result of inverting a square matrix to avoid
## doing unnecessary computation if the matrix needs to be inverted repeatedly
## This function will create a list of 3 functions to 1) get the values of a matrix
## 2) set the values of an inverted matrix and 3) get those values from a cache
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based cache: `cached_inv` lives in this function's environment
  # and is shared by the accessors below via `<<-`.
  cached_inv <- NULL
  list(
    get = function() x,                                   # the wrapped matrix
    setinverse = function(inverse) cached_inv <<- inverse, # store an inverse
    getinverse = function() cached_inv                     # NULL until stored
  )
}
## This function will calculate the inverse of the matrix object stored above
## and cache the result. If the same matrix object is called again it will return
## the cached data rather than re-solving the matrix
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  ## object), reusing the cached copy when one is available. Extra
  ## arguments are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)   # remember the result for subsequent calls
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | 89million/ProgrammingAssignment2 | R | false | false | 1,102 | r | ## These functions will cache the result of inverting a square matrix to avoid
## doing unnecessary computation if the matrix needs to be inverted repeatedly
## This function will create a list of 3 functions to 1) get the values of a matrix
## 2) set the values of an inverted matrix and 3) get those values from a cache
makeCacheMatrix <- function(x = matrix()) {
# cached inverse; stays NULL until setinverse() stores a value
m <- NULL
# accessor for the wrapped matrix
get <- function() x
# store a computed inverse in this closure's environment (note <<-)
setinverse <- function(inverse) m <<- inverse
# return the cached inverse, or NULL if none has been stored yet
getinverse <- function() m
list(get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function will calculate the inverse of the matrix object stored above
## and cache the result. If the same matrix object is called again it will return
## the cached data rather than re-solving the matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# 'x' is a list produced by makeCacheMatrix(); '...' is forwarded to solve()
m <- x$getinverse()
# cache hit: reuse the stored inverse instead of recomputing
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# cache miss: invert the matrix and store the result for next time
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anglr-package.R
\docType{package}
\name{anglr-package}
\alias{anglr-package}
\title{Tidy tables for topological spatial data structures.}
\description{
The 'anglr' package helps transcend the following general limitations:
\itemize{
\item coordinates beyond X and Y, or longitude and latitude
\item storing attributes on vertices, primitives, branches (parts), or objects
\item topology and geometry are properly separated
\item spatial data can be properly represented as a graph of spatial primitives
\item polygons as true surfaces, not just glorified lines with a path-filling rule
\item TBD higher dimensional primitives are possible
\item TBD n-dimensional rasters with curvilinear coordinates, and the discrete-continuous distinction
}
}
\section{I. Creation}{
\tabular{ll}{
\code{\link{anglr}} \tab create an anglr table set from various input types \cr
}
}
\section{II. Plotting}{
\tabular{ll}{
\code{\link{globe}} \tab convert X,Y planar or angular to 3D on the surface of a globe, based on the data in longitude-latitude form \cr
\code{\link{plot.trimesh}} \tab plot 2D topology in 3D geometry space \cr
\code{\link{plot.linemesh}} \tab plot 1D topology in 3D geometry space \cr
\code{\link{plot.pointmesh}} \tab plot 0D topology in 3D geometry space \cr
}
}
| /man/anglr-package.Rd | no_license | MilesMcBain/anglr | R | false | true | 1,368 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anglr-package.R
\docType{package}
\name{anglr-package}
\alias{anglr-package}
\title{Tidy tables for topological spatial data structures.}
\description{
The 'anglr' package helps transcend the following general limitations:
\itemize{
\item coordinates beyond X and Y, or longitude and latitude
\item storing attributes on vertices, primitives, branches (parts), or objects
\item topology and geometry are properly separated
\item spatial data can be properly represented as a graph of spatial primitives
\item polygons as true surfaces, not just glorified lines with a path-filling rule
\item TBD higher dimensional primitives are possible
\item TBD n-dimensional rasters with curvilinear coordinates, and the discrete-continuous distinction
}
}
\section{I. Creation}{
\tabular{ll}{
\code{\link{anglr}} \tab create a anglr table set from various input types \cr
}
}
\section{II. Plotting}{
\tabular{ll}{
\code{\link{globe}} \tab convert X,Y planar or angular to 3D on the surface of a globe, based on the data in longitude-latitude form \cr
\code{\link{plot.trimesh}} \tab plot 2D topology in 3D geometry space \cr
\code{\link{plot.linemesh}} \tab plot 1D topology in 3D geometry space \cr
\code{\link{plot.pointmesh}} \tab plot 0D topology in 3D geometry space \cr
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Courses_F1}
\alias{Courses_F1}
\title{Dataset about F1 courses}
\format{
An object of class \code{list} of length 71.
}
\usage{
data(Courses_F1)
}
\description{
Dataset about F1 courses
}
\keyword{datasets}
| /man/Courses_F1.Rd | permissive | Nowaysis/F1Stats | R | false | true | 309 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Courses_F1}
\alias{Courses_F1}
\title{Dataset about KiteSurf}
\format{
An object of class \code{list} of length 71.
}
\usage{
data(Courses_F1)
}
\description{
Dataset about KiteSurf
}
\keyword{datasets}
|
#' Return devtools metadata environment
#'
#' If the package was not loaded with devtools, returns \code{NULL}.
#'
#' @param name The name of a loaded package
#' @examples
#' dev_meta("stats") # NULL
#'
#' if (has_tests()) {
#' # Load the test package in directory "testLoadHooks"
#' load_all(devtest("testLoadHooks"))
#'
#' # Get metdata for the package
#' x <- dev_meta("testLoadHooks")
#' as.list(x)
#'
#' # Clean up.
#' unload(devtest("testLoadHooks"))
#' }
#' @export
dev_meta <- function(name) {
  # Look up the package's namespace; fail loudly when it is not loaded.
  ns <- get_namespace(as.name(name))
  if (is.null(ns)) {
    stop("Namespace not found for ", name, ". Is it loaded?")
  }
  # NULL when the package was loaded normally (no devtools metadata),
  # otherwise the metadata environment itself.
  ns$.__DEVTOOLS__
}
# Create the devtools metadata environment for a package.
# This should be run when packages are loaded by devtools.
create_dev_meta <- function(name) {
  # `name` must be a loaded package; metadata may be created only once.
  ns <- get_namespace(as.name(name))
  if (!is.null(ns$.__DEVTOOLS__)) {
    stop("devtools metadata for package ", name, " already exists.")
  }
  # Child environment of the namespace so lookups fall through to it.
  ns$.__DEVTOOLS__ <- new.env(parent = ns)
  ns$.__DEVTOOLS__
}
| /R/metadata.r | no_license | miraisolutions/devtools | R | false | false | 1,073 | r | #' Return devtools metadata environment
#'
#' If the package was not loaded with devtools, returns \code{NULL}.
#'
#' @param name The name of a loaded package
#' @examples
#' dev_meta("stats") # NULL
#'
#' if (has_tests()) {
#' # Load the test package in directory "testLoadHooks"
#' load_all(devtest("testLoadHooks"))
#'
#' # Get metdata for the package
#' x <- dev_meta("testLoadHooks")
#' as.list(x)
#'
#' # Clean up.
#' unload(devtest("testLoadHooks"))
#' }
#' @export
dev_meta <- function(name) {
ns <- get_namespace(as.name(name))
if (is.null(ns)) {
stop("Namespace not found for ", name, ". Is it loaded?")
}
if (is.null(ns$.__DEVTOOLS__)) {
return(NULL)
}
ns$.__DEVTOOLS__
}
# Create the devtools metadata environment for a package.
# This should be run when packages are loaded by devtools.
create_dev_meta <- function(name) {
ns <- get_namespace(as.name(name))
if (!is.null(ns$.__DEVTOOLS__)) {
stop("devtools metadata for package ", name, " already exists.")
}
ns$.__DEVTOOLS__ <- new.env(parent = ns)
ns$.__DEVTOOLS__
}
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:91">
</head>
<body bgcolor="white">
<a href="#0" id="0">Residents took forecasters at their word when they warned of Hurricane Hugo's fury, and the low number of deaths from the powerful storm can be credited to this healthy respect, authorities said.</a>
<a href="#1" id="1">``We've had so many close calls,'' said Gary Garnet, a meteorologist with the National Weather Service.</a>
<a href="#2" id="2">``There's nothing to worry about out there,'' he said.</a>
<a href="#3" id="3">Tens of thousands more got into their cars and headed up Interstate 26 toward Columbia.</a>
<a href="#4" id="4">``You've just got to say nice things about the people.''</a>
<a href="#5" id="5">``But this storm was very, very strong,'' he said.</a>
</body>
</html> | /DUC-Dataset/Summary_p100_R/D085.AP890925-0054.html.R | no_license | Angela7126/SLNSumEval | R | false | false | 814 | r | <html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:91">
</head>
<body bgcolor="white">
<a href="#0" id="0">Residents took forecasters at their word when they warned of Hurricane Hugo's fury, and the low number of deaths from the powerful storm can be credited to this healthy respect, authorities said.</a>
<a href="#1" id="1">``We've had so many close calls,'' said Gary Garnet, a meteorologist with the National Weather Service.</a>
<a href="#2" id="2">``There's nothing to worry about out there,'' he said.</a>
<a href="#3" id="3">Tens of thousands more got into their cars and headed up Interstate 26 toward Columbia.</a>
<a href="#4" id="4">``You've just got to say nice things about the people.''</a>
<a href="#5" id="5">``But this storm was very, very strong,'' he said.</a>
</body>
</html> |
## The following are a pair of functions to cache the inverse of a
## matrix.
## makeCacheMatrix creates a matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap matrix `x` with a cache for its inverse.
  # Returned list: set(y) replaces the matrix (and clears the cache),
  # get() returns it, setinverse()/getinverse() store/read the cached inverse.
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y){
    x <<- y
    m <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  # FIX: parameter renamed from `solve`, which shadowed base::solve().
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve computes the inverse of the matrix returned by makeCacheMatrix
## above. If the inverse has already been calculated (and the matrix has not
## changed), then cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Returns a matrix that is the inverse of 'x', computing it at most once:
  ## a cache hit is announced via message() and returned immediately.
  hit <- x$getinverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)   # remember the result for subsequent calls
  inv
}
| /cachematrix.R | no_license | bolognarossoblu/ProgrammingAssignment2 | R | false | false | 903 | r | ## The following are a pair of functions to cache the inverse of a
## matrix.
## makeCacheMatrix creates a matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<-NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve computes the inverse of the matrix returned by makeCacheMatrix
## above. If the inverse has already been calculated (and the matrix has not
## changed), then cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
## Returns a matrix that is the inverse of 'x'.
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
#' @title Convert Genotype Data
#'
#' @description Convert genotype data in various formats to sequoia's
#' 1-column-per-marker format or Colony's 2-columns-per-marker format.
#'
#' @param InFile character string with name of genotype file to be converted.
#' @param InData dataframe or matrix with genotypes to be converted.
#' @param InFormat One of 'single', 'double', 'col', 'ped', 'raw', or 'seq', see
#' Details.
#' @param OutFile character string with name of converted file. If NA, return
#' matrix with genotypes in console (default); if NULL, write to
#' 'GenoForSequoia.txt' in current working directory.
#' @param OutFormat as \code{InFormat}; only 'seq' and 'col' are implemented.
#' @param Missing vector with symbols interpreted as missing data. '0' is
#' missing data for InFormats 'col' and 'ped' only.
#' @param sep vector with field separator strings that will be tried on
#' \code{InFile}. The \code{OutFile} separator uses the
#' \code{\link[utils]{write.table}} default, i.e. one blank space.
#' @param header a logical value indicating whether the file contains a header
#' as its first line. If NA (default), set to TRUE for 'raw', and FALSE
#' otherwise.
#' @param IDcol number giving the column with individual IDs; 0 indicates the
#' rownames (for InData only). If NA (default), set to 2 for InFormat 'raw'
#' and 'ped', and otherwise to 1 for InFile and 0 (rownames) for InData,
#' except when InData has a column labeled 'ID'.
#' @param FIDcol column with the family IDs, if any are wished to
#' be used. This is column 1 for InFormat 'raw' and 'seq', but those are by
#' default not used.
#' @param FIDsep string used to paste FID and IID together into a composite-ID
#' (value passed to \code{\link{paste}}'s \code{collapse}). This joining can
#' be reversed using \code{\link{PedStripFID}}.
#' @param dropcol columns to exclude from the output data, on top of IDcol and
#' FIDcol (which become rownames). When NA, defaults to columns 3-6 for
#' InFormat 'raw' and 'seq'. Can also be used to drop some SNPs, see example
#' below on how to do this for the 2-columns-per-SNP input formats.
#' @param quiet suppress messages and warnings.
#'
#' @return A genotype matrix in the specified output format. If 'OutFile' is
#' specified, the matrix is written to this file and nothing is returned
#' inside R. When converting to 0/1/2 format, 2 is the homozygote for the
#' minor allele, and 0 the homozygote for the major allele.
#'
#' @details The first two arguments are interchangeable, and can be given
#' unnamed. The first argument is assumed to be a file name if it is of class
#' 'character' and length 1, and to be the genetic data if it is a matrix or
#' dataframe.
#'
#'
#' @section Input formats:
#' The following formats can be specified by \code{InFormat}:
#' \describe{
#' \item{seq}{(sequoia) genotypes are coded as 0, 1, 2, missing as \eqn{-9},
#' in 1 column per marker. Column 1 contains IDs, there is no header row.}
#' \item{raw}{(PLINK) genotypes are coded as 0, 1, 2, missing as NA, in 1
#' column per marker. The first 6 columns are descriptive (1:FID, 2:IID, 3 to
#' 6 ignored), and there is a header row. This is produced by PLINK's option
#' --recodeA}
#' \item{ped}{(PLINK) genotypes are coded as A, C, T, G, missing as 0, in 2
#' columns per marker. The first 6 columns are descriptive (1:FID, 2:IID, 3 to
#' 6 ignored). }
#' \item{col}{(Colony) genotypes are coded as numeric values, missing as 0, in
#' 2 columns per marker. Column 1 contains IDs.}
#' \item{single}{1 column per marker, otherwise unspecified}
#' \item{double}{2 columns per marker, otherwise unspecified}
#' }
#' For each \code{InFormat}, its default values for \code{Missing, header,
#' IDcol, FIDcol}, and \code{dropcol} can be overruled by specifying the
#' corresponding input parameters.
#'
#'
#' @section Error messages:
#' Occasionally when reading in a file \code{GenoConvert} may give an error
#' that 'rows have unequal length'. GenoConvert makes use of
#' \code{\link{readLines}} and \code{\link{strsplit}}, which is much faster
#' than \code{\link{read.table}} for large datafiles, but also more sensitive
#' to unusual line endings, unusual end-of-file characters, or invisible
#' characters (spaces or tabs) after the end of some lines. In these cases,
#' try to read the data from file using read.table or read.csv, and then use
#' \code{GenoConvert} on this dataframe or matrix, see example.
#'
#' @author Jisca Huisman, \email{jisca.huisman@gmail.com}
#'
#' @seealso \code{\link{CheckGeno}, \link{SnpStats}, \link{LHConvert}}.
#'
#' @examples
#' \dontrun{
#' # Requires PLINK installed & in system PATH:
#'
#' # tinker with window size, window overlap and VIF to get a set of
#' # 400 - 800 markers (100-200 enough for just parentage):
#' system("cmd", input = "plink --file mydata --indep 50 5 2")
#' system("cmd", input = "plink --file mydata --extract plink.prune.in
#' --recodeA --out PlinkOUT")
#'
#' GenoM <- GenoConvert(InFile = "PlinkOUT.raw")
#'
#' # save time on file conversion next time:
#' write.table(GenoM, file="Geno_sequoia.txt", quote=FALSE, col.names=FALSE)
#' GenoM <- as.matrix(read.table("Geno_sequoia.txt", row.names=1, header=FALSE))
#'
#' # drop some SNPs, e.g. after a warning of >2 alleles:
#' dropSNP <- c(5,68,101,128)
#' GenoM <- GenoConvert(ColonyFile, InFormat = "col",
#' dropcol = 1 + c(2*dropSNP-1, 2*dropSNP) )
#'
#' # circumvent a 'rows have unequal length' error:
#' GenoTmp <- as.matrix(read.table("mydata.txt", header=TRUE, row.names=1))
#' GenoM <- GenoConvert(InData=GenoTmp, InFormat="single", IDcol=0)
#' }
#'
#' @importFrom stats na.exclude
#'
#' @export
GenoConvert <- function(InData = NULL,
                        InFile = NULL,
                        InFormat = "raw",
                        OutFile = NA,
                        OutFormat = "seq",
                        Missing = c("-9", "??", "?", "NA", "NULL",
                                    c("0")[InFormat %in% c("col","ped")]),  # '0' is missing only for col/ped
                        sep = c(" ", "\t", ",", ";"),
                        header = NA,
                        IDcol = NA,     # NA = choose a format-dependent default below
                        FIDcol = NA,
                        FIDsep = "__",
                        dropcol = NA,
                        quiet = FALSE) {
  ## --- sanity checks: exactly one of InFile / InData -----------------------
  if (is.null(InFile) & is.null(InData)) {
    stop("please provide 'InFile' or 'InData'")
  } else if (!is.null(InFile) & !is.null(InData)) {
    stop("please provide either 'InFile' or 'InData', not both")
  }
  ## the first two arguments are interchangeable: a single character string
  ## is taken as a file name, a matrix/data.frame as the data itself
  if (length(InData)==1 & inherits(InData, "character")) {
    InFile <- InData
    InData <- NULL
  } else if (is.matrix(InFile) | is.data.frame(InFile)) {
    InData <- InFile
    InFile <- NULL
  }
  if (!is.null(InFile)) {
    if (is.character(InFile)) {
      if (!file.exists(InFile)) stop("cannot find 'InFile'")
    } else {
      stop("'InFile' in unknown format, should be character string.")
    }
    if (is.na(header)) {
      ## only PLINK .raw files carry a header row by default
      header <- ifelse(InFormat == "raw", TRUE, FALSE)
    }
  }
  if (!InFormat %in% c("raw", "seq", "col", "ped", "single", "double")) {
    stop("invalid InFormat")
  }
  if (OutFormat %in% c("raw", "ped", "single")) {
    stop("OutFormat not (yet) implemented")
  } else if (!OutFormat %in% c("seq", "col")) {
    stop("invalid OutFormat")
  }
  ## composite FID<FIDsep>IID ids would be split again by whitespace
  if (!is.na(FIDcol) & FIDsep %in% c("", " ", "\t", "\n")) stop("sep can not be whitespace")
  ## NB: UseFID is fixed *before* the raw/ped default fills FIDcol below, so
  ## PLINK family IDs are dropped but not pasted unless FIDcol is set by hand
  UseFID <- !is.na(FIDcol)
  if (is.na(IDcol)) {
    ## format-dependent default: column 2 for PLINK formats; otherwise
    ## column 1 when reading from file, or rownames / an 'ID' column for
    ## in-memory data
    IDcol <- ifelse(InFormat %in% c("raw", "ped"),
                    2,
                    ifelse(!is.null(InFile),
                           1,
                           ifelse("ID" %in% colnames(InData),
                                  which(colnames(InData) == "ID"),
                                  0)))   # 0 = use rownames
  }
  if (InFormat %in% c("raw", "ped")) {
    ## PLINK: col 1 = family ID; cols 3-6 = parents / sex / phenotype (ignored)
    if (is.na(FIDcol)) FIDcol <- 1
    ## BUG FIX: 'dropcol' may be a user-supplied vector (documented use);
    ## a bare is.na() on a vector errors in an if() in modern R
    if (length(dropcol) == 1 && is.na(dropcol)) dropcol <- c(3:6)
  }
  if (OutFormat == "seq" & is.null(OutFile)) {
    OutFile <- "GenoForSequoia.txt"
  } else if (is.null(OutFile)) {
    stop("please provide 'OutFile'")
  }
  ## ask for confirmation before (over)writing a file -- interactive use only
  if (interactive() & !quiet & !is.na(OutFile)) {
    if (file.exists(OutFile)) {
      ANS <- readline(prompt = paste("WARNING: ", OutFile, " will be overwritten.",
                                     "Press <N> to abort, or any other key to continue."))
    } else {
      ANS <- readline(prompt = paste("Genotypes will be written to ", OutFile,
                                     " . Press <N> to abort, or any other key to continue."))
    }
    if (substr(ANS, 1, 1) %in% c("N", "n")) stop()
  }
  #~~~~~~~~~~~~~~~~~~~~~~~~
  ## --- read the data -------------------------------------------------------
  if (!is.null(InData)) {
    GenoTmp <- as.matrix(InData)
    rm(InData)
  } else if (!is.null(InFile)) {
    ## readLines + strsplit is much faster than read.table for large files,
    ## but more sensitive to irregular line endings (see roxygen note above)
    GenoTmp <- readLines(InFile, warn=FALSE)
    if (header) GenoTmp <- GenoTmp[-1]
    TmpL <- strsplit(GenoTmp, split = sep[1])
    if (length(TmpL[[1]])==1) {
      ## first candidate separator did not split anything: try the others
      for (s in sep[-1]) {
        TmpL <- strsplit(GenoTmp, split = s)
        if (length(TmpL[[1]]) > 1) break
      }
    }
    if (length(TmpL[[1]])==1) {
      stop("unknown column separator, expecting ' ' (space), '\t' (tab), ',' or ';'")
    }
    if (length(table(sapply(TmpL, length))) > 1) {
      stop("rows have unequal length")
    }
    GenoTmp <- plyr::ldply(TmpL)
    rm(TmpL)
  }
  if (nrow(GenoTmp)<2) stop("Genotype matrix must have at least 2 individuals")
  if (ncol(GenoTmp)<2) stop("Genotype matrix must have at least 2 SNPs")
  #~~~~~~~~~
  ## --- extract ids, drop non-genotype columns ------------------------------
  if (IDcol==0) {
    IDs_geno <- rownames(GenoTmp)
  } else {
    IDs_geno <- trimws( GenoTmp[, IDcol] )
  }
  if (!is.na(FIDcol)) FID <- GenoTmp[, FIDcol]
  dropcol <- na.exclude(c(FIDcol, IDcol, dropcol))
  if (any(dropcol!=0)) GenoTmp <- GenoTmp[, -dropcol]   # -0 is a no-op, safe
  GenoTmp <- as.matrix(GenoTmp)
  if (UseFID) {
    IDs_geno <- paste(FID, IDs_geno, sep=FIDsep)
  }
  if (any(duplicated(IDs_geno))) {
    stop("'GenoM' has duplicate IDs in ", ifelse(IDcol==0, 'rownames.', paste0('column ', IDcol, '.')),
         "Please exclude or rename these samples, or specify IDcol or UseFID.")
  }
  ## --- recode all 'Missing' symbols to NA ----------------------------------
  for (misX in Missing) {
    if (any(GenoTmp == misX, na.rm=TRUE)) {
      GenoTmp <- array(gsub(misX, NA, GenoTmp, fixed=TRUE),
                       dim=dim(GenoTmp))
    }
  }
  ## --- check the coding matches the declared InFormat ----------------------
  if (InFormat %in% c("raw", "seq")) {
    if (any(!GenoTmp %in% c(0,1,2, NA))) {
      stop(paste("Unexpected value! When InFormat=", InFormat, ", genotypes should be coded as 0/1/2.\n",
                 "Choose InFormat='single' and/or different Missing, or check data."))
    }
  } else if (InFormat =="single") {
    if (!all(nchar(GenoTmp)==2, na.rm=TRUE)) {
      stop(paste("Unexpected value! When InFormat='single', genotypes should be coded as 2 digits or characters.\n",
                 "Choose InFormat='col' for 2-columns-per-SNP format, or check data."))
    }
  }
  if (InFormat %in% c("col", "ped", "single", "double")) { # A/C/T/G -> 0/1/2
    ## GCA: 2 x n-individuals x n-SNPs array holding the two alleles per SNP
    if (InFormat %in% c("ped", "col", "double")) {
      GCA <- array(dim=c(2, nrow(GenoTmp), ncol(GenoTmp)/2))
      GCA[1,,] <- GenoTmp[, seq(1,ncol(GenoTmp)-1,2)]   # odd columns: allele 1
      GCA[2,,] <- GenoTmp[, seq(2,ncol(GenoTmp),2)]     # even columns: allele 2
    } else if (InFormat %in% c("single")){
      GCA <- array(dim=c(2, nrow(GenoTmp), ncol(GenoTmp)))
      GCA[1,,] <- substr(GenoTmp,1,1)
      GCA[2,,] <- substr(GenoTmp,2,2)
    }
    UniqueAlleles <- sort(na.exclude(unique(c(GCA))))
    ## apply() returns a matrix when every SNP shows the same allele levels,
    ## and a list otherwise; handle both shapes
    Alleles <- apply(GCA, 3, function(x) table(factor(x, levels = UniqueAlleles)))
    if (is.matrix(Alleles)) {
      NumAlleles <- apply(Alleles, 2, function(x) sum(x>0))
      minorAllele <- apply(Alleles, 2, function(x) names(sort(x[x>0]))[1])
    } else { # list
      NumAlleles <- sapply(Alleles, length)
      minorAllele <- sapply(Alleles, function(x) names(sort(x))[1])
    }
    if (any(NumAlleles > 2)) {
      n.problem <- sum(NumAlleles > 2)
      stop(paste("There are", n.problem, "SNPs with >2 alleles ",
                 ifelse(n.problem<=10, paste(which(sapply(Alleles, length) > 2), collapse="-"),"")))
    }
    if (any(NumAlleles ==1) & !quiet & OutFormat!='seq') {
      warning(paste("There are", sum((NumAlleles ==1)), "monomorphic SNPs"))
    }
    ## per SNP: NA if either allele missing, 1 = heterozygote,
    ## 2 = minor-allele homozygote, 0 = major-allele homozygote
    GenoTmp2 <- sapply(seq_len(dim(GCA)[3]), function(i) {
      apply(GCA[,,i], 2, function(x) ifelse(is.na(x[1]) | is.na(x[2]), NA,
                                            ifelse(x[1] != x[2], 1, # heterozygote
                                                   ifelse(x[1] == minorAllele[i], 2, 0))) ) } )
  } else {
    ## already 0/1/2 coded: just flag monomorphic SNPs & convert to numeric
    AllHom0 <- apply(GenoTmp, 2, function(x) all(na.exclude(x) == 0))
    AllHom2 <- apply(GenoTmp, 2, function(x) all(na.exclude(x) == 2))
    if ((any(AllHom0) | any(AllHom2)) & !quiet & OutFormat!='seq') {
      warning(paste("There are", sum(AllHom0)+sum(AllHom2), "monomorphic SNPs"))
    }
    GenoTmp2 <- matrix(as.numeric(GenoTmp), nrow(GenoTmp))
  }
  rownames(GenoTmp2) <- IDs_geno
  rm(GenoTmp)
  ## --- convert to the requested OutFormat ----------------------------------
  if (OutFormat == "seq") {
    GenoOUT <- GenoTmp2
    GenoOUT[is.na(GenoOUT)] <- -9    # sequoia codes missing as -9
    CheckGeno(GenoOUT, quiet=quiet, Return = "excl") # returns invisibly
  } else if (OutFormat == "col") {
    ## Colony: each 0/1/2 genotype becomes a pair of allele columns
    dc <- list("0" = c(1,1), "1" = c(1,2), "2" = c(2,2), "-9" = c(0,0))
    GenoTmp2[is.na(GenoTmp2)] <- -9
    GenoA <- array(dim=c(nrow(GenoTmp2), 2, ncol(GenoTmp2)))
    for (i in seq_len(nrow(GenoTmp2))) {
      ## BUG FIX: index the lookup list by *name*; dc[[z]] with numeric z
      ## indexed by position, erroring for genotype 0 and missing (-9)
      GenoA[i,,] <- sapply(GenoTmp2[i,], function(z) dc[[as.character(z)]])
    }
    GenoOUT <- matrix(GenoA, nrow(GenoTmp2))
    row.names(GenoOUT) <- IDs_geno
    # } else {
    #   stop("OutFormat not implemented")  # caught above
  }
  if (!is.na(OutFile)) {
    utils::write.table(GenoOUT, file = OutFile,
                       row.names = TRUE, col.names = FALSE, quote = FALSE)
  } else {
    return(GenoOUT)
  }
}
#######################################################################
#######################################################################
#' @title Extract Sex and Birth Year from PLINK File
#'
#' @description Convert the first six columns of a PLINK .fam, .ped or
#' .raw file into a three-column lifehistory file for sequoia. Optionally
#' FID and IID are combined.
#'
#' @details The first 6 columns of PLINK .fam, .ped and .raw files are by
#' default FID - IID - father ID (ignored) - mother ID (ignored) - sex -
#' phenotype.
#'
#' @param PlinkFile character string with name of genotype file to be converted.
#' @param UseFID use the family ID column. The resulting ids (rownames of GenoM)
#' will be in the form FID__IID.
#' @param SwapSex change the coding from PLINK default (1=male, 2=female) to
#' sequoia default (1=female, 2=male); any other numbers are set to NA.
#' @param FIDsep characters in between FID and IID in the composite-ID. By default a
#' double underscore is used, to avoid problems when some IIDs contain an
#' underscore. Only used when UseFID=TRUE.
#' @param LifeHistData dataframe with additional sex and birth year info. In
#' case of conflicts, LifeHistData takes priority, with a warning. If
#' UseFID=TRUE, IDs in LifeHistData are assumed to be already as FID__IID.
#'
#' @return A dataframe with id, sex and birth year, which can be used as input
#' for \code{\link{sequoia}}.
#'
#' @seealso \code{\link{GenoConvert}}, \code{\link{PedStripFID}} to reverse
#' \code{UseFID}.
#'
#' @examples
#' \dontrun{
#' # combine FID and IID in dataframe with additional sex & birth years
#' ExtraLH$FID_IID <- paste(ExtraLH$FID, ExtraLH$IID, sep = "__")
#' LH.new <- LHConvert(PlinkFile, UseFID = TRUE, FIDsep = "__",
#' LifeHistData = ExtraLH)
#' }
#'
#' @export
LHConvert <- function(PlinkFile = NULL, UseFID = FALSE,
                      SwapSex = TRUE, FIDsep="__", LifeHistData=NULL)
{
  ## Extract id / Sex / BirthYear from the six descriptive columns of a PLINK
  ## .fam / .ped / .raw file (FID, IID, father, mother, sex, phenotype) and
  ## return a 3-column dataframe usable as sequoia life history input.
  if (is.null(PlinkFile)) stop("please provide 'PlinkFile'")   # msg fixed: said 'InFile'
  if (!file.exists(PlinkFile)) stop("cannot find 'PlinkFile'")
  if (UseFID & FIDsep %in% c("", " ", "\t", "\n")) stop("sep can not be whitespace")
  ## BUG FIX: LHIN must always exist. It was previously only created when
  ## LifeHistData was supplied, so the is.null(LHIN) check further down
  ## errored with "object 'LHIN' not found" for the default call.
  LHIN <- NULL
  if (!is.null(LifeHistData)) {
    LHIN <- CheckLH(LifeHistData, sorted=FALSE)
  }
  ## column count from line 1; the first line is then skipped (header row,
  ## as in .raw files). Fields beyond column 6 are read but unused.
  ncol <- length(scan(PlinkFile, nlines=1, what="real", quiet=TRUE))
  TMP <- scan(PlinkFile, skip=1, what=as.list(c(rep("character", 2), rep("numeric", 4),
                                                rep("NULL", ncol-6))), quiet=TRUE)
  LH <- data.frame(id = TMP[[2]],          # IID
                   Sex = TMP[[5]],
                   BirthYear = TMP[[6]],   # PLINK phenotype column
                   stringsAsFactors=FALSE)
  if (SwapSex) {
    ## PLINK: 1=male, 2=female  -->  sequoia: 1=female, 2=male; rest NA
    LH$Sex <- ifelse(LH$Sex==1, 2,
                     ifelse(LH$Sex==2, 1,
                            NA))
  }
  if (UseFID) {
    ## replace each id by the composite FID<FIDsep>IID
    IDX <- data.frame(id.old = TMP[[2]],
                      id.new = paste(TMP[[1]], TMP[[2]], sep=FIDsep),
                      stringsAsFactors=FALSE)
    LH <- merge(LH, IDX, by.x="id", by.y="id.old", all.x=TRUE)
    LH$id <- ifelse(!is.na(LH$id.new), LH$id.new, LH$id)
    LH <- LH[, c("id", "Sex", "BirthYear")]
  }
  if (!is.null(LHIN)) {
    ## merge in the extra life history data; LHIN takes priority, with a
    ## warning listing up to 10 conflicting ids per field
    names(LHIN) <- c("id", "Sex", "BirthYear")
    LH$Sex[!LH$Sex %in% c(1,2)] <- NA
    LHIN$Sex[!LHIN$Sex %in% c(1,2)] <- NA
    LH$BirthYear[LH$BirthYear < 0] <- NA
    LHIN$BirthYear[LHIN$BirthYear < 0] <- NA
    chk <- merge(LH, LHIN, by="id")
    n.sexmismatch <- sum(chk$Sex.x != chk$Sex.y, na.rm=TRUE)
    n.BYmismatch <- sum(chk$BirthYear.x != chk$BirthYear.y, na.rm=TRUE)
    if (n.sexmismatch > 0 & n.sexmismatch <= 10) {
      these <- with(chk, id[which(!is.na(Sex.x) & !is.na(Sex.y) & Sex.x!=Sex.y)])
      warning(paste("There are", n.sexmismatch, "sex mismatches: ",
                    paste(these, collapse=", ")))
    } else if (n.sexmismatch>10) {
      warning(paste("There are", n.sexmismatch, "sex mismatches"))
    }
    if (n.BYmismatch > 0 & n.BYmismatch <= 10) {
      these <- with(chk, id[which(!is.na(BirthYear.x) & !is.na(BirthYear.y) & BirthYear.x!=BirthYear.y)])
      warning(paste("There are", n.BYmismatch, "birth year mismatches: ",
                    paste(these, collapse=", ")))
    } else if (n.BYmismatch>10) {
      warning(paste("There are", n.BYmismatch, "BY mismatches"))
    }
    LH <- MergeFill(LH, LHIN, by="id", overwrite=TRUE, all=TRUE)
  }
  LH
}
| /R/GenoConvert.R | no_license | cran/sequoia | R | false | false | 18,446 | r | #' @title Convert Genotype Data
#'
#' @description Convert genotype data in various formats to sequoia's
#' 1-column-per-marker format or Colony's 2-columns-per-marker format.
#'
#' @param InFile character string with name of genotype file to be converted.
#' @param InData dataframe or matrix with genotypes to be converted.
#' @param InFormat One of 'single', 'double', 'col', 'ped', 'raw', or 'seq', see
#' Details.
#' @param OutFile character string with name of converted file. If NA, return
#' matrix with genotypes in console (default); if NULL, write to
#' 'GenoForSequoia.txt' in current working directory.
#' @param OutFormat as \code{InFormat}; only 'seq' and 'col' are implemented.
#' @param Missing vector with symbols interpreted as missing data. '0' is
#' missing data for InFormats 'col' and 'ped' only.
#' @param sep vector with field separator strings that will be tried on
#' \code{InFile}. The \code{OutFile} separator uses the
#' \code{\link[utils]{write.table}} default, i.e. one blank space.
#' @param header a logical value indicating whether the file contains a header
#' as its first line. If NA (default), set to TRUE for 'raw', and FALSE
#' otherwise.
#' @param IDcol number giving the column with individual IDs; 0 indicates the
#' rownames (for InData only). If NA (default), set to 2 for InFormat 'raw'
#' and 'ped', and otherwise to 1 for InFile and 0 (rownames) for InData,
#' except when InData has a column labeled 'ID'.
#' @param FIDcol column with the family IDs, if any are wished to
#' be used. This is column 1 for InFormat 'raw' and 'seq', but those are by
#' default not used.
#' @param FIDsep string used to paste FID and IID together into a composite-ID
#' (value passed to \code{\link{paste}}'s \code{collapse}). This joining can
#' be reversed using \code{\link{PedStripFID}}.
#' @param dropcol columns to exclude from the output data, on top of IDcol and
#' FIDcol (which become rownames). When NA, defaults to columns 3-6 for
#' InFormat 'raw' and 'seq'. Can also be used to drop some SNPs, see example
#' below on how to do this for the 2-columns-per-SNP input formats.
#' @param quiet suppress messages and warnings.
#'
#' @return A genotype matrix in the specified output format. If 'OutFile' is
#' specified, the matrix is written to this file and nothing is returned
#' inside R. When converting to 0/1/2 format, 2 is the homozygote for the
#' minor allele, and 0 the homozygote for the major allele.
#'
#' @details The first two arguments are interchangeable, and can be given
#' unnamed. The first argument is assumed to be a file name if it is of class
#' 'character' and length 1, and to be the genetic data if it is a matrix or
#' dataframe.
#'
#'
#' @section Input formats:
#' The following formats can be specified by \code{InFormat}:
#' \describe{
#' \item{seq}{(sequoia) genotypes are coded as 0, 1, 2, missing as \eqn{-9},
#' in 1 column per marker. Column 1 contains IDs, there is no header row.}
#' \item{raw}{(PLINK) genotypes are coded as 0, 1, 2, missing as NA, in 1
#' column per marker. The first 6 columns are descriptive (1:FID, 2:IID, 3 to
#' 6 ignored), and there is a header row. This is produced by PLINK's option
#' --recodeA}
#' \item{ped}{(PLINK) genotypes are coded as A, C, T, G, missing as 0, in 2
#' columns per marker. The first 6 columns are descriptive (1:FID, 2:IID, 3 to
#' 6 ignored). }
#' \item{col}{(Colony) genotypes are coded as numeric values, missing as 0, in
#' 2 columns per marker. Column 1 contains IDs.}
#' \item{single}{1 column per marker, otherwise unspecified}
#' \item{double}{2 columns per marker, otherwise unspecified}
#' }
#' For each \code{InFormat}, its default values for \code{Missing, header,
#' IDcol, FIDcol}, and \code{dropcol} can be overruled by specifying the
#' corresponding input parameters.
#'
#'
#' @section Error messages:
#' Occasionally when reading in a file \code{GenoConvert} may give an error
#' that 'rows have unequal length'. GenoConvert makes use of
#' \code{\link{readLines}} and \code{\link{strsplit}}, which is much faster
#' than \code{\link{read.table}} for large datafiles, but also more sensitive
#' to unusual line endings, unusual end-of-file characters, or invisible
#' characters (spaces or tabs) after the end of some lines. In these cases,
#' try to read the data from file using read.table or read.csv, and then use
#' \code{GenoConvert} on this dataframe or matrix, see example.
#'
#' @author Jisca Huisman, \email{jisca.huisman@gmail.com}
#'
#' @seealso \code{\link{CheckGeno}, \link{SnpStats}, \link{LHConvert}}.
#'
#' @examples
#' \dontrun{
#' # Requires PLINK installed & in system PATH:
#'
#' # tinker with window size, window overlap and VIF to get a set of
#' # 400 - 800 markers (100-200 enough for just parentage):
#' system("cmd", input = "plink --file mydata --indep 50 5 2")
#' system("cmd", input = "plink --file mydata --extract plink.prune.in
#' --recodeA --out PlinkOUT")
#'
#' GenoM <- GenoConvert(InFile = "PlinkOUT.raw")
#'
#' # save time on file conversion next time:
#' write.table(GenoM, file="Geno_sequoia.txt", quote=FALSE, col.names=FALSE)
#' GenoM <- as.matrix(read.table("Geno_sequoia.txt", row.names=1, header=FALSE))
#'
#' # drop some SNPs, e.g. after a warning of >2 alleles:
#' dropSNP <- c(5,68,101,128)
#' GenoM <- GenoConvert(ColonyFile, InFormat = "col",
#' dropcol = 1 + c(2*dropSNP-1, 2*dropSNP) )
#'
#' # circumvent a 'rows have unequal length' error:
#' GenoTmp <- as.matrix(read.table("mydata.txt", header=TRUE, row.names=1))
#' GenoM <- GenoConvert(InData=GenoTmp, InFormat="single", IDcol=0)
#' }
#'
#' @importFrom stats na.exclude
#'
#' @export
GenoConvert <- function(InData = NULL,
                        InFile = NULL,
                        InFormat = "raw",
                        OutFile = NA,
                        OutFormat = "seq",
                        Missing = c("-9", "??", "?", "NA", "NULL",
                                    c("0")[InFormat %in% c("col","ped")]),  # '0' is missing only for col/ped
                        sep = c(" ", "\t", ",", ";"),
                        header = NA,
                        IDcol = NA,     # NA = choose a format-dependent default below
                        FIDcol = NA,
                        FIDsep = "__",
                        dropcol = NA,
                        quiet = FALSE) {
  ## --- sanity checks: exactly one of InFile / InData -----------------------
  if (is.null(InFile) & is.null(InData)) {
    stop("please provide 'InFile' or 'InData'")
  } else if (!is.null(InFile) & !is.null(InData)) {
    stop("please provide either 'InFile' or 'InData', not both")
  }
  ## the first two arguments are interchangeable: a single character string
  ## is taken as a file name, a matrix/data.frame as the data itself
  if (length(InData)==1 & inherits(InData, "character")) {
    InFile <- InData
    InData <- NULL
  } else if (is.matrix(InFile) | is.data.frame(InFile)) {
    InData <- InFile
    InFile <- NULL
  }
  if (!is.null(InFile)) {
    if (is.character(InFile)) {
      if (!file.exists(InFile)) stop("cannot find 'InFile'")
    } else {
      stop("'InFile' in unknown format, should be character string.")
    }
    if (is.na(header)) {
      ## only PLINK .raw files carry a header row by default
      header <- ifelse(InFormat == "raw", TRUE, FALSE)
    }
  }
  if (!InFormat %in% c("raw", "seq", "col", "ped", "single", "double")) {
    stop("invalid InFormat")
  }
  if (OutFormat %in% c("raw", "ped", "single")) {
    stop("OutFormat not (yet) implemented")
  } else if (!OutFormat %in% c("seq", "col")) {
    stop("invalid OutFormat")
  }
  ## composite FID<FIDsep>IID ids would be split again by whitespace
  if (!is.na(FIDcol) & FIDsep %in% c("", " ", "\t", "\n")) stop("sep can not be whitespace")
  ## NB: UseFID is fixed *before* the raw/ped default fills FIDcol below, so
  ## PLINK family IDs are dropped but not pasted unless FIDcol is set by hand
  UseFID <- !is.na(FIDcol)
  if (is.na(IDcol)) {
    ## format-dependent default: column 2 for PLINK formats; otherwise
    ## column 1 when reading from file, or rownames / an 'ID' column for
    ## in-memory data
    IDcol <- ifelse(InFormat %in% c("raw", "ped"),
                    2,
                    ifelse(!is.null(InFile),
                           1,
                           ifelse("ID" %in% colnames(InData),
                                  which(colnames(InData) == "ID"),
                                  0)))   # 0 = use rownames
  }
  if (InFormat %in% c("raw", "ped")) {
    ## PLINK: col 1 = family ID; cols 3-6 = parents / sex / phenotype (ignored)
    if (is.na(FIDcol)) FIDcol <- 1
    ## BUG FIX: 'dropcol' may be a user-supplied vector (documented use);
    ## a bare is.na() on a vector errors in an if() in modern R
    if (length(dropcol) == 1 && is.na(dropcol)) dropcol <- c(3:6)
  }
  if (OutFormat == "seq" & is.null(OutFile)) {
    OutFile <- "GenoForSequoia.txt"
  } else if (is.null(OutFile)) {
    stop("please provide 'OutFile'")
  }
  ## ask for confirmation before (over)writing a file -- interactive use only
  if (interactive() & !quiet & !is.na(OutFile)) {
    if (file.exists(OutFile)) {
      ANS <- readline(prompt = paste("WARNING: ", OutFile, " will be overwritten.",
                                     "Press <N> to abort, or any other key to continue."))
    } else {
      ANS <- readline(prompt = paste("Genotypes will be written to ", OutFile,
                                     " . Press <N> to abort, or any other key to continue."))
    }
    if (substr(ANS, 1, 1) %in% c("N", "n")) stop()
  }
  #~~~~~~~~~~~~~~~~~~~~~~~~
  ## --- read the data -------------------------------------------------------
  if (!is.null(InData)) {
    GenoTmp <- as.matrix(InData)
    rm(InData)
  } else if (!is.null(InFile)) {
    ## readLines + strsplit is much faster than read.table for large files,
    ## but more sensitive to irregular line endings (see roxygen note above)
    GenoTmp <- readLines(InFile, warn=FALSE)
    if (header) GenoTmp <- GenoTmp[-1]
    TmpL <- strsplit(GenoTmp, split = sep[1])
    if (length(TmpL[[1]])==1) {
      ## first candidate separator did not split anything: try the others
      for (s in sep[-1]) {
        TmpL <- strsplit(GenoTmp, split = s)
        if (length(TmpL[[1]]) > 1) break
      }
    }
    if (length(TmpL[[1]])==1) {
      stop("unknown column separator, expecting ' ' (space), '\t' (tab), ',' or ';'")
    }
    if (length(table(sapply(TmpL, length))) > 1) {
      stop("rows have unequal length")
    }
    GenoTmp <- plyr::ldply(TmpL)
    rm(TmpL)
  }
  if (nrow(GenoTmp)<2) stop("Genotype matrix must have at least 2 individuals")
  if (ncol(GenoTmp)<2) stop("Genotype matrix must have at least 2 SNPs")
  #~~~~~~~~~
  ## --- extract ids, drop non-genotype columns ------------------------------
  if (IDcol==0) {
    IDs_geno <- rownames(GenoTmp)
  } else {
    IDs_geno <- trimws( GenoTmp[, IDcol] )
  }
  if (!is.na(FIDcol)) FID <- GenoTmp[, FIDcol]
  dropcol <- na.exclude(c(FIDcol, IDcol, dropcol))
  if (any(dropcol!=0)) GenoTmp <- GenoTmp[, -dropcol]   # -0 is a no-op, safe
  GenoTmp <- as.matrix(GenoTmp)
  if (UseFID) {
    IDs_geno <- paste(FID, IDs_geno, sep=FIDsep)
  }
  if (any(duplicated(IDs_geno))) {
    stop("'GenoM' has duplicate IDs in ", ifelse(IDcol==0, 'rownames.', paste0('column ', IDcol, '.')),
         "Please exclude or rename these samples, or specify IDcol or UseFID.")
  }
  ## --- recode all 'Missing' symbols to NA ----------------------------------
  for (misX in Missing) {
    if (any(GenoTmp == misX, na.rm=TRUE)) {
      GenoTmp <- array(gsub(misX, NA, GenoTmp, fixed=TRUE),
                       dim=dim(GenoTmp))
    }
  }
  ## --- check the coding matches the declared InFormat ----------------------
  if (InFormat %in% c("raw", "seq")) {
    if (any(!GenoTmp %in% c(0,1,2, NA))) {
      stop(paste("Unexpected value! When InFormat=", InFormat, ", genotypes should be coded as 0/1/2.\n",
                 "Choose InFormat='single' and/or different Missing, or check data."))
    }
  } else if (InFormat =="single") {
    if (!all(nchar(GenoTmp)==2, na.rm=TRUE)) {
      stop(paste("Unexpected value! When InFormat='single', genotypes should be coded as 2 digits or characters.\n",
                 "Choose InFormat='col' for 2-columns-per-SNP format, or check data."))
    }
  }
  if (InFormat %in% c("col", "ped", "single", "double")) { # A/C/T/G -> 0/1/2
    ## GCA: 2 x n-individuals x n-SNPs array holding the two alleles per SNP
    if (InFormat %in% c("ped", "col", "double")) {
      GCA <- array(dim=c(2, nrow(GenoTmp), ncol(GenoTmp)/2))
      GCA[1,,] <- GenoTmp[, seq(1,ncol(GenoTmp)-1,2)]   # odd columns: allele 1
      GCA[2,,] <- GenoTmp[, seq(2,ncol(GenoTmp),2)]     # even columns: allele 2
    } else if (InFormat %in% c("single")){
      GCA <- array(dim=c(2, nrow(GenoTmp), ncol(GenoTmp)))
      GCA[1,,] <- substr(GenoTmp,1,1)
      GCA[2,,] <- substr(GenoTmp,2,2)
    }
    UniqueAlleles <- sort(na.exclude(unique(c(GCA))))
    ## apply() returns a matrix when every SNP shows the same allele levels,
    ## and a list otherwise; handle both shapes
    Alleles <- apply(GCA, 3, function(x) table(factor(x, levels = UniqueAlleles)))
    if (is.matrix(Alleles)) {
      NumAlleles <- apply(Alleles, 2, function(x) sum(x>0))
      minorAllele <- apply(Alleles, 2, function(x) names(sort(x[x>0]))[1])
    } else { # list
      NumAlleles <- sapply(Alleles, length)
      minorAllele <- sapply(Alleles, function(x) names(sort(x))[1])
    }
    if (any(NumAlleles > 2)) {
      n.problem <- sum(NumAlleles > 2)
      stop(paste("There are", n.problem, "SNPs with >2 alleles ",
                 ifelse(n.problem<=10, paste(which(sapply(Alleles, length) > 2), collapse="-"),"")))
    }
    if (any(NumAlleles ==1) & !quiet & OutFormat!='seq') {
      warning(paste("There are", sum((NumAlleles ==1)), "monomorphic SNPs"))
    }
    ## per SNP: NA if either allele missing, 1 = heterozygote,
    ## 2 = minor-allele homozygote, 0 = major-allele homozygote
    GenoTmp2 <- sapply(seq_len(dim(GCA)[3]), function(i) {
      apply(GCA[,,i], 2, function(x) ifelse(is.na(x[1]) | is.na(x[2]), NA,
                                            ifelse(x[1] != x[2], 1, # heterozygote
                                                   ifelse(x[1] == minorAllele[i], 2, 0))) ) } )
  } else {
    ## already 0/1/2 coded: just flag monomorphic SNPs & convert to numeric
    AllHom0 <- apply(GenoTmp, 2, function(x) all(na.exclude(x) == 0))
    AllHom2 <- apply(GenoTmp, 2, function(x) all(na.exclude(x) == 2))
    if ((any(AllHom0) | any(AllHom2)) & !quiet & OutFormat!='seq') {
      warning(paste("There are", sum(AllHom0)+sum(AllHom2), "monomorphic SNPs"))
    }
    GenoTmp2 <- matrix(as.numeric(GenoTmp), nrow(GenoTmp))
  }
  rownames(GenoTmp2) <- IDs_geno
  rm(GenoTmp)
  ## --- convert to the requested OutFormat ----------------------------------
  if (OutFormat == "seq") {
    GenoOUT <- GenoTmp2
    GenoOUT[is.na(GenoOUT)] <- -9    # sequoia codes missing as -9
    CheckGeno(GenoOUT, quiet=quiet, Return = "excl") # returns invisibly
  } else if (OutFormat == "col") {
    ## Colony: each 0/1/2 genotype becomes a pair of allele columns
    dc <- list("0" = c(1,1), "1" = c(1,2), "2" = c(2,2), "-9" = c(0,0))
    GenoTmp2[is.na(GenoTmp2)] <- -9
    GenoA <- array(dim=c(nrow(GenoTmp2), 2, ncol(GenoTmp2)))
    for (i in seq_len(nrow(GenoTmp2))) {
      ## BUG FIX: index the lookup list by *name*; dc[[z]] with numeric z
      ## indexed by position, erroring for genotype 0 and missing (-9)
      GenoA[i,,] <- sapply(GenoTmp2[i,], function(z) dc[[as.character(z)]])
    }
    GenoOUT <- matrix(GenoA, nrow(GenoTmp2))
    row.names(GenoOUT) <- IDs_geno
    # } else {
    #   stop("OutFormat not implemented")  # caught above
  }
  if (!is.na(OutFile)) {
    utils::write.table(GenoOUT, file = OutFile,
                       row.names = TRUE, col.names = FALSE, quote = FALSE)
  } else {
    return(GenoOUT)
  }
}
#######################################################################
#######################################################################
#' @title Extract Sex and Birth Year from PLINK File
#'
#' @description Convert the first six columns of a PLINK .fam, .ped or
#' .raw file into a three-column lifehistory file for sequoia. Optionally
#' FID and IID are combined.
#'
#' @details The first 6 columns of PLINK .fam, .ped and .raw files are by
#' default FID - IID - father ID (ignored) - mother ID (ignored) - sex -
#' phenotype.
#'
#' @param PlinkFile character string with name of genotype file to be converted.
#' @param UseFID use the family ID column. The resulting ids (rownames of GenoM)
#' will be in the form FID__IID.
#' @param SwapSex change the coding from PLINK default (1=male, 2=female) to
#' sequoia default (1=female, 2=male); any other numbers are set to NA.
#' @param FIDsep characters in between FID and IID in the composite-ID. By default a
#' double underscore is used, to avoid problems when some IIDs contain an
#' underscore. Only used when UseFID=TRUE.
#' @param LifeHistData dataframe with additional sex and birth year info. In
#' case of conflicts, LifeHistData takes priority, with a warning. If
#' UseFID=TRUE, IDs in LifeHistData are assumed to be already as FID__IID.
#'
#' @return A dataframe with id, sex and birth year, which can be used as input
#' for \code{\link{sequoia}}.
#'
#' @seealso \code{\link{GenoConvert}}, \code{\link{PedStripFID}} to reverse
#' \code{UseFID}.
#'
#' @examples
#' \dontrun{
#' # combine FID and IID in dataframe with additional sex & birth years
#' ExtraLH$FID_IID <- paste(ExtraLH$FID, ExtraLH$IID, sep = "__")
#' LH.new <- LHConvert(PlinkFile, UseFID = TRUE, FIDsep = "__",
#' LifeHistData = ExtraLH)
#' }
#'
#' @export
LHConvert <- function(PlinkFile = NULL, UseFID = FALSE,
                      SwapSex = TRUE, FIDsep="__", LifeHistData=NULL)
{
  ## Extract id / Sex / BirthYear from the six descriptive columns of a PLINK
  ## .fam / .ped / .raw file (FID, IID, father, mother, sex, phenotype) and
  ## return a 3-column dataframe usable as sequoia life history input.
  if (is.null(PlinkFile)) stop("please provide 'PlinkFile'")   # msg fixed: said 'InFile'
  if (!file.exists(PlinkFile)) stop("cannot find 'PlinkFile'")
  if (UseFID & FIDsep %in% c("", " ", "\t", "\n")) stop("sep can not be whitespace")
  ## BUG FIX: LHIN must always exist. It was previously only created when
  ## LifeHistData was supplied, so the is.null(LHIN) check further down
  ## errored with "object 'LHIN' not found" for the default call.
  LHIN <- NULL
  if (!is.null(LifeHistData)) {
    LHIN <- CheckLH(LifeHistData, sorted=FALSE)
  }
  ## column count from line 1; the first line is then skipped (header row,
  ## as in .raw files). Fields beyond column 6 are read but unused.
  ncol <- length(scan(PlinkFile, nlines=1, what="real", quiet=TRUE))
  TMP <- scan(PlinkFile, skip=1, what=as.list(c(rep("character", 2), rep("numeric", 4),
                                                rep("NULL", ncol-6))), quiet=TRUE)
  LH <- data.frame(id = TMP[[2]],          # IID
                   Sex = TMP[[5]],
                   BirthYear = TMP[[6]],   # PLINK phenotype column
                   stringsAsFactors=FALSE)
  if (SwapSex) {
    ## PLINK: 1=male, 2=female  -->  sequoia: 1=female, 2=male; rest NA
    LH$Sex <- ifelse(LH$Sex==1, 2,
                     ifelse(LH$Sex==2, 1,
                            NA))
  }
  if (UseFID) {
    ## replace each id by the composite FID<FIDsep>IID
    IDX <- data.frame(id.old = TMP[[2]],
                      id.new = paste(TMP[[1]], TMP[[2]], sep=FIDsep),
                      stringsAsFactors=FALSE)
    LH <- merge(LH, IDX, by.x="id", by.y="id.old", all.x=TRUE)
    LH$id <- ifelse(!is.na(LH$id.new), LH$id.new, LH$id)
    LH <- LH[, c("id", "Sex", "BirthYear")]
  }
  if (!is.null(LHIN)) {
    ## merge in the extra life history data; LHIN takes priority, with a
    ## warning listing up to 10 conflicting ids per field
    names(LHIN) <- c("id", "Sex", "BirthYear")
    LH$Sex[!LH$Sex %in% c(1,2)] <- NA
    LHIN$Sex[!LHIN$Sex %in% c(1,2)] <- NA
    LH$BirthYear[LH$BirthYear < 0] <- NA
    LHIN$BirthYear[LHIN$BirthYear < 0] <- NA
    chk <- merge(LH, LHIN, by="id")
    n.sexmismatch <- sum(chk$Sex.x != chk$Sex.y, na.rm=TRUE)
    n.BYmismatch <- sum(chk$BirthYear.x != chk$BirthYear.y, na.rm=TRUE)
    if (n.sexmismatch > 0 & n.sexmismatch <= 10) {
      these <- with(chk, id[which(!is.na(Sex.x) & !is.na(Sex.y) & Sex.x!=Sex.y)])
      warning(paste("There are", n.sexmismatch, "sex mismatches: ",
                    paste(these, collapse=", ")))
    } else if (n.sexmismatch>10) {
      warning(paste("There are", n.sexmismatch, "sex mismatches"))
    }
    if (n.BYmismatch > 0 & n.BYmismatch <= 10) {
      these <- with(chk, id[which(!is.na(BirthYear.x) & !is.na(BirthYear.y) & BirthYear.x!=BirthYear.y)])
      warning(paste("There are", n.BYmismatch, "birth year mismatches: ",
                    paste(these, collapse=", ")))
    } else if (n.BYmismatch>10) {
      warning(paste("There are", n.BYmismatch, "BY mismatches"))
    }
    LH <- MergeFill(LH, LHIN, by="id", overwrite=TRUE, all=TRUE)
  }
  LH
}
|
library(stringi)
library(tm)
library(gofastr)
library(ggplot2)
library(ggrepel)
library(reshape2)
library(RColorBrewer)
library(qdap)
library(qdapTools)
library(RNewsflow)
load(file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data.Rdata")
# -------------- first dataframe where all RNs are included and phrases are not kept together
# create new column without _ separating each entry
test$MH_clean <- stri_replace_all_regex(test$MH, pattern = '_ ', " ")
# new column of cleaned out RN
test$all_RN <- ""
for(i in 1:nrow(test))
{
test[i, "all_RN"] <- gsub("[\\(\\)]", "", regmatches(test[i, "RN"], gregexpr("\\(.*?\\)", test[i, "RN"])))
if (test[i, "all_RN"] == "character0")
{
test[i, "all_RN"] <- test[i, "RN"]
}
}
# idea 2 for general and named RNs
test$general_RN <- ""
test$named_RN <- ""
test$RN_temp <- gsub("^[0] "," general_RN",test$RN) # replace leading 0s w/ general_RN
test$RN_temp <- gsub(" [0] "," general_RN",test$RN_temp) # replace other " 0 "
test$RN_temp <- gsub(" \\("," named_RN(",test$RN_temp) # replace rest w/ named_RN
test$named_RN <- regmatches(test$RN_temp,gregexpr("(?<=named_RN\\().*?(?=\\))", test$RN_temp, perl=TRUE))
test$general_RN <- regmatches(test$RN_temp,gregexpr("(?<=general_RN\\().*?(?=\\))", test$RN_temp, perl=TRUE))
test$RN_temp <- NULL
test$named_RN <- unlist(lapply(test$named_RN, function(x) ifelse(is.null(x), NA, paste0(x, collapse = ", "))))
test$general_RN <- unlist(lapply(test$general_RN, function(x) ifelse(is.null(x), NA, paste0(x, collapse = ", "))))
# create a new column without the c" and " list structure
# Each step must operate on the result of the previous one: the original
# second statement re-read test$all_RN, silently discarding the '^c'
# removal, and a third gsub("^c", ...) was patched on to compensate.  The
# fixed chain produces the same final value with no dead code.
test$all_RN_clean <- stri_replace_all_regex(test$all_RN, pattern = '^c', "")
test$all_RN_clean <- stri_replace_all_regex(test$all_RN_clean, pattern = '"', "")
# save dataframe
save(test, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data.Rdata")
# work on subset
set.seed(1)
test_sub <- test[sample(nrow(test), 10000), ]
# create columns for female and male: flag rows whose MeSH terms contain
# the exact word "Female" / "Male".  Vectorised replacement for the
# original per-row loop, which summed a length-1 grepl() into a character
# "0"/"1" and then re-coded it; the final values are identical.
test_sub$female <- ifelse(grepl("\\bFemale\\b", test_sub$MH_clean), "female", "")
test_sub$male <- ifelse(grepl("\\bMale\\b", test_sub$MH_clean), "male", "")
# repeat gender frequency for RN: append the gender flags to the cleaned RN
test_sub$RN_demo <- paste(test_sub$all_RN_clean, test_sub$female, sep = ", ")
test_sub$RN_demo <- paste(test_sub$RN_demo, test_sub$male, sep = ", ")
# -------------- second dataframe (only named RNs where phrases are kept together)
# remove all general RN terms (rows whose named_RN collapsed to "")
test2 <- test[!(test$named_RN == ""), ]
# keep words together with ~ and separate entries with spaces
# Order-sensitive chain: ", " is first parked as "?", spaces inside each
# phrase become "~", then "?" is turned back into the entry separator " ";
# finally stray commas and hyphens are also joined with "~".
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = ", ", replacement = "?")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = " ", replacement = "~")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = "\\?", replacement = " ")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = ",", replacement = "~")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = "-", replacement = "~")
# fixed seed so the 10k-row subsample is reproducible
set.seed(1)
test2 <- test2[sample(nrow(test2), 10000), ]
# create columns for female and male: flag rows whose MeSH terms contain
# the exact word "Female" / "Male".  Vectorised replacement for the
# original per-row loop; the final values are identical.
test2$female <- ifelse(grepl("\\bFemale\\b", test2$MH_clean), "female", "")
test2$male <- ifelse(grepl("\\bMale\\b", test2$MH_clean), "male", "")
# repeat gender frequency for RN: append the gender flags to named_RN
test2$RN_demo <- paste(test2$named_RN, test2$female, sep = ", ")
test2$RN_demo <- paste(test2$RN_demo, test2$male, sep = ", ")
# create corpus with phrases kept together based off https://stackoverflow.com/questions/24038498/corpus-build-with-phrases
dat <- test2[ , 11]  # NOTE(review): assumes this keeps a one-column table (tibble-like); a plain data.frame would drop to a vector here -- confirm
colnames(dat) <- c("text")
# one document id per row: "doc1", "doc2", ...  (replaces the original
# build-a-counter-column-then-drop-it loop; same resulting columns)
dat$docs <- paste0("doc", seq_len(nrow(dat)))
# park the ", " separators, glue multiword phrases with "~~", then restore
x <- sub_holder(", ", dat$text)
RN_parsed <- apply_as_tm(t(wfm(x$unhold(gsub(" ", "~~", x$output)), dat$docs)),
                         weightTfIdf, to.qdap=FALSE)
save(RN_parsed, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/RN_parsed_tdm.Rdata")
# save dataframes
save(test_sub, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data_sub.Rdata")
save(test2, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data_sub_RN.Rdata")
| /Final Data Clean RN Non-Parsed/final_dataset_clean.R | no_license | Key2-Success/HeartBD2K | R | false | false | 5,279 | r | library(stringi)
library(tm)
library(gofastr)
library(ggplot2)
library(ggrepel)
library(reshape2)
library(RColorBrewer)
library(qdap)
library(qdapTools)
library(RNewsflow)
load(file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data.Rdata")
# -------------- first dataframe where all RNs are included and phrases are not kept together
# create new column without _ separating each entry
test$MH_clean <- stri_replace_all_regex(test$MH, pattern = '_ ', " ")
# new column of cleaned out RN
# Keep parenthesised substance names from RN (parens stripped); fall back
# to the raw RN string when RN contains no "(...)" terms.
test$all_RN <- ""
for (i in seq_len(nrow(test)))
{
  test[i, "all_RN"] <- gsub("[\\(\\)]", "", regmatches(test[i, "RN"], gregexpr("\\(.*?\\)", test[i, "RN"])))
  # An empty regmatches() result stringifies to "character(0)"; the
  # original compared against "character0", which can never match.
  if (test[i, "all_RN"] == "character(0)")
  {
    test[i, "all_RN"] <- test[i, "RN"]
  }
}
# idea 2 for general and named RNs
test$general_RN <- ""
test$named_RN <- ""
test$RN_temp <- gsub("^[0] "," general_RN",test$RN) # replace leading 0s w/ general_RN
test$RN_temp <- gsub(" [0] "," general_RN",test$RN_temp) # replace other " 0 "
test$RN_temp <- gsub(" \\("," named_RN(",test$RN_temp) # replace rest w/ named_RN
test$named_RN <- regmatches(test$RN_temp,gregexpr("(?<=named_RN\\().*?(?=\\))", test$RN_temp, perl=TRUE))
test$general_RN <- regmatches(test$RN_temp,gregexpr("(?<=general_RN\\().*?(?=\\))", test$RN_temp, perl=TRUE))
test$RN_temp <- NULL
test$named_RN <- unlist(lapply(test$named_RN, function(x) ifelse(is.null(x), NA, paste0(x, collapse = ", "))))
test$general_RN <- unlist(lapply(test$general_RN, function(x) ifelse(is.null(x), NA, paste0(x, collapse = ", "))))
# create a new column without the c" and " list structure
# Strip the c("...") list structure.  The original second statement
# re-read test$all_RN, discarding the '^c' removal; the fixed chain yields
# the same final value without the dead first step or the gsub band-aid.
test$all_RN_clean <- stri_replace_all_regex(test$all_RN, pattern = '^c', "")
test$all_RN_clean <- stri_replace_all_regex(test$all_RN_clean, pattern = '"', "")
# save dataframe
save(test, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data.Rdata")
# work on subset
set.seed(1)
test_sub <- test[sample(nrow(test), 10000), ]
# create columns for female and male: flag rows whose MeSH terms contain
# the exact word "Female" / "Male" (vectorised; identical output to the
# original per-row loop).
test_sub$female <- ifelse(grepl("\\bFemale\\b", test_sub$MH_clean), "female", "")
test_sub$male <- ifelse(grepl("\\bMale\\b", test_sub$MH_clean), "male", "")
# repeat gender frequency for RN
test_sub$RN_demo <- paste(test_sub$all_RN_clean, test_sub$female, sep = ", ")
test_sub$RN_demo <- paste(test_sub$RN_demo, test_sub$male, sep = ", ")
# -------------- second dataframe (only named RNs where phrases are kept together)
# remove all general RN terms
test2 <- test[!(test$named_RN == ""), ]
# keep words together with ~ and separate entries with spaces
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = ", ", replacement = "?")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = " ", replacement = "~")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = "\\?", replacement = " ")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = ",", replacement = "~")
test2$named_RN <- stri_replace_all_regex(test2$named_RN, pattern = "-", replacement = "~")
set.seed(1)
test2 <- test2[sample(nrow(test2), 10000), ]
# create columns for female and male: flag rows whose MeSH terms contain
# the exact word "Female" / "Male" (vectorised; identical output to the
# original per-row loop).
test2$female <- ifelse(grepl("\\bFemale\\b", test2$MH_clean), "female", "")
test2$male <- ifelse(grepl("\\bMale\\b", test2$MH_clean), "male", "")
# repeat gender frequency for RN
test2$RN_demo <- paste(test2$named_RN, test2$female, sep = ", ")
test2$RN_demo <- paste(test2$RN_demo, test2$male, sep = ", ")
# create corpus with phrases kept together based off https://stackoverflow.com/questions/24038498/corpus-build-with-phrases
dat <- test2[ , 11]  # NOTE(review): assumes this keeps a one-column table (tibble-like); a plain data.frame would drop to a vector here -- confirm
colnames(dat) <- c("text")
# one document id per row: "doc1", "doc2", ...  (replaces the original
# build-a-counter-column-then-drop-it loop; same resulting columns)
dat$docs <- paste0("doc", seq_len(nrow(dat)))
x <- sub_holder(", ", dat$text)
RN_parsed <- apply_as_tm(t(wfm(x$unhold(gsub(" ", "~~", x$output)), dat$docs)),
                         weightTfIdf, to.qdap=FALSE)
save(RN_parsed, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/RN_parsed_tdm.Rdata")
# save dataframes
save(test_sub, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data_sub.Rdata")
save(test2, file = "Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/final_data_sub_RN.Rdata")
|
require(graphics)
# catalog cross-sell data; drop the id (CID) and Health columns
hdata <- read.csv("catalogCrossSell.csv", stringsAsFactors=FALSE)
hdata2 <-subset(hdata, select=-c(CID,Health))
head(hdata2)
# NOTE(review): prcomp() defaults to center = TRUE, scale. = FALSE, so
# high-variance columns dominate the components -- confirm that is intended.
h_pca2 = prcomp(hdata2)
# per-variable variances, to eyeball how unbalanced the scales are
barplot(sapply(hdata2, var), horiz=T, las=1, cex.names=0.8)
summary(h_pca2)
biplot(h_pca2 , scale = TRUE , col = "black")
# scree plot
plot(h_pca2 , type = "l")
h_pca2
# keep the first four principal components for clustering
comp <- data.frame(h_pca2$x[,c(1:4)])
plot(comp, pch=16, col="blue")
library(rgl)
# Multi 3D plot
plot3d(comp$PC1, comp$PC2, comp$PC3)
plot3d(comp$PC1, comp$PC2, comp$PC4)
# Determine number of clusters (elbow method): total within-group sum of
# squares for k = 1..15; k = 1 is simply the total variance about the mean.
wss <- numeric(15)
wss[1] <- (nrow(comp) - 1) * sum(apply(comp, 2, var))
for (k_try in 2:15) {
  wss[k_try] <- sum(kmeans(comp, centers = k_try)$withinss)
}
plot(1:15, wss, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# From scree plot elbow occurs at k = 3~6
# Apply k-means with k=3~6
# k = 4 chosen; nstart = 25 restarts guards against poor local optima
k <- kmeans(comp, 4, nstart=25, iter.max=1000)
library(RColorBrewer)
library(scales)
# semi-transparent palette so overlapping cluster points remain visible
palette(alpha(brewer.pal(9,'Set1'), 0.5))
plot(comp, col=k$clust, pch=16)
# 3D plot
plot3d(comp$PC1, comp$PC2, comp$PC3, col=k$clust)
plot3d(comp$PC1, comp$PC3, comp$PC4, col=k$clust)
# Cluster sizes
sort(table(k$clust))
# cluster labels ordered from smallest to largest cluster
clust <- names(sort(table(k$clust)))
clust
# Row names of the observations in each cluster (smallest first)
# First cluster
c1 <- row.names(comp[k$clust==clust[1],])
# Second Cluster
c2 <- row.names(comp[k$clust==clust[2],])
# Third Cluster
c3 <- row.names(comp[k$clust==clust[3],])
# Fourth Cluster
c4 <- row.names(comp[k$clust==clust[4],])
# per-cluster column sums of the original (unscaled) variables
# NOTE(review): `c <- ...` masks base::c from here on in this session
a <- apply(hdata2[c1,] , 2 , sum)
b <- apply(hdata2[c2,] , 2 , sum)
c <- apply(hdata2[c3,] , 2 , sum)
d <- apply(hdata2[c4,] , 2 , sum)
a;b;c;d
| /r_archive/machineLearning/rcode/code21.R | no_license | DavidHan008/lockdpwn | R | false | false | 1,602 | r | require(graphics)
hdata <- read.csv("catalogCrossSell.csv", stringsAsFactors=FALSE)
hdata2 <-subset(hdata, select=-c(CID,Health))
head(hdata2)
h_pca2 = prcomp(hdata2)
barplot(sapply(hdata2, var), horiz=T, las=1, cex.names=0.8)
summary(h_pca2)
biplot(h_pca2 , scale = TRUE , col = "black")
plot(h_pca2 , type = "l")
h_pca2
comp <- data.frame(h_pca2$x[,c(1:4)])
plot(comp, pch=16, col="blue")
library(rgl)
# Multi 3D plot
plot3d(comp$PC1, comp$PC2, comp$PC3)
plot3d(comp$PC1, comp$PC2, comp$PC4)
# Determine number of clusters
wss <- (nrow(comp)-1)*sum(apply(comp,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(comp,centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares")
# From scree plot elbow occurs at k = 3~6
# Apply k-means with k=3~6
k <- kmeans(comp, 4, nstart=25, iter.max=1000)
library(RColorBrewer)
library(scales)
palette(alpha(brewer.pal(9,'Set1'), 0.5))
plot(comp, col=k$clust, pch=16)
# 3D plot
plot3d(comp$PC1, comp$PC2, comp$PC3, col=k$clust)
plot3d(comp$PC1, comp$PC3, comp$PC4, col=k$clust)
# Cluster sizes
sort(table(k$clust))
clust <- names(sort(table(k$clust)))
clust
# First cluster
c1 <- row.names(comp[k$clust==clust[1],])
# Second Cluster
c2 <- row.names(comp[k$clust==clust[2],])
# Third Cluster
c3 <- row.names(comp[k$clust==clust[3],])
# Fourth Cluster
c4 <- row.names(comp[k$clust==clust[4],])
a <- apply(hdata2[c1,] , 2 , sum)
b <- apply(hdata2[c2,] , 2 , sum)
c <- apply(hdata2[c3,] , 2 , sum)
d <- apply(hdata2[c4,] , 2 , sum)
a;b;c;d
|
library(DataComputing)
source("util.R")
#create scatter plots of white voter percentage vs proportion of votes
# Helper: relabel the demographic columns of a candidate's county table and
# attach a win/loss flag (vs. the named opponent) plus the candidate's own
# vote share.  Positions 7/10/22/30 are the demographic fields in the
# *_nums tables.  Replaces four copy-pasted stanzas.
make_winloss <- function(nums, cand, opponent) {
  wl <- nums
  wl_names <- names(wl)
  wl_names[7] <- "Young.Percentage"
  wl_names[10] <- "White.Percentage"
  wl_names[22] <- "College.Percentage"
  wl_names[30] <- "Persons.Per.Household"
  names(wl) <- wl_names
  wl$won <- as.logical(cand$votes > opponent$votes)
  wl$fraction_votes <- cand$fraction_votes
  wl
}
# Helper: scatter of percent-white vs. vote share, coloured by county win/loss.
white_profile <- function(winloss, cand_name) {
  ggplot(winloss, aes(x = White.Percentage, y = fraction_votes, col = won)) +
    geom_point() +
    labs(title = paste(cand_name, "Voter Profile"),
         x = "Percent White",
         y = "Proportion of Votes")
}
trump_winloss <- make_winloss(trump_nums, trump, cruz)
cruz_winloss <- make_winloss(cruz_nums, cruz, trump)
sanders_winloss <- make_winloss(sanders_nums, sanders, clinton)
clinton_winloss <- make_winloss(clinton_nums, clinton, sanders)
Trump.White.Profile <- white_profile(trump_winloss, "Trump")
Cruz.White.Profile <- white_profile(cruz_winloss, "Cruz")
Sanders.White.Profile <- white_profile(sanders_winloss, "Sanders")
Clinton.White.Profile <- white_profile(clinton_winloss, "Clinton")
Trump.White.Profile
Cruz.White.Profile
Clinton.White.Profile
Sanders.White.Profile
# scatter plots of vote share vs. persons per household, coloured by
# win/loss.  (The original comment called these histograms and labelled
# the y axis "Count", but each plot maps y to fraction_votes.)
Trump.Num.People.Profile <- ggplot(trump_winloss, aes(x = Persons.Per.Household, y = fraction_votes, col = won)) +
  geom_point() +
  labs(title = "Trump - Voter Household Profile",
       x = "Number of People per Household",
       y = "Proportion of Votes")
Cruz.Num.People.Profile <- ggplot(cruz_winloss, aes(x = Persons.Per.Household, y = fraction_votes, col = won)) +
  geom_point() +
  labs(title = "Cruz - Voter Household Profile",
       x = "Number of People per Household",
       y = "Proportion of Votes")
Sanders.Num.People.Profile <- ggplot(sanders_winloss, aes(x = Persons.Per.Household, y = fraction_votes, col = won)) +
  geom_point() +
  labs(title = "Sanders - Voter Household Profile",
       x = "Number of People per Household",
       y = "Proportion of Votes")
Clinton.Num.People.Profile <- ggplot(clinton_winloss, aes(x = Persons.Per.Household, y = fraction_votes, col = won)) +
  geom_point() +
  labs(title = "Clinton - Voter Household Profile",
       x = "Number of People per Household",
       y = "Proportion of Votes")
Trump.Num.People.Profile
Cruz.Num.People.Profile
Sanders.Num.People.Profile
Clinton.Num.People.Profile
# show initial county turnouts (who is winning)
# Stacked single bar in polar coordinates = pie of counties won per party;
# `won` is TRUE when the first-listed candidate beat the second.
Republican.Pie.Chart <- ggplot(trump_winloss, aes(x = factor(1), fill=won)) +
geom_bar(width=1) +
coord_polar(theta="y") +
scale_fill_discrete(name="Candidate",
breaks=c("FALSE", "TRUE"),
labels=c("Cruz", "Trump")) +
labs(title = "Republican Primary Race Results")
Democrat.Pie.Chart <- ggplot(clinton_winloss, aes(x = factor(1), fill=won)) +
geom_bar(width=1) +
coord_polar(theta="y") +
scale_fill_discrete(name="Candidate",
breaks=c("FALSE", "TRUE"),
labels=c("Sanders", "Clinton")) +
labs(title = "Democrat Primary Race Results")
Republican.Pie.Chart
Democrat.Pie.Chart
#Plot of age voter profile
# Scatter of vote share vs. percent of county population aged 18 and under,
# coloured by whether the candidate won the county.
Trump.Age.Profile <- ggplot(trump_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Trump Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Trump.Age.Profile
Cruz.Age.Profile <- ggplot(cruz_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Cruz Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Cruz.Age.Profile
Sanders.Age.Profile <- ggplot(sanders_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Sanders Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Sanders.Age.Profile
Clinton.Age.Profile <- ggplot(clinton_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Clinton Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Clinton.Age.Profile
| /visualizations.R | no_license | ketkar/election | R | false | false | 5,444 | r | library(DataComputing)
source("util.R")
#create scatter plots of white voter percentage vs proportion of votes
trump_winloss <- trump_nums
trump_names <- names(trump_winloss)
trump_names[7] = "Young.Percentage"
trump_names[10] = "White.Percentage"
trump_names[22] = "College.Percentage"
trump_names[30] = "Persons.Per.Household"
names(trump_winloss) <- trump_names
trump_winloss$won <- as.logical(trump$votes >cruz$votes)
trump_winloss$fraction_votes <- trump$fraction_votes
Trump.White.Profile <- ggplot(trump_winloss, aes(x=White.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Trump Voter Profile",
x = "Percent White",
y = "Proportion of Votes")
cruz_winloss <- cruz_nums
cruz_names <- names(cruz_winloss)
cruz_names[7] = "Young.Percentage"
cruz_names[10] = "White.Percentage"
cruz_names[22] = "College.Percentage"
cruz_names[30] = "Persons.Per.Household"
names(cruz_winloss) <- cruz_names
cruz_winloss$won <- as.logical(cruz$votes >trump$votes)
cruz_winloss$fraction_votes <- cruz$fraction_votes
Cruz.White.Profile <- ggplot(cruz_winloss, aes(x=White.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Cruz Voter Profile",
x = "Percent White",
y = "Proportion of Votes")
sanders_winloss <- sanders_nums
sanders_names <- names(sanders_winloss)
sanders_names[7] = "Young.Percentage"
sanders_names[10] = "White.Percentage"
sanders_names[22] = "College.Percentage"
sanders_names[30] = "Persons.Per.Household"
names(sanders_winloss) <- sanders_names
sanders_winloss$won <- as.logical(sanders$votes > clinton$votes)
sanders_winloss$fraction_votes <- sanders$fraction_votes
Sanders.White.Profile <- ggplot(sanders_winloss, aes(x=White.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Sanders Voter Profile",
x = "Percent White",
y = "Proportion of Votes")
clinton_winloss <- clinton_nums
clinton_names <- names(clinton_winloss)
clinton_names[7] = "Young.Percentage"
clinton_names[10] = "White.Percentage"
clinton_names[22] = "College.Percentage"
clinton_names[30] = "Persons.Per.Household"
names(clinton_winloss) <- clinton_names
clinton_winloss$won <- as.logical(clinton$votes > sanders$votes)
clinton_winloss$fraction_votes <- clinton$fraction_votes
Clinton.White.Profile <- ggplot(clinton_winloss, aes(x=White.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Clinton Voter Profile",
x = "Percent White",
y = "Proportion of Votes")
Trump.White.Profile
Cruz.White.Profile
Clinton.White.Profile
Sanders.White.Profile
# histogram of people per household, facetted by win/loss
Trump.Num.People.Profile <- ggplot(trump_winloss, aes(x = Persons.Per.Household, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Trump - Voter Household Profile",
x = "Number of People per Household",
y = "Count")
Cruz.Num.People.Profile <- ggplot(cruz_winloss, aes(x = Persons.Per.Household, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Cruz - Voter Household Profile",
x = "Number of People per Household",
y = "Count")
Sanders.Num.People.Profile <- ggplot(sanders_winloss, aes(x = Persons.Per.Household,y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Sanders - Voter Household Profile",
x = "Number of People per Household",
y = "Count")
Clinton.Num.People.Profile <- ggplot(clinton_winloss, aes(x = Persons.Per.Household,y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Clinton - Voter Household Profile",
x = "Number of People per Household",
y = "Count")
Trump.Num.People.Profile
Cruz.Num.People.Profile
Sanders.Num.People.Profile
Clinton.Num.People.Profile
# show initial county turnouts (who is winning)
Republican.Pie.Chart <- ggplot(trump_winloss, aes(x = factor(1), fill=won)) +
geom_bar(width=1) +
coord_polar(theta="y") +
scale_fill_discrete(name="Candidate",
breaks=c("FALSE", "TRUE"),
labels=c("Cruz", "Trump")) +
labs(title = "Republican Primary Race Results")
Democrat.Pie.Chart <- ggplot(clinton_winloss, aes(x = factor(1), fill=won)) +
geom_bar(width=1) +
coord_polar(theta="y") +
scale_fill_discrete(name="Candidate",
breaks=c("FALSE", "TRUE"),
labels=c("Sanders", "Clinton")) +
labs(title = "Democrat Primary Race Results")
Republican.Pie.Chart
Democrat.Pie.Chart
#Plot of age voter profile
Trump.Age.Profile <- ggplot(trump_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Trump Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Trump.Age.Profile
Cruz.Age.Profile <- ggplot(cruz_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Cruz Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Cruz.Age.Profile
Sanders.Age.Profile <- ggplot(sanders_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Sanders Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Sanders.Age.Profile
Clinton.Age.Profile <- ggplot(clinton_winloss, aes(x=Young.Percentage, y=fraction_votes, col=won)) +
geom_point() +
labs(title = "Clinton Age Profile",
x = "Percent 18 and Under",
y = "Proportion of Votes")
Clinton.Age.Profile
|
library(knitr)
library(DEGreport)
library(bcbioRNASeq)
library(tidyverse)
# Set seed for reproducibility
set.seed(1454944673L)
opts_chunk[["set"]](
autodep = TRUE,
bootstrap.show.code = FALSE,
cache = TRUE,
cache.lazy = TRUE,
dev = c("png", "pdf"),
error = TRUE,
fig.height = 10L,
fig.retina = 2L,
fig.width = 10L,
highlight = TRUE,
message = FALSE,
prompt = TRUE,
# formatR required for tidy code
tidy = TRUE,
warning = TRUE
)
theme_set(
theme_light(base_size = 14L)
)
theme_update(
legend.justification = "center",
legend.position = "bottom"
)
| /inst/rmarkdown/shared/_setup.R | permissive | GeneticResources/bcbioRNASeq | R | false | false | 621 | r | library(knitr)
library(DEGreport)
library(bcbioRNASeq)
library(tidyverse)
# Set seed for reproducibility
set.seed(1454944673L)
opts_chunk[["set"]](
autodep = TRUE,
bootstrap.show.code = FALSE,
cache = TRUE,
cache.lazy = TRUE,
dev = c("png", "pdf"),
error = TRUE,
fig.height = 10L,
fig.retina = 2L,
fig.width = 10L,
highlight = TRUE,
message = FALSE,
prompt = TRUE,
# formatR required for tidy code
tidy = TRUE,
warning = TRUE
)
theme_set(
theme_light(base_size = 14L)
)
theme_update(
legend.justification = "center",
legend.position = "bottom"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FlexTablePublicAPI.R
\name{setRowsColors}
\alias{setRowsColors}
\title{applies background colors to rows of a FlexTable}
\usage{
setRowsColors(object, i, colors)
}
\arguments{
\item{object}{a \code{FlexTable} object}
\item{i}{vector (integer index, row.names values or boolean vector) for rows selection.}
\item{colors}{background colors to apply (e.g. "#000000" or "black")}
}
\description{
applies background colors to rows of a FlexTable
}
\examples{
\donttest{
if( check_valid_java_version() ){
MyFTable <- FlexTable( data = mtcars[1:10, ], add.rownames=TRUE )
MyFTable <- setRowsColors( MyFTable, i=1:4, colors = "red" )
}
}
}
\seealso{
\code{\link{FlexTable}}, \code{\link{setColumnsColors}}, \code{\link{setZebraStyle}}
}
| /man/setRowsColors.Rd | no_license | hhy5277/ReporteRs | R | false | true | 809 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FlexTablePublicAPI.R
\name{setRowsColors}
\alias{setRowsColors}
\title{applies background colors to rows of a FlexTable}
\usage{
setRowsColors(object, i, colors)
}
\arguments{
\item{object}{a \code{FlexTable} object}
\item{i}{vector (integer index, row.names values or boolean vector) for rows selection.}
\item{colors}{background colors to apply (e.g. "#000000" or "black")}
}
\description{
applies background colors to rows of a FlexTable
}
\examples{
\donttest{
if( check_valid_java_version() ){
MyFTable <- FlexTable( data = mtcars[1:10, ], add.rownames=TRUE )
MyFTable <- setRowsColors( MyFTable, i=1:4, colors = "red" )
}
}
}
\seealso{
\code{\link{FlexTable}}, \code{\link{setColumnsColors}}, \code{\link{setZebraStyle}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cov.sym.data.table.R
\name{cov}
\alias{cov}
\alias{cov.default}
\alias{cov.sym.data.table}
\title{Generic function for the covariance}
\usage{
cov(x, ...)
\method{cov}{default}(x, y = NULL, use = "everything",
method = c("pearson", "kendall", "spearman"), ...)
\method{cov}{sym.data.table}(x, y, method = c("centers", "interval",
"billard", "modal"), na.rm = FALSE, ...)
}
\arguments{
\item{x}{First symbolic variables.}
\item{...}{As in R cov function.}
\item{y}{Second symbolic variables.}
\item{use}{an optional character string giving a method for computing
covariances in the presence of missing values. This must be (an abbreviation of)
one of the strings 'everything', 'all.obs', 'complete.obs', 'na.or.complete',
or 'pairwise.complete.obs'.}
\item{method}{The method to be used.}
\item{na.rm}{As in R cov function.}
}
\value{
Return a real number.
}
\description{
This function compute the symbolic covariance.
}
\examples{
data(example3)
sym.data <- example3
cov(sym.data[,1], sym.data[,4], method='centers')
cov(sym.data[,2],sym.data[,6], method='centers')
cov(sym.data[,2],sym.data[,6], method='billard')
}
\references{
Billard L. and Diday E. (2006).
Symbolic data analysis: Conceptual statistics and data mining. Wiley, Chichester.
Rodriguez, O. (2000).
Classification et Modeles Lineaires en Analyse des Donnees Symboliques. Ph.D. Thesis,
Paris IX-Dauphine University.
}
\author{
Oldemar Rodriguez Rojas
}
\keyword{Covariance}
\keyword{Symbolic}
| /man/cov.Rd | no_license | Frenchyy1/RSDA | R | false | true | 1,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cov.sym.data.table.R
\name{cov}
\alias{cov}
\alias{cov.default}
\alias{cov.sym.data.table}
\title{Generic function for the covariance}
\usage{
cov(x, ...)
\method{cov}{default}(x, y = NULL, use = "everything",
method = c("pearson", "kendall", "spearman"), ...)
\method{cov}{sym.data.table}(x, y, method = c("centers", "interval",
"billard", "modal"), na.rm = FALSE, ...)
}
\arguments{
\item{x}{First symbolic variables.}
\item{...}{As in R cov function.}
\item{y}{Second symbolic variables.}
\item{use}{an optional character string giving a method for computing
covariances in the presence of missing values. This must be (an abbreviation of)
one of the strings 'everything', 'all.obs', 'complete.obs', 'na.or.complete',
or 'pairwise.complete.obs'.}
\item{method}{The method to be use.}
\item{na.rm}{As in R cov function.}
}
\value{
Return a real number.
}
\description{
This function compute the symbolic covariance.
}
\examples{
data(example3)
sym.data <- example3
cov(sym.data[,1], sym.data[,4], method='centers')
cov(sym.data[,2],sym.data[,6], method='centers')
cov(sym.data[,2],sym.data[,6], method='billard')
}
\references{
Billard L. and Diday E. (2006).
Symbolic data analysis: Conceptual statistics and data mining. Wiley, Chichester.
Rodriguez, O. (2000).
Classification et Modeles Lineaires en Analyse des Donnees Symboliques. Ph.D. Thesis,
Paris IX-Dauphine University.
}
\author{
Oldemar Rodriguez Rojas
}
\keyword{Covariance}
\keyword{Symbolic}
|
## Paper figure 9 -- proportion of heroin deaths involving other opioids
library(tidyverse)
library(RColorBrewer)
library(binom)
source('./code/functions/helpers.R')
mkdir_p('./report/paa_2017_paper/plots')
d <- read.csv("./data/race_age_tc.csv")
## We need to reshape this later so it is easier to calculate the bounds in
## a different dataframe and then perform a left_join. We rename the values of
## `comb_401` to make it easier to match later.
prop <- d %>%
mutate(race_cat = categorize_race(race)) %>%
group_by(race, year, race_cat) %>%
summarise(sum_t401_404 = sum(t401_404),
sum_t401_402 = sum(t401_402),
sum_t401_403 = sum(t401_403),
sum_t404 = sum(t404),
sum_t402 = sum(t402),
sum_t403 = sum(t403),
ras_t404401_404 = sum(t401_404)/sum(t404),
ras_t402401_402 = sum(t401_402)/sum(t402),
ras_t403401_403 = sum(t401_403)/sum(t403),
bound_t404401 = 1.96 *
sqrt(1/sum(t404) * ras_t404401_404 * (1 - ras_t404401_404)),
bound_t402401 = 1.96 *
sqrt(1/sum(t402) * ras_t402401_402 * (1 - ras_t402401_402)),
bound_t403401 = 1.96 *
sqrt(1/sum(t403) * ras_t403401_403 * (1 - ras_t403401_403))) %>%
mutate(t401_404_lower = binom.confint(sum_t401_404,
sum_t404,
methods = "wilson")$lower,
t401_404_upper = binom.confint(sum_t401_404,
sum_t404,
methods = "wilson")$upper,
t401_402_lower = binom.confint(sum_t401_402,
sum_t402,
methods = "wilson")$lower,
t401_402_upper = binom.confint(sum_t401_402,
sum_t402,
methods = "wilson")$upper,
t401_403_lower = binom.confint(sum_t401_403,
sum_t403,
methods = "wilson")$lower,
t401_403_upper = binom.confint(sum_t401_403,
sum_t403,
methods = "wilson")$upper)
# Long-form lower CI bounds; recode comb_401 to the proportion-column
# names used in `prop` so the left_join keys line up.
lowers <- prop %>%
select(race, race_cat, year, ends_with("lower")) %>%
gather(key = comb_401, value = lower,
t401_404_lower:t401_403_lower) %>%
mutate(comb_401 = ifelse(comb_401 == "t401_404_lower",
"ras_t404401_404",
ifelse(comb_401 == "t401_403_lower",
"ras_t403401_403",
"ras_t402401_402")))
# Same reshaping for the upper bounds.
uppers <- prop %>%
select(race, race_cat, year, ends_with("upper")) %>%
gather(key = comb_401, value = upper,
t401_404_upper:t401_403_upper) %>%
mutate(comb_401 = ifelse(comb_401 == "t401_404_upper",
"ras_t404401_404",
ifelse(comb_401 == "t401_403_upper",
"ras_t403401_403",
"ras_t402401_402")))
## Now do the normal calculations and left_join with our bounds from above.
# One row per race x year x opioid combination, with the proportion and its
# CI bounds; comb_401 becomes an ordered factor carrying the display labels.
reshaped_prop <- prop %>%
select(race, race_cat, year,
ras_t404401_404, ras_t402401_402, ras_t403401_403) %>%
gather(key = comb_401, value = propor,
ras_t404401_404:ras_t403401_403) %>%
left_join(lowers, by = c("race", "race_cat", "year", "comb_401")) %>%
left_join(uppers, by = c("race", "race_cat", "year", "comb_401")) %>%
mutate(comb_401 = factor(comb_401,
levels = c("ras_t402401_402",
"ras_t403401_403",
"ras_t404401_404"),
labels = c("Natural / semi-synthetic (T402)",
"Methadone (T403)",
"Other synthetic (T404)"),
ordered = TRUE))
## Plotting
# Line-and-point plot of the proportion of heroin deaths co-involving each
# other opioid class, by race over time (facetted by class when saved).
p_ma_t401_combo <- ggplot(data = reshaped_prop,
aes(x = year, y = propor,
group = race, color = race_cat)) +
geom_line(size = 1, alpha = .9) +
geom_point(size = 1.25, alpha = .9) +
scale_color_brewer(NULL, palette = "Set1") +
scale_x_continuous(expand = c(.02, 0)) +
scale_y_continuous(expand = c(.01, 0)) +
mk_classic() +
mk_x90() +
theme(legend.background = element_rect(fill = "transparent", color = NA)) +
labs(y = "Proportion",
x = NULL,
title = "Proportion of heroin (T401) deaths involving other opioids")
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1),
#        filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos.pdf",
#        height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
#        filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free.pdf",
#        height = 4, width = 8, scale = 1)
## Add vline version (annotated reference line; only this variant is saved)
p_ma_t401_combo_v <- add_vline(p_ma_t401_combo)
ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1),
filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_v.pdf",
height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
#        filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free_v.pdf",
#        height = 4, width = 8, scale = 1)
## CI
## Paper figure 9 -- proportion of heroin deaths involving other opioids
## Confidence-interval variant: same series as above plus a shaded Wilson-CI
## ribbon per race category. NOTE: this reuses (overwrites) the name
## `p_ma_t401_combo` from the previous section.
p_ma_t401_combo <- ggplot(data = reshaped_prop,
aes(x = year, y = propor,
group = race, color = race_cat)) +
## Ribbon drawn first so lines/points sit on top of the shaded band.
geom_ribbon(aes(ymax = upper,
ymin = lower,
fill = race_cat,
group = race_cat),
color = NA, alpha = .25) +
geom_line(size = 1, alpha = .9) +
geom_point(size = 1.25, alpha = .9) +
scale_fill_brewer(NULL, palette = "Set1") +
scale_color_brewer(NULL, palette = "Set1") +
scale_x_continuous(expand = c(.02, 0)) +
scale_y_continuous(expand = c(.01, 0)) +
mk_classic() +
theme(legend.background = element_rect(fill = "transparent", color = NA)) +
labs(y = "Proportion",
x = NULL,
title = "Proportion of heroin (T401) deaths involving other opioids") + facet_wrap(~ comb_401, nrow = 1)
## All CI exports are disabled; the object is built but nothing is saved here.
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_ci.pdf",
# height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free_ci.pdf",
# height = 4, width = 8, scale = 1)
## Add vline version
# p_ma_t401_combo_v <- add_vline(p_ma_t401_combo)
# ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_v_ci.pdf",
# height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free_v_ci.pdf",
# height = 4, width = 8, scale = 1)
| /report/paa_2017_paper/code/paper_figure_9.R | no_license | mkiang/opioid-mcd | R | false | false | 7,618 | r | ## Paper figure 9 -- proportion of heroin deaths involving other opioids
## Paper figure 9 setup: packages, project helpers, output dir, input data.
library(tidyverse)
library(RColorBrewer)
library(binom)
## helpers.R supplies mkdir_p(), categorize_race(), mk_classic(), mk_x90()
## and add_vline() used below -- confirm against the helpers file.
source('./code/functions/helpers.R')
mkdir_p('./report/paa_2017_paper/plots')
## Death counts by race/age with T-code combination columns (t401_404 etc.).
d <- read.csv("./data/race_age_tc.csv")
## We need to reshape this later so it is easier to calculate the bounds in
## a different dataframe and then perform a left_join. We rename the values of
## `comb_401` to make it easier to match later.
## Per (race, year): ras_t40X401_40X = share of T40X deaths that also involve
## heroin (T401), with both normal-approximation half-widths (bound_*) and
## Wilson intervals (t401_40X_lower/upper). The bound_* columns are not used
## by the plotting code below -- the plots use the Wilson intervals.
prop <- d %>%
mutate(race_cat = categorize_race(race)) %>%
group_by(race, year, race_cat) %>%
summarise(sum_t401_404 = sum(t401_404),
sum_t401_402 = sum(t401_402),
sum_t401_403 = sum(t401_403),
sum_t404 = sum(t404),
sum_t402 = sum(t402),
sum_t403 = sum(t403),
ras_t404401_404 = sum(t401_404)/sum(t404),
ras_t402401_402 = sum(t401_402)/sum(t402),
ras_t403401_403 = sum(t401_403)/sum(t403),
bound_t404401 = 1.96 *
sqrt(1/sum(t404) * ras_t404401_404 * (1 - ras_t404401_404)),
bound_t402401 = 1.96 *
sqrt(1/sum(t402) * ras_t402401_402 * (1 - ras_t402401_402)),
bound_t403401 = 1.96 *
sqrt(1/sum(t403) * ras_t403401_403 * (1 - ras_t403401_403))) %>%
## Wilson score intervals for each x/n pair (binom package); each interval
## is computed twice (once for lower, once for upper).
mutate(t401_404_lower = binom.confint(sum_t401_404,
sum_t404,
methods = "wilson")$lower,
t401_404_upper = binom.confint(sum_t401_404,
sum_t404,
methods = "wilson")$upper,
t401_402_lower = binom.confint(sum_t401_402,
sum_t402,
methods = "wilson")$lower,
t401_402_upper = binom.confint(sum_t401_402,
sum_t402,
methods = "wilson")$upper,
t401_403_lower = binom.confint(sum_t401_403,
sum_t403,
methods = "wilson")$lower,
t401_403_upper = binom.confint(sum_t401_403,
sum_t403,
methods = "wilson")$upper)
## Map a gathered bound-column name (e.g. "t401_404_lower") onto the matching
## ratio-column name used in `prop` (e.g. "ras_t404401_404"), so the bounds
## can later be joined onto the point estimates by `comb_401`.
recode_comb_401 <- function(key, suffix) {
  ifelse(key == paste0("t401_404_", suffix),
         "ras_t404401_404",
         ifelse(key == paste0("t401_403_", suffix),
                "ras_t403401_403",
                "ras_t402401_402"))
}
## Long-format lower bounds, keyed to match `prop`'s ratio columns.
lowers <- prop %>%
  select(race, race_cat, year, ends_with("lower")) %>%
  gather(key = comb_401, value = lower,
         t401_404_lower:t401_403_lower) %>%
  mutate(comb_401 = recode_comb_401(comb_401, "lower"))
## Long-format upper bounds, same keying.
uppers <- prop %>%
  select(race, race_cat, year, ends_with("upper")) %>%
  gather(key = comb_401, value = upper,
         t401_404_upper:t401_403_upper) %>%
  mutate(comb_401 = recode_comb_401(comb_401, "upper"))
## Now do the normal calculations and left_join with our bounds from above.
reshaped_prop <- prop %>%
select(race, race_cat, year,
ras_t404401_404, ras_t402401_402, ras_t403401_403) %>%
gather(key = comb_401, value = propor,
ras_t404401_404:ras_t403401_403) %>%
left_join(lowers, by = c("race", "race_cat", "year", "comb_401")) %>%
left_join(uppers, by = c("race", "race_cat", "year", "comb_401")) %>%
mutate(comb_401 = factor(comb_401,
levels = c("ras_t402401_402",
"ras_t403401_403",
"ras_t404401_404"),
labels = c("Natural / semi-synthetic (T402)",
"Methadone (T403)",
"Other synthetic (T404)"),
ordered = TRUE))
## Plotting
p_ma_t401_combo <- ggplot(data = reshaped_prop,
aes(x = year, y = propor,
group = race, color = race_cat)) +
geom_line(size = 1, alpha = .9) +
geom_point(size = 1.25, alpha = .9) +
scale_color_brewer(NULL, palette = "Set1") +
scale_x_continuous(expand = c(.02, 0)) +
scale_y_continuous(expand = c(.01, 0)) +
mk_classic() +
mk_x90() +
theme(legend.background = element_rect(fill = "transparent", color = NA)) +
labs(y = "Proportion",
x = NULL,
title = "Proportion of heroin (T401) deaths involving other opioids")
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos.pdf",
# height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free.pdf",
# height = 4, width = 8, scale = 1)
## Add vline version
p_ma_t401_combo_v <- add_vline(p_ma_t401_combo)
ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1),
filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_v.pdf",
height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free_v.pdf",
# height = 4, width = 8, scale = 1)
## CI
## Paper figure 9 -- proportion of heroin deaths involving other opioids
p_ma_t401_combo <- ggplot(data = reshaped_prop,
aes(x = year, y = propor,
group = race, color = race_cat)) +
geom_ribbon(aes(ymax = upper,
ymin = lower,
fill = race_cat,
group = race_cat),
color = NA, alpha = .25) +
geom_line(size = 1, alpha = .9) +
geom_point(size = 1.25, alpha = .9) +
scale_fill_brewer(NULL, palette = "Set1") +
scale_color_brewer(NULL, palette = "Set1") +
scale_x_continuous(expand = c(.02, 0)) +
scale_y_continuous(expand = c(.01, 0)) +
mk_classic() +
theme(legend.background = element_rect(fill = "transparent", color = NA)) +
labs(y = "Proportion",
x = NULL,
title = "Proportion of heroin (T401) deaths involving other opioids") + facet_wrap(~ comb_401, nrow = 1)
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_ci.pdf",
# height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free_ci.pdf",
# height = 4, width = 8, scale = 1)
## Add vline version
# p_ma_t401_combo_v <- add_vline(p_ma_t401_combo)
# ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_v_ci.pdf",
# height = 4, width = 8, scale = 1)
# ggsave(p_ma_t401_combo_v + facet_wrap(~ comb_401, nrow = 1, scales = "free_y"),
# filename = "./report/paa_2017_paper/plots/paper_fig9_t401_combos_free_v_ci.pdf",
# height = 4, width = 8, scale = 1)
|
## Load the household power data and keep only 2007-02-01 and 2007-02-02.
## Returns a data.frame with the original columns plus a POSIXct `Timestamp`
## column combining Date and Time.
getData <- function () {
  data <- read.table('household_power_consumption.txt', header = TRUE, sep = ";", na.strings = '?')
  ## rbind of two single-day subsets keeps all Feb-1 rows before Feb-2 rows.
  data <- rbind(subset(data, Date == '1/2/2007'), subset(data, Date == '2/2/2007'))
  ## FIX: strptime() returns POSIXlt, which misbehaves as a data.frame column
  ## (list-of-components storage); convert to POSIXct for safe storage.
  data$Timestamp <- as.POSIXct(strptime(paste(data$Date, data$Time), '%d/%m/%Y %H:%M:%S'))
  data
}
globalActivePower <- function (data) {
  # Time series of global active power; the default index axis is suppressed
  # and replaced with weekday labels at the start, middle, and end.
  n <- length(data$Global_active_power)
  plot.ts(data$Global_active_power, main = '', xlab = '', ylab = 'Global Active Power', xaxt='n')
  axis(side = 1, at = c(0, n / 2, n), labels = c('Thu', 'Fri', 'Sat'))
}
energySubMetering <- function (data) {
  # Overlay the three sub-metering series against Timestamp, with a legend.
  plot(data$Timestamp, data$Sub_metering_1, type = 'l', ylab = 'Energy sub metering', xlab = '')
  lines(data$Timestamp, data$Sub_metering_2, col = '#f81c20')
  lines(data$Timestamp, data$Sub_metering_3, col = '#4400f9')
  # BUG FIX: this plot's x-axis is a date-time axis (user coordinates are
  # seconds since the epoch), so tick positions must be Timestamp values.
  # The previous row-index positions c(0, n/2, n) -- copied from the
  # plot.ts() panel -- fell outside the plotted range and drew nothing.
  n <- nrow(data)
  axis(side = 1, at = as.numeric(data$Timestamp[c(1, n %/% 2, n)]), labels = c('Thu', 'Fri', 'Sat'))
  legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), fill = c('#111111', '#f81c20', '#4400f9'))
}
voltage <- function (data) {
  # Line plot of voltage over time with datetime/Voltage axis titles.
  plot(data$Timestamp, data$Voltage, xlab = 'datetime', ylab = 'Voltage', type = 'l')
}
globalReactivePower <- function (data) {
  # Line plot of global reactive power over time.
  plot(data$Timestamp, data$Global_reactive_power, xlab = 'datetime', ylab = 'Global_reactive_power', type = 'l')
}
plot4 <- function (data) {
  # 2 x 2 panel figure, filled column-wise: active power and sub-metering in
  # the left column; voltage and reactive power in the right column.
  layout(matrix(1:4, nrow = 2))
  globalActivePower(data)
  energySubMetering(data)
  voltage(data)
  globalReactivePower(data)
}
## Render the four-panel figure to plot4.png (default 480x480 device).
## NOTE(review): the matching dev.off() sits on the following file line.
png('plot4.png')
plot4(getData())
dev.off() | /plot4.R | no_license | derekconjar/ExData_Plotting1 | R | false | false | 1,598 | r | getData <- function () {
data <- read.table('household_power_consumption.txt', header = TRUE, sep = ";", na.strings = '?')
data <- rbind(subset(data, Date == '1/2/2007'), subset(data, Date == '2/2/2007'))
data$Timestamp <- strptime(paste(data$Date, data$Time), '%d/%m/%Y %H:%M:%S')
data
}
globalActivePower <- function (data) {
plot.ts(data$Global_active_power, main = '', xlab = '', ylab = 'Global Active Power', xaxt='n')
axis(side = 1, at = c(0, length(data$Global_active_power) / 2, length(data$Global_active_power)), labels = c('Thu', 'Fri', 'Sat'))
}
energySubMetering <- function (data) {
plot(data$Timestamp, data$Sub_metering_1, type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(data$Timestamp, data$Sub_metering_2, col = '#f81c20')
lines(data$Timestamp, data$Sub_metering_3, col = '#4400f9')
axis(side = 1, at = c(0, length(data$Global_active_power) / 2, length(data$Global_active_power)), labels = c('Thu', 'Fri', 'Sat'))
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), fill = c('#111111', '#f81c20', '#4400f9'))
}
voltage <- function (data) {
plot(data$Timestamp, data$Voltage, type = 'l', xlab = 'datetime', ylab = 'Voltage')
}
globalReactivePower <- function (data) {
plot(data$Timestamp, data$Global_reactive_power, type = 'l', ylab = 'Global_reactive_power', xlab = 'datetime')
}
plot4 <- function (data) {
layout(matrix(c(1,2,3,4), 2, 2, byrow = FALSE))
globalActivePower(data)
energySubMetering(data)
voltage(data)
globalReactivePower(data)
}
png('plot4.png')
plot4(getData())
dev.off() |
# Proportion test (two-sample proportions power analysis)
#pwr.2p.test(h=,n=,sig.level=,power=)
pwr.2p.test(h=ES.h(.65,.6),sig.level = .05,power=.9,alternative = "greater") | /9.8/2p.R | no_license | weidaoming/R | R | false | false | 128 | r | #比例检验
#pwr.2p.test(h=,n=,sig.level=,power=)
pwr.2p.test(h=ES.h(.65,.6),sig.level = .05,power=.9,alternative = "greater") |
#' @title Transpose a matrix on the remote nodes.
#' @description Remote counterpart of base::t(): asks every node to evaluate
#'   tDSS() on \code{symbol} and stores the transposed result under
#'   \code{newobj}.
#' @param symbol a character, name of the remote object to transpose
#' @param newobj a character, name for the transposed object; defaults to
#'   \code{symbol} with an \code{_tr} suffix
#' @param async a logical, see datashield.aggregate
#' @param wait a logical, see datashield.aggregate
#' @param datasources a list of opal objects obtained after logging into the
#'   opal servers (see datashield.login); discovered automatically when NULL
#' @export
#'
dssT <- function(symbol, newobj = paste0(symbol, '_tr'), async = TRUE, wait = TRUE, datasources = NULL){
  # Fall back to whatever login objects exist in the calling session.
  if (is.null(datasources)) {
    datasources <- dsBaseClient_findLoginObjects()
  }
  # Build the server-side call, e.g. "tDSS(x)", and assign its result remotely.
  remote_call <- paste0('tDSS(', symbol, ')')
  opal::datashield.assign(datasources, newobj, as.symbol(remote_call), async, wait)
}
| /R/dssT.R | no_license | IulianD/dsSwissKnifeClient | R | false | false | 753 | r |
#' @title Transpose a matrix on the remote nodes.
#' @description See base::t()
#' @param symbol a character, name of the object to transpose
#' @param newobj a character, name of the new, transposed object
#' @param async a logical, see datashield.aggregate
#' @param wait a logical, see datashield.aggregate
#' @param datasources a list of opal objects obtained after logging into the opal servers (see datashield.login)
#' @export
#'
dssT <- function(symbol, newobj = paste0(symbol, '_tr'), async = TRUE, wait = TRUE, datasources = NULL){
if(is.null(datasources)){
datasources <- dsBaseClient_findLoginObjects()
}
expr <- paste0('tDSS(', symbol, ')')
opal::datashield.assign(datasources, newobj, as.symbol(expr), async, wait)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neptune_operations.R
\name{neptune_describe_db_instances}
\alias{neptune_describe_db_instances}
\title{Returns information about provisioned instances, and supports pagination}
\usage{
neptune_describe_db_instances(
DBInstanceIdentifier = NULL,
Filters = NULL,
MaxRecords = NULL,
Marker = NULL
)
}
\arguments{
\item{DBInstanceIdentifier}{The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case-sensitive.
Constraints:
\itemize{
\item If supplied, must match the identifier of an existing DBInstance.
}}
\item{Filters}{A filter that specifies one or more DB instances to describe.
Supported filters:
\itemize{
\item \code{db-cluster-id} - Accepts DB cluster identifiers and DB cluster
Amazon Resource Names (ARNs). The results list will only include
information about the DB instances associated with the DB clusters
identified by these ARNs.
\item \code{engine} - Accepts an engine name (such as \code{neptune}), and restricts
the results list to DB instances created by that engine.
}
For example, to invoke this API from the Amazon CLI and filter so that
only Neptune DB instances are returned, you could use the following
command:}
\item{MaxRecords}{The maximum number of records to include in the response. If more
records exist than the specified \code{MaxRecords} value, a pagination token
called a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.}
\item{Marker}{An optional pagination token provided by a previous
\code{\link[=neptune_describe_db_instances]{describe_db_instances}} request. If
this parameter is specified, the response includes only records beyond
the marker, up to the value specified by \code{MaxRecords}.}
}
\description{
Returns information about provisioned instances, and supports pagination.
See \url{https://www.paws-r-sdk.com/docs/neptune_describe_db_instances/} for full documentation.
}
\keyword{internal}
| /cran/paws.database/man/neptune_describe_db_instances.Rd | permissive | paws-r/paws | R | false | true | 2,122 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neptune_operations.R
\name{neptune_describe_db_instances}
\alias{neptune_describe_db_instances}
\title{Returns information about provisioned instances, and supports pagination}
\usage{
neptune_describe_db_instances(
DBInstanceIdentifier = NULL,
Filters = NULL,
MaxRecords = NULL,
Marker = NULL
)
}
\arguments{
\item{DBInstanceIdentifier}{The user-supplied instance identifier. If this parameter is specified,
information from only the specific DB instance is returned. This
parameter isn't case-sensitive.
Constraints:
\itemize{
\item If supplied, must match the identifier of an existing DBInstance.
}}
\item{Filters}{A filter that specifies one or more DB instances to describe.
Supported filters:
\itemize{
\item \code{db-cluster-id} - Accepts DB cluster identifiers and DB cluster
Amazon Resource Names (ARNs). The results list will only include
information about the DB instances associated with the DB clusters
identified by these ARNs.
\item \code{engine} - Accepts an engine name (such as \code{neptune}), and restricts
the results list to DB instances created by that engine.
}
For example, to invoke this API from the Amazon CLI and filter so that
only Neptune DB instances are returned, you could use the following
command:}
\item{MaxRecords}{The maximum number of records to include in the response. If more
records exist than the specified \code{MaxRecords} value, a pagination token
called a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.}
\item{Marker}{An optional pagination token provided by a previous
\code{\link[=neptune_describe_db_instances]{describe_db_instances}} request. If
this parameter is specified, the response includes only records beyond
the marker, up to the value specified by \code{MaxRecords}.}
}
\description{
Returns information about provisioned instances, and supports pagination.
See \url{https://www.paws-r-sdk.com/docs/neptune_describe_db_instances/} for full documentation.
}
\keyword{internal}
|
##run fourth to put all data into useable formats for Split B
library(tidyverse)
library(data.table)
##list all files, both hc and pd, into an array
file_list <- list.files(path="~/PG-400/data2/raw/")
## Shuffle all file names with a fixed seed so the six folds below are
## reproducible; each fold then holds ~1/6 of the files.
set.seed(32)
TestingNames <- sample(file_list)
Split.i <- TestingNames[1:504]
Split.ii <- TestingNames[505:1008]
Split.iii <- TestingNames[1009:1512]
Split.iv <- TestingNames[1513:2016]
Split.v <- TestingNames[2017:2520]
## NOTE(review): 2521:3026 spans 506 names while every other fold has 504;
## if file_list holds exactly 3024 files the last two indices yield NA --
## confirm the intended count.
Split.vi <- TestingNames[2521:3026]
#create a not in function and then create the file names of the training set
"%ni%" <- Negate("%in%") ##create a NOT IN function

## Read a set of raw EEG CSVs and stack them into a (1024 x 4 x n) tensor.
## Each file keeps only the four frontal channels (FC5, FC1, FC2, FC6; the
## `time` column is dropped) and is truncated to 1024 rows because some
## recordings carry one extra time point. Building the list first and binding
## once replaces the original four copy-pasted loops that grew the array with
## abind() on every iteration (O(n^2) in copies).
read_fc_tensor <- function(files) {
  mats <- lapply(files, function(f) {
    d <- fread(f, stringsAsFactors = F)
    d <- select(d, FC5, FC1, FC2, FC6)
    as.matrix(d[1:1024, ])
  })
  abind::abind(mats, along = 3)
}

## ---- Fold 1: hold out Split.i, train on everything else ----
TrainingNames <- file_list[file_list %ni% Split.i]
set.seed(32)
TrainingNames <- sample(TrainingNames)
##change the dir for the read step
setwd("/Users/ivorym/PG-400/data2/raw")
TrainingSet <- read_fc_tensor(TrainingNames)
TestingSet <- read_fc_tensor(Split.i)
##for ease of working, save the arrays
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2)) #force into samples, time, features
TestingSetMod <- aperm(TestingSet, c(3, 1, 2)) #force into samples, time, features
##finally save everything to load into memory at a later date
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/1Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/1Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/1TrainLabels.Rda")
saveRDS(Split.i, file="~/PG-400/data2/split B/4front/1TestLabels.Rda")
## ---- Fold 2: hold out Split.ii, train on everything else ----
## Stack raw EEG CSVs into a (1024 x 4 x n) tensor of the four frontal
## channels (FC5, FC1, FC2, FC6; `time` dropped), truncating each file to
## 1024 rows. List-then-bind replaces the original copy-pasted loops that
## grew the array with abind() on every iteration (O(n^2) in copies).
read_fc_tensor <- function(files) {
  mats <- lapply(files, function(f) {
    d <- fread(f, stringsAsFactors = F)
    d <- select(d, FC5, FC1, FC2, FC6)
    as.matrix(d[1:1024, ])
  })
  abind::abind(mats, along = 3)
}
TrainingNames <- file_list[file_list %ni% Split.ii]
set.seed(32)
TrainingNames <- sample(TrainingNames)
##change the dir for the read step
setwd("/Users/ivorym/PG-400/data2/raw")
TrainingSet <- read_fc_tensor(TrainingNames)
TestingSet <- read_fc_tensor(Split.ii)
##for ease of working, save the arrays
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2)) #force into samples, time, features
TestingSetMod <- aperm(TestingSet, c(3, 1, 2)) #force into samples, time, features
##finally save everything to load into memory at a later date
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/2Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/2Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/2TrainLabels.Rda")
saveRDS(Split.ii, file="~/PG-400/data2/split B/4front/2TestLabels.Rda")
## ---- Fold 3: hold out Split.iii, train on everything else ----
## Stack raw EEG CSVs into a (1024 x 4 x n) tensor of the four frontal
## channels (FC5, FC1, FC2, FC6; `time` dropped), truncating each file to
## 1024 rows. List-then-bind replaces the original copy-pasted loops that
## grew the array with abind() on every iteration (O(n^2) in copies).
read_fc_tensor <- function(files) {
  mats <- lapply(files, function(f) {
    d <- fread(f, stringsAsFactors = F)
    d <- select(d, FC5, FC1, FC2, FC6)
    as.matrix(d[1:1024, ])
  })
  abind::abind(mats, along = 3)
}
TrainingNames <- file_list[file_list %ni% Split.iii]
set.seed(32)
TrainingNames <- sample(TrainingNames)
##change the dir for the read step
setwd("/Users/ivorym/PG-400/data2/raw")
TrainingSet <- read_fc_tensor(TrainingNames)
TestingSet <- read_fc_tensor(Split.iii)
##for ease of working, save the arrays
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2)) #force into samples, time, features
TestingSetMod <- aperm(TestingSet, c(3, 1, 2)) #force into samples, time, features
##finally save everything to load into memory at a later date
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/3Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/3Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/3TrainLabels.Rda")
saveRDS(Split.iii, file="~/PG-400/data2/split B/4front/3TestLabels.Rda")
## ---- Fold 4: hold out Split.iv, train on everything else ----
## Stack raw EEG CSVs into a (1024 x 4 x n) tensor of the four frontal
## channels (FC5, FC1, FC2, FC6; `time` dropped), truncating each file to
## 1024 rows. List-then-bind replaces the original copy-pasted loops that
## grew the array with abind() on every iteration (O(n^2) in copies).
read_fc_tensor <- function(files) {
  mats <- lapply(files, function(f) {
    d <- fread(f, stringsAsFactors = F)
    d <- select(d, FC5, FC1, FC2, FC6)
    as.matrix(d[1:1024, ])
  })
  abind::abind(mats, along = 3)
}
TrainingNames <- file_list[file_list %ni% Split.iv]
set.seed(32)
TrainingNames <- sample(TrainingNames)
##change the dir for the read step
setwd("/Users/ivorym/PG-400/data2/raw")
TrainingSet <- read_fc_tensor(TrainingNames)
TestingSet <- read_fc_tensor(Split.iv)
##for ease of working, save the arrays
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2)) #force into samples, time, features
TestingSetMod <- aperm(TestingSet, c(3, 1, 2)) #force into samples, time, features
##finally save everything to load into memory at a later date
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/4Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/4Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/4TrainLabels.Rda")
saveRDS(Split.iv, file="~/PG-400/data2/split B/4front/4TestLabels.Rda")
## Fold 5: train on everything not in Split.v, test on Split.v.
TrainingNames <- file_list[file_list %ni% Split.v]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.v)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/5Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/5Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/5TrainLabels.Rda")
saveRDS(Split.v, file="~/PG-400/data2/split B/4front/5TestLabels.Rda")
## Fold 6: train on everything not in Split.vi, test on Split.vi.
TrainingNames <- file_list[file_list %ni% Split.vi]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.vi)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/6Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/6Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/6TrainLabels.Rda")
saveRDS(Split.vi, file="~/PG-400/data2/split B/4front/6TestLabels.Rda")
| /dataProcessingSplitB4front.R | permissive | M1V0/PG-400 | R | false | false | 23,526 | r | ##run fourth to put all data into useable formats for Split B
library(tidyverse)
library(data.table)
##list all files, both hc and pd, into an array
# NOTE: names only, no path -- the per-fold sections below setwd() into the
# raw directory before reading these files.
file_list <- list.files(path="~/PG-400/data2/raw/")
# Shuffle once with a fixed seed, then cut the shuffled order into six
# disjoint test folds of ~504 files each (~1/6 of the data, not 20% as the
# original comment claimed).
# NOTE(review): Split.vi assumes file_list has at least 3026 entries;
# fewer files would leave NA names in that fold -- confirm the count.
set.seed(32)
TestingNames <- sample(file_list)
Split.i <- TestingNames[1:504]
Split.ii <- TestingNames[505:1008]
Split.iii <- TestingNames[1009:1512]
Split.iv <- TestingNames[1513:2016]
Split.v <- TestingNames[2017:2520]
Split.vi <- TestingNames[2521:3026]
#create a not in function and then create the file names of the training set
"%ni%" <- Negate("%in%") ##create a NOT IN function
## Fold 1: train on everything not in Split.i, test on Split.i.
TrainingNames <- file_list[file_list %ni% Split.i]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.i)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/1Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/1Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/1TrainLabels.Rda")
saveRDS(Split.i, file="~/PG-400/data2/split B/4front/1TestLabels.Rda")
## Fold 2: train on everything not in Split.ii, test on Split.ii.
TrainingNames <- file_list[file_list %ni% Split.ii]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.ii)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/2Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/2Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/2TrainLabels.Rda")
saveRDS(Split.ii, file="~/PG-400/data2/split B/4front/2TestLabels.Rda")
## Fold 3: train on everything not in Split.iii, test on Split.iii.
TrainingNames <- file_list[file_list %ni% Split.iii]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.iii)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/3Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/3Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/3TrainLabels.Rda")
saveRDS(Split.iii, file="~/PG-400/data2/split B/4front/3TestLabels.Rda")
## Fold 4: train on everything not in Split.iv, test on Split.iv.
TrainingNames <- file_list[file_list %ni% Split.iv]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.iv)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/4Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/4Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/4TrainLabels.Rda")
saveRDS(Split.iv, file="~/PG-400/data2/split B/4front/4TestLabels.Rda")
## Fold 5: train on everything not in Split.v, test on Split.v.
TrainingNames <- file_list[file_list %ni% Split.v]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.v)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/5Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/5Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/5TrainLabels.Rda")
saveRDS(Split.v, file="~/PG-400/data2/split B/4front/5TestLabels.Rda")
## Fold 6: train on everything not in Split.vi, test on Split.vi.
TrainingNames <- file_list[file_list %ni% Split.vi]
set.seed(32)                       # reproducible shuffle
TrainingNames <- sample(TrainingNames)
##change the dir for the reads below
setwd("/Users/ivorym/PG-400/data2/raw")  # NOTE(review): machine-specific absolute path

## Read one trial: keep the four frontal channels (the positive selection
## already drops `time`, so a mixed `-time` term is unnecessary and is
## deprecated by tidyselect) and trim to exactly 1024 rows, since some
## recordings run one sample long.
read_trial <- function(fname) {
  trial <- fread(fname, stringsAsFactors = FALSE)
  as.matrix(select(trial, FC5, FC1, FC2, FC6)[1:1024, ])
}
## Stack files into a 1024 x 4 x n tensor with ONE abind() call; binding
## slice-by-slice inside a loop re-copies the whole array each iteration
## (O(n^2) in the number of files).
stack_trials <- function(fnames) {
  abind::abind(lapply(fnames, read_trial), along = 3)
}

## Build the training tensor in four chunks (kept as separate objects so
## they can be rm()'d individually if memory gets tight).
TrainingSet1 <- stack_trials(TrainingNames[1:756])
TrainingSet2 <- stack_trials(TrainingNames[757:1512])
TrainingSet3 <- stack_trials(TrainingNames[1513:2268])
TrainingSet4 <- stack_trials(TrainingNames[2269:length(TrainingNames)])
TrainingSet <- abind::abind(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4, along = 3)
#rm(TrainingSet1, TrainingSet2, TrainingSet3, TrainingSet4) #if you wanna clean up again
## Testing tensor for this fold.
TestingSet <- stack_trials(Split.vi)

## Save next to this script, with axes permuted to (samples, time, features).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
TrainingSetMod <- aperm(TrainingSet, c(3, 1, 2))
TestingSetMod <- aperm(TestingSet, c(3, 1, 2))
## NOTE(review): saveRDS() output is conventionally ".rds"; these files must
## be read back with readRDS(), not load().
saveRDS(TrainingSetMod, file="~/PG-400/data2/split B/4front/6Train.Rda")
saveRDS(TestingSetMod, file="~/PG-400/data2/split B/4front/6Test.Rda")
saveRDS(TrainingNames, file="~/PG-400/data2/split B/4front/6TrainLabels.Rda")
saveRDS(Split.vi, file="~/PG-400/data2/split B/4front/6TestLabels.Rda")
|
'
Quiz about Data type and vector operation
'
# Quiz: data types and vector operations on the Berkeley S133 movies data.
# 1-1 -- read the pipe-delimited table and report each column's storage type
movies <- read.csv('http://www.stat.berkeley.edu/classes/s133/data/movies.txt',
                   sep = '|',
                   stringsAsFactors = FALSE)
sapply(movies, typeof)
# 1-2 -- convert the box-office strings (leading "$") to numeric, three ways
# method1: with strsplit -- keep the text after the "$"
# (vapply instead of sapply: guarantees a character vector is returned)
movies$box.usd <- as.numeric(vapply(movies$box,
                                    function(x) strsplit(x, split = "\\$")[[1]][2],
                                    character(1), USE.NAMES = FALSE))
# method2: with gsub (string substitute) -- strip the "$"
movies$box.usd.2 <- as.numeric(gsub(movies$box, pattern = "\\$", replacement = ""))
# method3: with substr (we found that the format of box is the same width)
movies$box.usd.3 <- as.numeric(substr(movies$box, start = 2, stop = 8))
# rank by gross, then sum the top/bottom ten
# (TRUE spelled out: T is an ordinary variable and can be reassigned)
movies <- movies[order(movies$box.usd, decreasing = TRUE), ]
sum(head(movies$box.usd, 10))
sum(tail(movies$box.usd, 10))
# 1-3 -- average yearly box office since release; year is the text after
# the comma in the date column
movies$on.year <- as.numeric(vapply(movies$date,
                                    function(x) strsplit(x, split = ",")[[1]][2],
                                    character(1), USE.NAMES = FALSE))
movies$this.year <- 2018  # reference year fixed by the quiz statement
movies$year.pass <- movies$this.year - movies$on.year
movies$avg.year.box <- movies$box.usd / movies$year.pass
movies <- movies[order(movies$avg.year.box, decreasing = TRUE), ]
head(movies, 5)
| /tutorial/R/lec_R_basis/R_basis_DataTypes&VectorOperation_quiz.R | permissive | vashineyu/slides_and_others | R | false | false | 1,125 | r | '
Quiz about Data type and vector operation
'
# 1-1
# Read the pipe-delimited movies table; keep strings as character vectors.
movies = read.csv('http://www.stat.berkeley.edu/classes/s133/data/movies.txt',
sep='|',
stringsAsFactors=FALSE)
# Report each column's storage type.
sapply(movies, typeof)
# 1-2
# Convert the box-office strings (leading "$") to numeric, three ways.
# method1: with strsplit -- keep the text after the "$"
movies$box.usd <- as.numeric(sapply(movies$box,
function(x) strsplit(x, split = "\\$")[[1]][2]))
# method2: with gsub (string substitute) -- strip the "$"
movies$box.usd.2 <- as.numeric(gsub(movies$box, pattern = "\\$", replacement = ""))
# method3: with substr (we found that the format of box is the same)
# NOTE(review): start = 2, stop = 8 assumes a fixed-width box column --
# confirm against the data file.
movies$box.usd.3 <- as.numeric(substr(movies$box, start = 2, stop = 8))
# Sort descending by gross (style note: prefer TRUE over T).
movies <- movies[order(movies$box.usd, decreasing = T),]
sum(head(movies$box.usd, 10))
sum(tail(movies$box.usd, 10))
# 1-3
# Release year is the text after the comma in the date column.
movies$on.year <- as.numeric(sapply(movies$date,
function(x) strsplit(x, split = ",")[[1]][2]))
movies$this.year <- 2018
movies$year.pass <- movies$this.year - movies$on.year
# Average yearly box office since release, then rank by it.
movies$avg.year.box <- movies$box.usd / movies$year.pass
movies <- movies[order(movies$avg.year.box, decreasing = T),]
head(movies, 5)
|
##Time Series Date Manipulation
## Aggregates Iowa liquor "Bottles Sold" into weekly/monthly/yearly series.
##Install Packages##
## NOTE(review): install-on-require plus a hard-coded Windows lib.loc is
## fragile; prefer a plain library(lubridate) once the package is installed.
if(!require(lubridate)){install.packages("lubridate")};library("lubridate", lib.loc="~/R/win-library/3.4")
#--------Setup Work Directory---------#
## NOTE(review): "File Directory" is a placeholder -- edit before running;
## setwd() inside scripts is generally discouraged.
setwd("File Directory") ##Set file directory
getwd()
##Importing Data##
iowa<-read.csv(file.choose(),header=TRUE)## read data/import data
View(iowa)
## NOTE(review): attach() is discouraged (name-masking surprises); columns
## are referenced explicitly below anyway.
attach(iowa)
str(iowa)
##missing value check##
## Count NAs per column.
apply(iowa, 2, function(x){sum(is.na(x))})
names(iowa)
## Keep only columns 2 (Date) and 21 (Bottles.Sold).
Iowa<-iowa[,c(2,21)]## taking two columns out
View(Iowa)
apply(Iowa, 2, function(x){sum(is.na(x))})
## changing to date format##
Date<-as.Date(Iowa$Date,format="%m/%d/%Y")
Iowa0<-data.frame(Date,Iowa$Bottles.Sold)
View(Iowa0)
str(Iowa0)
##Monthly cut##
## Floor each date to the first day of its month.
Month<-as.Date(cut(Iowa0$Date,breaks = "month"))
##Weekly Cut##
## NOTE(review): `iowa4` is never created in this script -- this line (and
## the weekly aggregation below) presumably relies on an object from an
## earlier session; verify before running end-to-end.
iowa4$week<-as.Date(cut(iowa4$iowa3.iowa1.date,breaks = "week",start.on.monday = FALSE))
# changes weekly break point to Sunday
Iowa1<-data.frame(Month,Iowa0$Iowa.Bottles.Sold)
View(Iowa1)
## Order rows chronologically by month.
Iowa2<-Iowa1[order(as.Date(Iowa1$Month, format="%Y-%m-%d")),]
View(Iowa2)
## Monthly totals, grouped via lubridate's month()/year() accessors.
Iowa3 <- aggregate(Iowa2$Iowa0.Iowa.Bottles.Sold~month(Iowa1$Month)+year(Iowa1$Month),
data=Iowa2,FUN=sum)
View(Iowa3)
write.csv(Iowa3,'iowa_monthly.csv')
Iowa_4<-data.frame(Iowa3$`year(Iowa1$Month)`,Iowa3$`Iowa2$Iowa0.Iowa.Bottles.Sold`)
View(Iowa_4)
## yearly
Iowa_5 <- aggregate(Iowa_4$Iowa3..Iowa2.Iowa0.Iowa.Bottles.Sold.~Iowa_4$Iowa3..year.Iowa1.Month..,
data=Iowa_4,FUN=sum)
View(Iowa_5)
## Weekly series (depends on the undefined `iowa4` noted above).
iowa6<-data.frame(iowa4$week,iowa4$iowa3.iowa.Bottles.Sold)
View(iowa6)
iowals<-iowa6[order(as.Date(iowa6$iowa4.week, format="%Y-%m-%d")),]
View(iowals)
byweek <- aggregate(iowals$iowa4.iowa3.iowa.Bottles.Sold~iowals$iowa4.week, data = iowals, sum)
View(byweek)
plot(byweek,type='l')
write.csv(byweek,file = 'county(Polk)-zip-50321-week.csv')
## Explicit monthly date axis plus a ts object for downstream analysis.
date<-seq(as.Date("2012/1/1"), as.Date("2017/10/1"), "months")
Iowa4<-data.frame(date,Iowa3$`Iowa2$Iowa0.Iowa.Bottles.Sold`)
View(Iowa4)
Iowa.ts<-ts(Iowa3$`Iowa2$Iowa0.Iowa.Bottles.Sold`,frequency = 12,start = c(2012,01))##monthly analysis
View(Iowa.ts)
## Plot range of the weekly axis (style note: prefer TRUE over T).
xmin<-min(iowa6$iowa4.week,na.rm=T)
xmax<-max(iowa6$iowa4.week,na.rm=T)
##Code End## | /TSA_DateManipulations/TSA_DateManipulation.R | no_license | Sayan-Pal585/Time-Series-Analysis | R | false | false | 2,233 | r | ##Time Series Date Manipulation
##Install Packages##
## Aggregates Iowa liquor "Bottles Sold" into weekly/monthly/yearly series.
## NOTE(review): install-on-require plus a hard-coded Windows lib.loc is
## fragile; prefer a plain library(lubridate) once the package is installed.
if(!require(lubridate)){install.packages("lubridate")};library("lubridate", lib.loc="~/R/win-library/3.4")
#--------Setup Work Directory---------#
## NOTE(review): "File Directory" is a placeholder -- edit before running;
## setwd() inside scripts is generally discouraged.
setwd("File Directory") ##Set file directory
getwd()
##Importing Data##
iowa<-read.csv(file.choose(),header=TRUE)## read data/import data
View(iowa)
## NOTE(review): attach() is discouraged (name-masking surprises); columns
## are referenced explicitly below anyway.
attach(iowa)
str(iowa)
##missing value check##
## Count NAs per column.
apply(iowa, 2, function(x){sum(is.na(x))})
names(iowa)
## Keep only columns 2 (Date) and 21 (Bottles.Sold).
Iowa<-iowa[,c(2,21)]## taking two columns out
View(Iowa)
apply(Iowa, 2, function(x){sum(is.na(x))})
## changing to date format##
Date<-as.Date(Iowa$Date,format="%m/%d/%Y")
Iowa0<-data.frame(Date,Iowa$Bottles.Sold)
View(Iowa0)
str(Iowa0)
##Monthly cut##
## Floor each date to the first day of its month.
Month<-as.Date(cut(Iowa0$Date,breaks = "month"))
##Weekly Cut##
## NOTE(review): `iowa4` is never created in this script -- this line (and
## the weekly aggregation below) presumably relies on an object from an
## earlier session; verify before running end-to-end.
iowa4$week<-as.Date(cut(iowa4$iowa3.iowa1.date,breaks = "week",start.on.monday = FALSE))
# changes weekly break point to Sunday
Iowa1<-data.frame(Month,Iowa0$Iowa.Bottles.Sold)
View(Iowa1)
## Order rows chronologically by month.
Iowa2<-Iowa1[order(as.Date(Iowa1$Month, format="%Y-%m-%d")),]
View(Iowa2)
## Monthly totals, grouped via lubridate's month()/year() accessors.
Iowa3 <- aggregate(Iowa2$Iowa0.Iowa.Bottles.Sold~month(Iowa1$Month)+year(Iowa1$Month),
data=Iowa2,FUN=sum)
View(Iowa3)
write.csv(Iowa3,'iowa_monthly.csv')
Iowa_4<-data.frame(Iowa3$`year(Iowa1$Month)`,Iowa3$`Iowa2$Iowa0.Iowa.Bottles.Sold`)
View(Iowa_4)
## yearly
Iowa_5 <- aggregate(Iowa_4$Iowa3..Iowa2.Iowa0.Iowa.Bottles.Sold.~Iowa_4$Iowa3..year.Iowa1.Month..,
data=Iowa_4,FUN=sum)
View(Iowa_5)
## Weekly series (depends on the undefined `iowa4` noted above).
iowa6<-data.frame(iowa4$week,iowa4$iowa3.iowa.Bottles.Sold)
View(iowa6)
iowals<-iowa6[order(as.Date(iowa6$iowa4.week, format="%Y-%m-%d")),]
View(iowals)
byweek <- aggregate(iowals$iowa4.iowa3.iowa.Bottles.Sold~iowals$iowa4.week, data = iowals, sum)
View(byweek)
plot(byweek,type='l')
write.csv(byweek,file = 'county(Polk)-zip-50321-week.csv')
## Explicit monthly date axis plus a ts object for downstream analysis.
date<-seq(as.Date("2012/1/1"), as.Date("2017/10/1"), "months")
Iowa4<-data.frame(date,Iowa3$`Iowa2$Iowa0.Iowa.Bottles.Sold`)
View(Iowa4)
Iowa.ts<-ts(Iowa3$`Iowa2$Iowa0.Iowa.Bottles.Sold`,frequency = 12,start = c(2012,01))##monthly analysis
View(Iowa.ts)
## Plot range of the weekly axis (style note: prefer TRUE over T).
xmin<-min(iowa6$iowa4.week,na.rm=T)
xmax<-max(iowa6$iowa4.week,na.rm=T)
##Code End## |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/Function-DailyPrecipitationSerie.r
\name{DailyPrecipitationSerie}
\alias{DailyPrecipitationSerie}
\alias{DailyPrecipitationSerie-methods}
\title{DailyPrecipitationSerie Constructor}
\usage{
DailyPrecipitationSerie(precipitation = numeric(0), date = as.Date(character(0)), lat = 0, long = 0, elevation = 0, station_name = "")
}
\arguments{
\item{precipitation}{the precipitation values in millimeters of the serie}
\item{date}{the dates of the serie}
\item{lat}{the latitude position of the sensor}
\item{long}{the longitude position of the sensor}
\item{elevation}{the elevation of the sensor}
\item{station_name}{the name of the sensor}
}
\value{
an object of the class DailyPrecipitationSerie
}
\description{
It constructs an instance of the DailyPrecipitationSerie class.
}
\author{
Lucas Venezian Povoa
}
| /man/DailyPrecipitationSerie.Rd | no_license | lucasvenez/prefann | R | false | false | 920 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/Function-DailyPrecipitationSerie.r
\name{DailyPrecipitationSerie}
\alias{DailyPrecipitationSerie}
\alias{DailyPrecipitationSerie-methods}
\title{DailyPrecipitationSerie Constructor}
\usage{
DailyPrecipitationSerie(precipitation = numeric(0), date = as.Date(character(0)), lat = 0, long = 0, elevation = 0, station_name = "")
}
\arguments{
\item{precipitation}{the precipitation values in millimeters of the serie}
\item{date}{the dates of the serie}
\item{lat}{the latitude position of the sensor}
\item{long}{the longitude position of the sensor}
\item{elevation}{the elevation of the sensor}
\item{station_name}{the name of the sensor}
}
\value{
an object of the class DailyPrecipitationSerie
}
\description{
It constructs an instance of the DailyPrecipitationSerie class.
}
\author{
Lucas Venezian Povoa
}
|
\name{Fahrenheit to Celsius}
\alias{Fahrenheit to Celsius}
\title{
Converts temperature units}
\description{
Converts an input temperature from Fahrenheit to Celsius}
\usage{
F_to_C(x)
}
\arguments{
\item{x}{a number}
}
\details{
}
\value{
Value returned is a number with units in Celsius
}
\references{
}
\author{
Zach Jones}
\note{
}
\seealso{
}
\examples{
F_to_C(32)
F_to_C(0)
}
| /TemperatureConverter/man/Fahrenheit to Celsius.Rd | no_license | zjones91/introtoRtake2 | R | false | false | 421 | rd | \name{Fahrenheit to Celsius}
\alias{Fahrenheit to Celsius}
\title{
Converts temperature units}
\description{
Converts an input temperature from Fahrenheit to Celsius}
\usage{
F_to_C(x)
}
\arguments{
\item{x}{a number}
}
\details{
}
\value{
Value returned is a number with units in Celsius
}
\references{
}
\author{
Zach Jones}
\note{
}
\seealso{
}
\examples{
F_to_C(32)
F_to_C(0)
}
|
# Quality assessment to compare differences in taxonomy among data sources
# Jeffrey C. Oliver
# jcoliver@email.arizona.edu
# 2018-08-30
# (rm(list = ls()) removed: wiping the caller's workspace from within a
# script is an anti-pattern; run the script in a fresh R session instead.)
################################################################################
# Load both sources; drop iNaturalist records without a species determination.
bioscan <- read.csv("data/BioScanData.csv")
inaturalist <- read.csv("data/iNaturalist-clean.csv")
inaturalist <- inaturalist[!is.na(inaturalist$species), ]
# BioSCAN stores one column per species; columns 5-33 hold the species.
bioscan.species.columns <- 5:33
bioscan.species <- colnames(bioscan)[bioscan.species.columns]
# Normalize iNaturalist binomials to underscore form to match BioSCAN headers.
inaturalist.species <- unique(as.character(inaturalist$species))
inaturalist.species <- gsub(pattern = " ",
                            replacement = "_",
                            x = inaturalist.species,
                            fixed = TRUE)  # literal space; no regex needed
# Species present in BioSCAN but absent from the clean iNaturalist data.
bioscan.missing <- setdiff(bioscan.species, inaturalist.species)
bioscan.missing
#' bioscan iNaturalist
#' Plebejus acmon Icaricia acmon
#' Pyrgus albescens Pyrgus albescens [same]
#' Poanes melane Paratrytone melane
#' Papilio cresphontes Zerynthia rumina [bad GBIF import!]
################################################################################
# Now looking the other way, at any species that are in iNaturalist that are
# not in BioSCAN using the reduced, but unclean iNaturalist data
inaturalist.unclean <- read.csv("data/iNaturalist-unclean-reduced.csv")
inaturalist.unclean <- inaturalist.unclean[!is.na(inaturalist.unclean$species), ]
inaturalist.unclean.species <- unique(as.character(inaturalist.unclean$species))
inaturalist.unclean.species <- gsub(pattern = " ",
                                    replacement = "_",
                                    x = inaturalist.unclean.species,
                                    fixed = TRUE)
inaturalist.missing <- setdiff(inaturalist.unclean.species, bioscan.species)
#' Paratrytone_melane
#' Zerynthia_rumina
#' Limenitis_lorquini
#' Limenitis_bredowii
#' Atlides_halesus
# Save the records unique to iNaturalist for manual review.
write.csv(x = inaturalist.unclean[inaturalist.unclean$species %in% inaturalist.missing, ],
          file = "output/unique-iNaturalist.csv",
          row.names = FALSE)
| /scripts/taxonomy-comparison.R | permissive | jcoliver/bioscan | R | false | false | 1,997 | r | # Quality assessment to compare differences in taxonomy among data sources
# Jeffrey C. Oliver
# jcoliver@email.arizona.edu
# 2018-08-30
rm(list = ls())
################################################################################
bioscan <- read.csv("data/BioScanData.csv")
inaturalist <- read.csv("data/iNaturalist-clean.csv")
inaturalist <- inaturalist[!is.na(inaturalist$species), ]
bioscan.species.columns <- c(5:33)
bioscan.species <- colnames(bioscan)[bioscan.species.columns]
inaturalist.species <- unique(as.character(inaturalist$species))
inaturalist.species <- gsub(pattern = " ",
replacement = "_",
x = inaturalist.species)
bioscan.missing <- setdiff(bioscan.species, inaturalist.species)
bioscan.missing
#' bioscan iNaturalist
#' Plebejus acmon Icaricia acmon
#' Pyrgus albescens Pyrgus albescens [same]
#' Poanes melane Paratrytone melane
#' Papilio cresphontes Zerynthia rumina [bad GBIF import!]
################################################################################
# Now looking the other way, at any species that are in iNaturalist that are
# not in BioSCAN using the reduced, but unclean iNaturalist data
inaturalist.unclean <- read.csv("data/iNaturalist-unclean-reduced.csv")
inaturalist.unclean <- inaturalist.unclean[!is.na(inaturalist.unclean$species), ]
inaturalist.unclean.species <- unique(as.character(inaturalist.unclean$species))
inaturalist.unclean.species <- gsub(pattern = " ",
replacement = "_",
x = inaturalist.unclean.species)
inaturalist.missing <- setdiff(inaturalist.unclean.species, bioscan.species)
#' Paratrytone_melane
#' Zerynthia_rumina
#' Limenitis_lorquini
#' Limenitis_bredowii
#' Atlides_halesus
write.csv(x = inaturalist.unclean[inaturalist.unclean$species %in% inaturalist.missing, ],
file = "output/unique-iNaturalist.csv",
row.names = FALSE)
|
# GGEBiplot: interactive Tk-based GGE biplot tool.
# `Data` is a two-way table with genotypes as rows and environments as
# columns. NOTE: the function body continues past this section; the
# variables below form the closure state shared (via <<-) by the nested
# helper functions defined further down.
GGEBiplot <- function (Data)
{
tclRequire("BWidget")
###########
# Variables
###########
# Currently selected model options (defaults shown at start-up).
optioncentering <- "2.Tester-Centered G+GE"
optionscaling <- "0.No scaling"
optionSVP <- "GH -(Column Metric Preserving)"
# Menu choices for scaling / centering / singular-value partitioning.
datascaling <- c("0.No scaling", "1.Std Deviation (SD)")
datacentering <- c("0.No centering", "1.Global-Centered E+G+GE",
"2.Tester-Centered G+GE", "3.Double-Centered GE")
dataSVP <- c("JK -(Row Metric Preserving)", "GH -(Column Metric Preserving)",
"HJ -(Dual Metric Preserving)", "SQ - Symmetrical")
# Plot appearance.
wintitle <- "GGE Biplot"
coltitle <- "black"
background <- "white"
centro <- c(0, 0) # biplot origin
symbol = NA_integer_
symbol_gen = NA_integer_ # plotting symbol for genotype points
symbol_env = NA_integer_ # plotting symbol for environment points
subtitle <- NULL
ejes <- array() # axis labels ("AXIS1", "AXIS2", ...), filled by Models()
# Tcl-linked display toggles.
showtitle <- tclVar("1")
showboth <- tclVar("0")
showsymbols <- tclVar("0")
vaxis <- tclVar("0")
showguidelines <- tclVar("1")
showcircles <- tclVar("0")
scaling <- tclVar("0")
centering <- tclVar("2")
svp <- tclVar("1")
vrank <- tclVar("1")
TypeGraph <- 1 # which biplot view is currently drawn (see plotFunctiond)
# Model state filled in by Models(): data matrix and SVD coordinates.
matrixdata <- NULL
desviation <- NULL # per-column SDs when SD scaling is enabled
colgenotype <- NULL # colors for genotype points
colenv <- NULL # colors for environment points
labelgen <- NULL
labelenv <- NULL
coordgenotype <- NULL
coordenviroment <- NULL
xtext <- NULL
ytext <- NULL
# Interactive labelling state (click-to-label support).
xCoords <- NULL
yCoords <- NULL
xAnt <- NULL
yAnt <- NULL
indexClosest <- NULL
labelsVec <- NULL
colorsVec <- NULL
# Current selections (-1 = nothing selected).
venvironment <- -1
vgenotype <- -1
vgenotype1 <- -1
vgenotype2 <- -1
dimension1 <- 1 # principal components shown on x / y
dimension2 <- 2
vcex <- 1
img <- NULL
parPlotSize <- NULL
usrCoords <- NULL
#####################
# Modal dialog helper
#####################
# Modal text-entry dialog. Blocks (tkwait.window) until the user presses
# OK or Cancel, then returns the entered text, or `returnValOnCancel`
# when the dialog is cancelled or closed.
#
# Fixed defect: the OK/Cancel callbacks assigned ReturnVal with `=`,
# creating a variable local to the callback, so the dialog ALWAYS
# returned returnValOnCancel. The callbacks now use `<<-` to update the
# enclosing function's ReturnVal.
modalDialog <- function(title, question, entryInit, entryWidth = 20,
returnValOnCancel = "ID_CANCEL") {
dlg <- tktoplevel()
tkwm.deiconify(dlg)
tkgrab.set(dlg) # make the dialog modal: grab all input
tkfocus(dlg)
tkwm.title(dlg, title)
textEntryVarTcl <- tclVar(paste(entryInit))
textEntryWidget <- tkentry(dlg, width = paste(entryWidth),
textvariable = textEntryVarTcl)
tkgrid(tklabel(dlg, text = " "))
tkgrid(tklabel(dlg, text = question), textEntryWidget)
tkgrid(tklabel(dlg, text = " "))
ReturnVal <- returnValOnCancel
onOK <- function() {
ReturnVal <<- tclvalue(textEntryVarTcl) # publish entry to the caller
tkgrab.release(dlg)
tkdestroy(dlg)
}
onCancel <- function() {
ReturnVal <<- returnValOnCancel
tkgrab.release(dlg)
tkdestroy(dlg)
}
OK.but <- tkbutton(dlg, text = " OK ", command = onOK)
Cancel.but <- tkbutton(dlg, text = " Cancel ", command = onCancel)
tkgrid(OK.but, Cancel.but)
tkgrid(tklabel(dlg, text = " "))
tkfocus(dlg)
tkwait.window(dlg) # block until the dialog window is destroyed
ReturnVal
}
#############################
# Funcion que cambia el color
#############################
# Open the native Tk color chooser seeded with `color` and return the
# chosen color string. NOTE(review): if the user cancels, tclvalue()
# yields "" and the function falls through, returning NULL invisibly --
# callers must handle a NULL result.
ChangeColorv <- function(color) {
colorv = tclvalue(tcl("tk_chooseColor", initialcolor = color,
title = "Choose a color"))
if (nchar(colorv) > 0)
return(colorv)
}
##########################################
# Funcion que cambia el tamano de la letra
##########################################
# Font-selection dialog: a small Tk listbox with five font styles plus an
# OK button.
#
# Fixed defect: the original OnOK callback placed `tkdestroy(tt)` AFTER
# `return(vfont)`, so the destroy was unreachable and the window was
# never closed; the destroy now runs before returning.
# NOTE(review): a Tk callback's return value is discarded by Tk, so the
# selected index still never reaches ChangeSize's caller -- exposing it
# would need a tclVar or a `<<-` into the enclosing scope. Preserved
# as-is apart from the unreachable-statement fix.
ChangeSize <- function()
{
tt <- tktoplevel()
tkwm.title(tt, "Font")
scr <- tkscrollbar(tt, repeatinterval = 5, command = function(...) tkyview(tl, ...))
tl <- tklistbox(tt, height = 4, selectmode = "single",
yscrollcommand = function(...) tkset(scr, ...), background = "white")
tkgrid(tklabel(tt, text = "Font"))
tkgrid(tl, scr)
tkgrid.configure(scr, rowspan = 4, sticky = "nsw")
fonts <- c("Plain", "Bold", "Italic", "Bold Italic", "Symbol")
for (i in seq_along(fonts))
{
tkinsert(tl, "end", fonts[i])
}
# Original comment said the default font is Plain; Tk listbox indices are
# 0-based, so index 1 actually selects the second entry -- verify intent.
tkselection.set(tl, 1)
OnOK <- function()
{
vfont <- as.numeric(tkcurselection(tl)) + 1 # 1-based index of the choice
tkdestroy(tt) # was unreachable (placed after return) in the original
return(vfont)
}
OK.but <- tkbutton(tt, text = " OK ", command = OnOK)
tkgrid(OK.but)
tkfocus(tt)
}
######################################
# Funcion para la seleccion del modelo
######################################
# Models(): (re)build the biplot model from `Data` using the currently
# selected centering, scaling and SVP options. All results are written
# into the enclosing GGEBiplot environment via `<<-` (matrixdata,
# coordgenotype, coordenviroment, labels, colors, xtext/ytext, ...).
Models <- function() {
labelgen <<- rownames(Data)
labelenv <<- colnames(Data)
# Copy Data into a plain numeric matrix held in the enclosing scope.
matrixdata <<- matrix(, nrow(Data), ncol(Data))
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- Data[i, j]
# Default colors: genotypes green, environments blue.
colgenotype <<- rep("green4",dim(matrixdata)[1])
colenv <<- rep("blue",dim(matrixdata)[2])
# One "AXISk" label per singular value.
for (i in 1:ncol(diag(svd(matrixdata)$d))) ejes[i] <<- paste("AXIS",
i, sep = "")
# Centering option
switch(optioncentering,
"0.No centering" = {
centering <<- tclVar("0")
},
"1.Global-Centered E+G+GE" = {
# Subtract the grand mean.
meanData = mean(matrixdata)
matrixdata <<- matrixdata - meanData
centering <<- tclVar("1")
},
"2.Tester-Centered G+GE" = {
# Subtract each environment (column) mean.
meancolData = colMeans(matrixdata)
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- matrixdata[i, j] - meancolData[j]
centering <<- tclVar("2")
},
"3.Double-Centered GE" = {
# Remove both row and column means (keeps the interaction only).
meanData = mean(matrixdata)
meancolData = colMeans(matrixdata)
meanrowData = rowMeans(matrixdata)
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- matrixdata[i, j] + meanData - meancolData[j] -
meanrowData[i]
centering <<- tclVar("3")
})
# Scaling option
switch (optionscaling,
"0.No scaling" = {
scaling <<- tclVar("0")
},
"1.Std Deviation (SD)" = {
# Divide each column by its standard deviation.
scaling <<- tclVar("1")
desviation <<- array(, dim = ncol(matrixdata))
for (j in 1:ncol(matrixdata)) desviation[j] <<- sqrt(var(matrixdata[,
j]))
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- matrixdata[i, j]/desviation[j]
})
# Singular-value partitioning (SVP) option: how the singular values are
# split between genotype and environment coordinates. (This comment was
# mislabeled "centering option" in the original.) For JK/GH the
# unit-weighted side is rescaled so both point clouds span comparable
# ranges on the displayed axes.
switch (optionSVP,
"JK -(Row Metric Preserving)" = {
coordgenotype <<- svd(matrixdata)$u %*% diag(svd(matrixdata)$d)
coordenviroment <<- svd(matrixdata)$v
d1 = (max(coordenviroment[, dimension1]) - min(coordenviroment[,
dimension1]))/(max(coordgenotype[, dimension1]) -
min(coordgenotype[, dimension1]))
d2 = (max(coordenviroment[, dimension2]) - min(coordenviroment[,
dimension2]))/(max(coordgenotype[, dimension2]) -
min(coordgenotype[, dimension2]))
d = max(d1, d2)
coordenviroment <<- coordenviroment/d
svp <<- tclVar("0")
},
"GH -(Column Metric Preserving)" = {
coordgenotype <<- svd(matrixdata)$u
coordenviroment <<- svd(matrixdata)$v %*% diag(svd(matrixdata)$d)
d1 = (max(coordgenotype[, dimension1]) - min(coordgenotype[,
dimension1]))/(max(coordenviroment[, dimension1]) -
min(coordenviroment[, dimension1]))
d2 = (max(coordgenotype[, dimension2]) - min(coordgenotype[,
dimension2]))/(max(coordenviroment[, dimension2]) -
min(coordenviroment[, dimension2]))
d = max(d1, d2)
coordgenotype <<- coordgenotype/d
svp <<- tclVar("1")
},
"SQ - Symmetrical" = {
# Split singular values evenly: both sides weighted by sqrt(d).
coordgenotype <<- svd(matrixdata)$u %*% diag(sqrt(svd(matrixdata)$d))
coordenviroment <<- svd(matrixdata)$v %*% diag(sqrt(svd(matrixdata)$d))
svp <<- tclVar("3")
},
"HJ -(Dual Metric Preserving)" = {
coordgenotype <<- svd(matrixdata)$u %*% diag(svd(matrixdata)$d)
coordenviroment <<- svd(matrixdata)$v %*% diag(svd(matrixdata)$d)
svp <<- tclVar("2")
})
# Stacked label coordinates: genotypes first, then environments.
xtext <<- rbind(coordgenotype,coordenviroment)[,dimension1]
ytext <<- rbind(coordgenotype,coordenviroment)[,dimension2]
}
# #######################################
# Funcion que construye el fichero de log
#########################################
# Addfile(): write a tab-separated summary report of the current biplot
# model to "Results1.xls" and open it with file.show().
#
# Reads (from the enclosing GGEBiplot environment): matrixdata,
# coordgenotype, coordenviroment, labelgen, labelenv, dimension1,
# dimension2, ejes, optioncentering, optionscaling, optionSVP.
# Side effects: creates/overwrites "Results1.xls" in the working
# directory and (super)assigns `envfile` for use elsewhere in the tool.
#
# Fixes relative to the original:
#  - `valorespropios =- svd(...)$d` silently stored the NEGATED singular
#    values (harmless only because they were squared); now `<-`.
#  - The very first file.append("Results1.xls", "temp.xls") appended
#    stale content from a previous run; the temp-file dance is replaced
#    by one open connection producing the same byte stream.
#  - CRFqEi placed its values in columns 1-2 but then subset
#    [, dimension1:dimension2]; it now subsets [, 1:2] like CRFqEj.
Addfile <- function() {
valorespropios <- svd(matrixdata)$d
# Variance explained per axis (computed as before; not currently written).
vartotal <- round(as.numeric(sum(valorespropios^2)), 2)
varexpl <- round(as.numeric((valorespropios^2/vartotal) * 100), 2)
# Genotype (row) and environment (column) coordinates on the shown axes.
genfile <- as.data.frame(coordgenotype[, dimension1:dimension2])
rownames(genfile) <- labelgen
colnames(genfile) <- ejes[dimension1:dimension2]
envfile <<- as.data.frame(coordenviroment[, dimension1:dimension2])
rownames(envfile) <- labelenv
colnames(envfile) <- ejes[dimension1:dimension2]
# Relative contribution of each displayed factor to each genotype (per mille).
coordgencuad <- coordgenotype^2
sumacuagen <- rowSums(coordgencuad)
CRFqEi <- coordgencuad
CRFqEi[, 1] <- round((coordgencuad[, dimension1] * 1000)/sumacuagen, 0)
CRFqEi[, 2] <- round((coordgencuad[, dimension2] * 1000)/sumacuagen, 0)
CRFqEi <- as.data.frame(CRFqEi[, 1:2])
rownames(CRFqEi) <- labelgen
colnames(CRFqEi) <- ejes[dimension1:dimension2]
# Same, per environment.
coordenvcuad <- coordenviroment^2
sumacuaenv <- rowSums(coordenvcuad)
CRFqEj <- coordenvcuad
CRFqEj[, 1] <- round((coordenvcuad[, dimension1] * 1000)/sumacuaenv, 0)
CRFqEj[, 2] <- round((coordenvcuad[, dimension2] * 1000)/sumacuaenv, 0)
CRFqEj <- as.data.frame(CRFqEj[, 1:2])
rownames(CRFqEj) <- labelenv
colnames(CRFqEj) <- ejes[dimension1:dimension2]
# Stream the report into the result file (tab-separated, decimal comma,
# same layout as the original output).
con <- file("Results1.xls", open = "wt")
done <- FALSE
on.exit(if (!done) close(con), add = TRUE) # close on error paths only
cat("GGE BIPLOT\n", file = con)
cat("Centered by: ", optioncentering, "\n", sep = "", file = con)
cat("Scaled (Divided) by: ", optionscaling, "\n", sep = "", file = con)
cat("SVP: ", optionSVP, "\n\n", sep = "", file = con)
cat("Eigenvalues and variance explained", file = con)
write.table(round(svd(matrixdata)$d, 3), file = con, sep = "\t", dec = ",")
cat("\nRow coordinates:\n", file = con)
write.table(round(genfile, 3), file = con, sep = "\t", dec = ",")
cat("\nColumn coordinates:\n", file = con)
write.table(round(envfile, 3), file = con, sep = "\t", dec = ",")
cat("\n\nRELATIVE CONTRIBUTIONS OF THE FACTOR TO THE ELEMENT:\n", file = con)
cat("Row Contributions ----------\n", file = con)
write.table(CRFqEi, file = con, sep = "\t", dec = ",")
cat("\nColumn Contributions ----------\n", file = con)
write.table(CRFqEj, file = con, sep = "\t", dec = ",")
cat("\n", file = con)
close(con)
done <- TRUE
file.show("Results1.xls")
}
##################################
# Funcion que construye el grafico
##################################
plotFunctiond <- function(screen = TRUE) {
valorespropios = svd(matrixdata)$d
vartotal = round(as.numeric(sum(valorespropios^2)),
2)
varexpl = round(as.numeric((valorespropios^2/vartotal) *
100), 2)
params <- par(bg = background)
plot(rbind(coordgenotype, coordenviroment), main = wintitle,
type = "n", asp = 1, col.main = coltitle, xlab = paste(ejes[dimension1],
varexpl[dimension1], "%", sep = " ", sub = subtitle),
ylab = paste(ejes[dimension2], varexpl[dimension2],
"%", sep = " "))
if (tclvalue(showguidelines) == "1")
abline(h = 0, v = 0, lty = "dotted")
indexLabeledaux<-c()
labeledPoints <- list()
# Tipo de grafico
#
switch(TypeGraph,
# Biplot
"1" = {
if (tclvalue(showboth) == "0" || tclvalue(showboth) ==
"1")
{
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
}
if (tclvalue(showboth) == "0" || tclvalue(showboth) ==
"2")
{
arrows(centro[1], centro[2], coordenviroment[,
dimension1], coordenviroment[, dimension2],
col = colenv, lty = "dotted", length = 0.05)
points(centro[1], centro[2], pch = 18, col = "black")
}
if (tclvalue(showboth) == "0")
{
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
}
if (tclvalue(showboth) == "1")
{
xCoords <<- xtext[1:length(colgenotype)]
yCoords <<- ytext[1:length(colgenotype)]
labelsVec <<- labelgen
colorsVec <<- colgenotype
}
if (tclvalue(showboth) == "2")
{
xCoords <<- xtext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
yCoords <<- ytext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
labelsVec <<- labelenv
colorsVec <<- colenv
}
},
# Examina un ambiente
"2" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
abline(a = 0, b = coordenviroment[venvironment, dimension2]/coordenviroment[venvironment,
dimension1], col = colenv[venvironment], lty = "solid",lwd = 2.5)
abline(a = 0, b = -coordenviroment[venvironment,
dimension1]/coordenviroment[venvironment, dimension2],
col = colenv[venvironment], lty = "solid",lwd = 2.5)
arrows(centro[1], centro[2], coordenviroment[venvironment,
dimension1], coordenviroment[venvironment, dimension2],
col = colenv[venvironment], lty = "solid", length = 0.1)
xCoords <<- c(xtext[1:length(colgenotype)],xtext[length(colgenotype)+venvironment])
yCoords <<- c(ytext[1:length(colgenotype)],ytext[length(colgenotype)+venvironment])
labelsVec <<- c(labelgen,labelenv[venvironment])
colorsVec <<- c(colgenotype,colenv[venvironment])
for (i in 1:nrow(matrixdata))
{
x <- solve(matrix(c(-coordenviroment[venvironment,
dimension2], coordenviroment[venvironment,
dimension1], coordenviroment[venvironment,
dimension1], coordenviroment[venvironment,
dimension2]), nrow = 2), matrix(c(0, coordenviroment[venvironment,
dimension1] * coordgenotype[i, dimension1] +
coordenviroment[venvironment, dimension2] *
coordgenotype[i, dimension2]), ncol = 1))
segments(coordgenotype[i, dimension1], coordgenotype[i,
dimension2], x[1], x[2], lty = "dotted")
}
},
# Examina un genotipo
"3" = {
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
abline(a = 0, b = coordgenotype[vgenotype, dimension2]/coordgenotype[vgenotype,
dimension1], col = colgenotype[vgenotype], lty = "solid" , lwd = 2.5)
abline(a = 0, b = -coordgenotype[vgenotype, dimension1]/coordgenotype[vgenotype,
dimension2], col = colgenotype[vgenotype], lty = "solid", lwd = 2.5 )
arrows(centro[1], centro[2], coordgenotype[vgenotype,
dimension1], coordgenotype[vgenotype, dimension2],
col = colgenotype[vgenotype], lty = "solid", length = 0.1)
xCoords <<- rbind(coordgenotype[vgenotype,], coordenviroment)[,dimension1]
yCoords <<- rbind(coordgenotype[vgenotype,], coordenviroment)[,dimension2]
labelsVec <<- c(labelgen[vgenotype],labelenv)
colorsVec <<- c(colgenotype[vgenotype],colenv)
for (i in 1:ncol(matrixdata))
{
x <- solve(matrix(c(-coordgenotype[vgenotype,
dimension2], coordgenotype[vgenotype, dimension1],
coordgenotype[vgenotype, dimension1], coordgenotype[vgenotype,
dimension2]), nrow = 2), matrix(c(0, coordgenotype[vgenotype,
dimension1] * coordenviroment[i, dimension1] +
coordgenotype[vgenotype, dimension2] * coordenviroment[i,
dimension2]), ncol = 1))
segments(coordenviroment[i, dimension1], coordenviroment[i,
dimension2], x[1], x[2], lty = "dotted")
}
},
# Relacion entre ambientes
"4" = {
arrows(centro[1], centro[2], coordenviroment[, dimension1],
coordenviroment[, dimension2], col = colenv,
lty = "solid", length = 0.05)
points(centro[1], centro[2], pch = 18, col = "black")
if (tclvalue(showcircles) == "1")
{
radio = max((max(coordenviroment[dimension1,
]) - min(coordenviroment[dimension1, ])), (max(coordenviroment[dimension2,
]) - min(coordenviroment[dimension2, ])))/10
for (i in 1:5) symbols(0, 0, circles = radio *
i, add = TRUE, inches = FALSE, fg = "black")
}
xCoords <<- xtext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
yCoords <<- ytext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
labelsVec <<- c(labelenv)
colorsVec <<- c(colenv)
},
# Compara dos genotipos
"5" = {
symbols(coordgenotype[vgenotype1, dimension1], coordgenotype[vgenotype1,
dimension2], circles = 0.2, add = TRUE, inches = FALSE,
fg = colgenotype)
symbols(coordgenotype[vgenotype2, dimension1], coordgenotype[vgenotype2,
dimension2], circles = 0.2, add = TRUE, inches = FALSE,
fg = colgenotype)
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
segments(coordgenotype[vgenotype1, dimension1], coordgenotype[vgenotype1,
dimension2], coordgenotype[vgenotype2, dimension1],
coordgenotype[vgenotype2, dimension2], col = "red",
lty = "solid", lwd = 2.5)
abline(a = 0, b = -(coordgenotype[vgenotype1, dimension1] -
coordgenotype[vgenotype2, dimension1])/(coordgenotype[vgenotype1,
dimension2] - coordgenotype[vgenotype2, dimension2]),
col = "red", lty = "solid",lwd = 2.5)
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# Which-won-where
"6" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
points(centro[1], centro[2], pch = 18, col = "black")
indice = c(chull(coordgenotype[, dimension1], coordgenotype[,
dimension2]))
polygon(coordgenotype[indice, dimension1], coordgenotype[indice,
dimension2], border = "black")
i <<- 1
while (is.na(indice[i + 1]) == FALSE)
{
m<-(coordgenotype[indice[i], dimension2] - coordgenotype[indice[i + 1], dimension2])/(coordgenotype[indice[i],dimension1]-coordgenotype[indice[i + 1],dimension1])
mperp<--1/m
c2<-coordgenotype[indice[i + 1], dimension2] - m*coordgenotype[indice[i + 1],dimension1]
xint<--c2/(m-mperp)
xint<-ifelse(xint<0,min(coordenviroment[, dimension1],coordgenotype[, dimension1]), max(coordenviroment[, dimension1],coordgenotype[, dimension1]))
yint<-mperp*xint
segments(0,0, xint,yint, col="red", lty="solid",lwd=2.5)
i <<- i + 1
}
m<-(coordgenotype[indice[i], dimension2] - coordgenotype[indice[1], dimension2])/(coordgenotype[indice[i],dimension1]-coordgenotype[indice[1],dimension1])
mperp<--1/m
c2<-coordgenotype[indice[i], dimension2] - m*coordgenotype[indice[i],dimension1]
xint<--c2/(m-mperp)
xint<-ifelse(xint<0,min(coordenviroment[, dimension1],coordgenotype[, dimension1]), max(coordenviroment[, dimension1],coordgenotype[, dimension1]))
yint<-mperp*xint
segments(0,0, xint,yint, col="red", lty="solid",lwd=2.5)
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# Discriminativeness vs. representativeness
"7" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
segments(centro[1], centro[2], coordenviroment[,
dimension1], coordenviroment[, dimension2], col = colenv,
lty = "dotted")
points(centro[1], centro[2], pch = 18, col = "black")
arrows(centro[1], centro[2], mean(coordenviroment[,
dimension1]), mean(coordenviroment[, dimension2]),
col = colenv, lty = "solid", length = 0.1)
symbols(mean(coordenviroment[, dimension1]), mean(coordenviroment[,
dimension2]), circles = 0.1, add = TRUE, inches = FALSE,
fg = colenv)
abline(a = 0, b = mean(coordenviroment[, dimension2])/mean(coordenviroment[,
dimension1]), col = colenv, lty = "solid", lwd = 2.5)
radio = max((max(coordenviroment[dimension1, ]) -
min(coordenviroment[dimension1, ])), (max(coordenviroment[dimension2,
]) - min(coordenviroment[dimension2, ])))/10
for (i in 1:5) symbols(0, 0, circles = radio * i,
add = TRUE, inches = FALSE, fg = "black")
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# Ranking Environments
"8" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2],pch = symbol_gen, col = colgenotype, cex = vcex)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
points(centro[1], centro[2], pch = 18, col = "black")
med1 = mean(coordenviroment[, dimension1])
med2 = mean(coordenviroment[, dimension2])
abline(a = 0, b = med2/med1, col = colenv, lty = "solid",
lwd = 2.5)
abline(a = 0, b = -med1/med2, col = colenv, lty = "solid",
lwd = 2.5)
symbols(med1, med2, circles = 0.1, add = TRUE, inches = FALSE,
fg = colenv)
mod = max((coordenviroment[, dimension1]^2 + coordenviroment[,
dimension2]^2)^0.5)
xcoord = sign(med1) * (mod^2/(1 + med2^2/med1^2))^0.5
ycoord = (med2/med1) * xcoord
arrows(centro[1], centro[2], xcoord, ycoord, col = colenv,
lty = "solid", length = 0.1)
radio = ((xcoord - med1)^2 + (ycoord - med2)^2)^0.5/3
for (i in 1:8) symbols(xcoord, ycoord, circles = radio *
i, add = TRUE, inches = FALSE, fg = "gray")
xCoords <<- xtext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
yCoords <<- ytext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
labelsVec <<- labelenv
colorsVec <<- colenv
},
# Mean vs Stability
"9" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2],pch = symbol_gen, col = colgenotype, cex = vcex)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
med1 = mean(coordenviroment[, dimension1])
med2 = mean(coordenviroment[, dimension2])
abline(a = 0, b = med2/med1, col = colgenotype, lty = "solid",
lwd = 2.5)
abline(a = 0, b = -med1/med2, col = colgenotype,
lty = "solid", lwd = 2.5)
arrows(centro[1], centro[2], med1, med2, col = colgenotype,
lty = "solid", length = 0.1)
symbols(med1, med2, circles = 0.1, add = TRUE, inches = FALSE,
fg = colenv)
for (i in 1:nrow(matrixdata))
{
x <- solve(matrix(c(-med2, med1, med1, med2),
nrow = 2), matrix(c(0, med2 * coordgenotype[i,
dimension2] + med1 * coordgenotype[i, dimension1]),
ncol = 1))
segments(coordgenotype[i, dimension1], coordgenotype[i,
dimension2], x[1], x[2], lty = "dotted")
}
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# Ranking genotypes
"10" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2],pch = symbol_gen, col = colgenotype, cex = vcex)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
med1 = mean(coordenviroment[, dimension1])
med2 = mean(coordenviroment[, dimension2])
abline(a = 0, b = med2/med1, col = colgenotype, lty = "solid",
lwd = 2.5)
abline(a = 0, b = -med1/med2, col = colgenotype,
lty = "solid", lwd = 2.5)
coordx <<- 0
coordy <<- 0
for (i in 1:nrow(matrixdata)) {
x <- solve(matrix(c(-med2, med1, med1, med2),
nrow = 2), matrix(c(0, med2 * coordgenotype[i,
dimension2] + med1 * coordgenotype[i, dimension1]),
ncol = 1))
if (sign(x[1]) == sign(med1)) {
if (abs(x[1]) > abs(coordx)) {
coordx <- x[1]
coordy <- x[2]
}
}
}
arrows(centro[1], centro[2], coordx, coordy, col = colgenotype,
lty = "solid", length = 0.1)
radio = ((coordx - med1)^2 + (coordy - med2)^2)^0.5/3
for (i in 1:10) symbols(coordx, coordy, circles = radio *
i, add = TRUE, inches = FALSE, fg = "gray")
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
}
)
#
indexLabeled <- c(1:length(xCoords))
if (length(indexLabeled)>0)
for (i in (1:length(indexLabeled)))
{
indexClosest <- indexLabeled[i]
text(xCoords[indexClosest],yCoords[indexClosest],
labels=labelsVec[indexClosest], col= colorsVec[indexClosest], cex= vcex)
}
parPlotSize <<- par("plt")
usrCoords <<- par("usr")
#
}
############################
# Three-dimensional biplot
############################
# Render the current biplot in three dimensions (the first three PCs) on
# an rgl device, then run a short fly-around animation of the viewpoint.
# Reads shared state from the enclosing environment: coordgenotype,
# coordenviroment, labelgen/labelenv, colgenotype/colenv, symbol_gen,
# vcex, wintitle and the tclVars showguidelines/showtitle.
Biplot3D <- function() {
    dimensions <- 1:3
    # NOTE(review): rgl.clear/rgl.bg/rgl.light belong to rgl's legacy
    # interface; newer rgl versions prefer clear3d()/bg3d()/light3d().
    # Kept as-is to match the rgl API the rest of the file uses.
    rgl.clear("all")
    rgl.bg(sphere = TRUE, color = c("whitesmoke", "gray90"),
        lit = FALSE)
    rgl.light()
    # Genotype points plus labels, environment labels only.
    points3d(coordgenotype[, 1], coordgenotype[, 2], coordgenotype[,
        3], pch = symbol_gen, col = colgenotype)
    text3d(coordgenotype[, 1], coordgenotype[, 2], coordgenotype[,
        3], labelgen, col = colgenotype, cex = vcex)
    text3d(coordenviroment[, 1], coordenviroment[, 2], coordenviroment[,
        3], labelenv, col = colenv, cex = vcex)
    aspect3d("iso")
    # Draw the three bounding-box edges that act as axes, and label the
    # midpoint of each one.
    lims <- par3d("bbox")
    segments3d(matrix(c(lims[1], lims[3], lims[5], lims[2],
        lims[3], lims[5], lims[1], lims[3], lims[5], lims[1],
        lims[4], lims[5], lims[1], lims[3], lims[5], lims[1],
        lims[3], lims[6]), byrow = TRUE, ncol = 3), col = "gray60")
    text3d(matrix(c((lims[1] + lims[2])/2, lims[3], lims[5],
        lims[1], (lims[3] + lims[4])/2, lims[5], lims[1],
        lims[3], (lims[5] + lims[6])/2), byrow = TRUE, nrow = 3),
        texts = paste("Dimension ", dimensions), col = "gray60",
        family = "sans", font = 1, cex = vcex)
    if (tclvalue(showguidelines) == "1")
        axes3d()
    # One guide segment from the origin to each environment marker.
    for (i in 1:(dim(coordenviroment)[1])) {
        linea <- rbind(coordenviroment[i, ], c(0, 0, 0))
        segments3d(linea[, 1], linea[, 2], linea[, 3], col = colenv)
    }
    if (tclvalue(showtitle) == "1")
        title3d(wintitle, color = "black", family = "sans",
            font = 2, cex = vcex)
    # Pause before the animation starts.  (The original spun in a busy
    # loop on proc.time(), pegging a CPU core; Sys.sleep waits idle.)
    Sys.sleep(0.75)
    # Time-driven fly-around: sweep the azimuth through 360 degrees at
    # 36 deg/s, zooming in towards the half-turn and back out again.
    start <- proc.time()[3]
    while ((i <- 36 * (proc.time()[3] - start)) < 360) rgl.viewpoint(i,
        15 - (i - 90)/4, zoom = (if (i < 180)
            (i + 1)^-0.5
        else (360 - i + 1)^-0.5))
    rgl.viewpoint(zoom = 1)
}
######################################
# Dialog for selecting a single genotype
######################################
# Modal dialog asking the user to pick one genotype from labelgen.
# Writes the 1-based index of the choice to the shared variable
# vgenotype (-1 when the dialog is cancelled) and blocks until the
# window is closed.
SelectGenotype <- function() {
    dlg <- tktoplevel()
    tkwm.title(dlg, "Select a Genotype")
    picker <- tkwidget(dlg, "ComboBox", editable = FALSE,
        values = labelgen, width = 20)
    # "getvalue" is 0-based; convert to R's 1-based indexing.
    accept <- function() {
        vgenotype <<- as.numeric(tclvalue(tcl(picker, "getvalue"))) + 1
        tkdestroy(dlg)
    }
    dismiss <- function() {
        vgenotype <<- -1
        tkdestroy(dlg)
    }
    btn_ok <- tkbutton(dlg, text = " OK ", command = accept)
    btn_cancel <- tkbutton(dlg, text = " Cancel ", command = dismiss)
    # Blank labels act as vertical spacers in the grid layout.
    spacer <- function() tkgrid(tklabel(dlg, text = " "))
    spacer()
    spacer()
    tkgrid(tklabel(dlg, text = "Select a Genotype: "), picker)
    spacer()
    tkgrid(btn_ok, btn_cancel)
    spacer()
    tkfocus(dlg)
    # Block here until the dialog is destroyed (modal behaviour).
    tkwait.window(dlg)
}
####################################
# Dialog for selecting an environment
####################################
# Modal dialog asking the user to pick one environment from labelenv.
# Stores the 1-based index of the selection in the shared variable
# venvironment (-1 on cancel) and returns only once the window closes.
SelectEnvironment <- function() {
    dlg <- tktoplevel()
    tkwm.title(dlg, "Select an Environment")
    picker <- tkwidget(dlg, "ComboBox",
        editable = FALSE, values = labelenv, width = 20)
    # Translate the ComboBox's 0-based index into a 1-based one.
    on_accept <- function() {
        venvironment <<- as.numeric(tclvalue(tcl(picker, "getvalue"))) + 1
        tkdestroy(dlg)
    }
    on_cancel <- function() {
        venvironment <<- -1
        tkdestroy(dlg)
    }
    btn_ok <- tkbutton(dlg, text = " OK ", command = on_accept)
    btn_cancel <- tkbutton(dlg, text = " Cancel ", command = on_cancel)
    # Empty labels provide spacing between the dialog rows.
    pad_row <- function() tkgrid(tklabel(dlg, text = " "))
    pad_row()
    pad_row()
    tkgrid(tklabel(dlg, text = "Select an Environment: "), picker)
    pad_row()
    tkgrid(btn_ok, btn_cancel)
    pad_row()
    tkfocus(dlg)
    # Modal: wait until the dialog window is destroyed.
    tkwait.window(dlg)
}
#############################################
# Dialog for selecting two genotypes
#############################################
# Modal dialog asking the user to pick two genotypes to compare.
# Writes the 1-based indices of the choices to the shared variables
# vgenotype1 / vgenotype2 (-1 on cancel) and blocks until closed.
SelectTwoGenotype <- function() {
winEnvGen <- tktoplevel()
tkwm.title(winEnvGen, "Select Genotypes")
# Reset the shared selections before showing the dialog.
vgenotype1 <<- -1
vgenotype2 <<- -1
combogenotype1 <- tkwidget(winEnvGen, "ComboBox", editable = FALSE,
values = labelgen, width = 20)
combogenotype2 <- tkwidget(winEnvGen, "ComboBox", editable = FALSE,
values = labelgen, width = 20)
onOK <- function() {
# ComboBox "getvalue" is 0-based; +1 converts to R indexing.
vgenotype1 <<- as.numeric(tclvalue(tcl(combogenotype1,
"getvalue"))) + 1
vgenotype2 <<- as.numeric(tclvalue(tcl(combogenotype2,
"getvalue"))) + 1
tkdestroy(winEnvGen)
}
onCancel <- function() {
vgenotype1 <<- -1
vgenotype2 <<- -1
tkdestroy(winEnvGen)
}
OK.but <- tkbutton(winEnvGen, text = " OK ", command = onOK)
Cancel.but <- tkbutton(winEnvGen, text = " Cancel ",
command = onCancel)
# Layout: heading, spacers, the two pickers, then the buttons.
tkgrid(tklabel(winEnvGen, text = "Select two genotypes to compare: "))
tkgrid(tklabel(winEnvGen, text = " "))
tkgrid(tklabel(winEnvGen, text = " "))
tkgrid(tklabel(winEnvGen, text = "Genotype 1: "), combogenotype1)
tkgrid(tklabel(winEnvGen, text = "Genotype 2: "), combogenotype2)
tkgrid(tklabel(winEnvGen, text = " "))
tkgrid(OK.but, Cancel.but)
tkgrid(tklabel(winEnvGen, text = " "))
tkfocus(winEnvGen)
# Modal: block until the window is destroyed by one of the handlers.
tkwait.window(winEnvGen)
}
##################################
# Save the image in JPG format
##################################
# Prompt for a destination file and save the current biplot as a JPEG
# (8x8 inches at 96 dpi).  Does nothing if the dialog is cancelled.
SaveFileJPG <- function() {
    FileName <- tclvalue(tkgetSaveFile(filetypes = "{{Jpeg files} {.jpg .jpeg}} {{All files} *}"))
    if (nchar(FileName)) {
        # Append ".jpg" only when the name does not already carry a
        # JPEG extension.  The dialog offers both ".jpg" and ".jpeg",
        # so accept either (the old check only recognised ".jpg" and
        # turned "photo.jpeg" into "photo.jpeg.jpg").
        if (!grepl("\\.jpe?g$", FileName, ignore.case = TRUE))
            FileName <- paste(FileName, ".jpg", sep = "")
        # NOTE(review): restoreConsole is a Windows-only device
        # argument — confirm it is harmless on other platforms.
        jpeg(FileName, width = 8, height = 8, units = "in",
            restoreConsole = FALSE, res = 96, quality = 50)
        # Redraw the biplot on the file device instead of the screen.
        plotFunctiond(screen = FALSE)
        dev.off()
    }
}
#######################################
# Save the image as a Windows metafile
#######################################
# SaveFileMetafile <- function() {
# FileName <- tclvalue(tkgetSaveFile(filetypes = "{{Metafiles} {.wmf}} {{All files} *}"))
# if (nchar(FileName)) {
# nn <- nchar(FileName)
# if (nn < 5 || substr(FileName, nn - 3, nn) != ".wmf")
# FileName <- paste(FileName, ".wmf", sep = "")
# win.metafile(FileName, width = 8, height = 8, restoreConsole = FALSE)
# plotFunctiond(screen = FALSE)
# dev.off()
# }
# }
#########################################
# Save the image in PostScript format
#########################################
# Prompt for a destination file and save the current biplot as an
# 8x8-inch PostScript document.  Cancelling the dialog is a no-op.
SaveFilePostscript <- function() {
    # Ask the user where to save (Tk file-save dialog).
    target <- tclvalue(tkgetSaveFile(filetypes = "{{Postscript files} {.ps}} {{All files} *}"))
    if (nchar(target)) {
        # Guarantee a ".ps" extension on the chosen name.
        len <- nchar(target)
        has_ps_ext <- len >= 4 && substr(target, len - 2, len) == ".ps"
        if (!has_ps_ext)
            target <- paste(target, ".ps", sep = "")
        postscript(file = target, width = 8, height = 8,
            horizontal = FALSE, onefile = FALSE, paper = "default",
            family = "URWHelvetica")
        # Re-render the biplot onto the PostScript device.
        plotFunctiond(screen = FALSE)
        dev.off()
    }
}
##################################
# Save the image in PDF format
##################################
# Prompt for a destination file and save the current biplot as a
# 7x7-inch PDF.  Does nothing if the dialog is cancelled.
SaveFilePDF <- function() {
FileName <- tclvalue(tkgetSaveFile(filetypes = "{{PDF files} {.pdf}} {{All files} *}"))
if (nchar(FileName)) {
# Append ".pdf" unless the chosen name already ends with it.
nn <- nchar(FileName)
if (nn < 5 || substr(FileName, nn - 3, nn) != ".pdf")
FileName <- paste(FileName, ".pdf", sep = "")
pdf(FileName, width = 7, height = 7)
# Redraw the biplot on the PDF device instead of the screen.
plotFunctiond(screen = FALSE)
dev.off()
}
}
# Prompt for a destination file and save the current biplot as an
# 8x8-inch, 96-dpi bitmap.  Does nothing if the dialog is cancelled.
SaveFileBmp <- function() {
FileName <- tclvalue(tkgetSaveFile(filetypes = "{{Bitmap files} {.bmp}} {{All files} *}"))
if (nchar(FileName)) {
# Append ".bmp" unless the chosen name already ends with it.
nn <- nchar(FileName)
if (nn < 5 || substr(FileName, nn - 3, nn) != ".bmp")
FileName <- paste(FileName, ".bmp", sep = "")
# NOTE(review): restoreConsole is a Windows-only device argument —
# confirm it is harmless on other platforms.
bmp(FileName, width = 8, height = 8, units = "in",
restoreConsole = FALSE, res = 96)
# Redraw the biplot on the bitmap device instead of the screen.
plotFunctiond(screen = FALSE)
dev.off()
}
}
# Prompt for a destination file and save the current biplot as an
# 8x8-inch, 96-dpi PNG.  Does nothing if the dialog is cancelled.
SaveFilePng <- function() {
FileName <- tclvalue(tkgetSaveFile(filetypes = "{{Png files} {.png}} {{All files} *}"))
if (nchar(FileName)) {
# Append ".png" unless the chosen name already ends with it.
nn <- nchar(FileName)
if (nn < 5 || substr(FileName, nn - 3, nn) != ".png")
FileName <- paste(FileName, ".png", sep = "")
# NOTE(review): restoreConsole is a Windows-only device argument —
# confirm it is harmless on other platforms.
png(FileName, width = 8, height = 8, units = "in",
restoreConsole = FALSE, res = 96)
# Redraw the biplot on the PNG device instead of the screen.
plotFunctiond(screen = FALSE)
dev.off()
}
}
# Prompt for a destination file and save the current biplot as PicTeX
# source (a TeX picture environment).  No-op if the dialog is cancelled.
SaveFileTeX <- function() {
FileName <- tclvalue(tkgetSaveFile(filetypes = "{{TeX files} {.tex}} {{All files} *}"))
if (nchar(FileName)) {
# Append ".tex" unless the chosen name already ends with it.
nn <- nchar(FileName)
if (nn < 5 || substr(FileName, nn - 3, nn) != ".tex")
FileName <- paste(FileName, ".tex", sep = "")
pictex(FileName, width = 8, height = 8, debug = FALSE,
bg = "white", fg = "black")
# Redraw the biplot on the PicTeX device instead of the screen.
plotFunctiond(screen = FALSE)
dev.off()
}
}
# Print <- function() {
#try(win.print(), silent = TRUE)
#if (geterrmessage() != "Error in win.print() : unable to start device devWindows\n") {
# plotFunctiond(screen = FALSE)
# dev.off()
#}
# }
#
# Main window
#
# OK handler of the model-selection dialog: read the chosen centering /
# scaling / SVP options, recompute the model, close the dialog and build
# the main biplot window with its complete menu system.  All state that
# the menu callbacks share (options, dimensions, TypeGraph, colours,
# labels, ...) lives in the enclosing environment and is updated via <<-.
OnOKModelSelection <- function()
{
# ComboBox "getvalue" returns a 0-based index; +1 maps it into the
# corresponding data* vector.
optioncentering <<- datacentering[as.numeric(tclvalue(tcl(comboscentering,
"getvalue"))) + 1]
optionscaling <<- datascaling[as.numeric(tclvalue(tcl(comboscaling,
"getvalue"))) + 1]
optionSVP <<- dataSVP[as.numeric(tclvalue(tcl(comboSVP,
"getvalue"))) + 1]
Models()
tkdestroy(winmodel)
# Main plot window: a tkrplot canvas with the mouse handlers bound.
winplot <- tktoplevel()
tkwm.title(winplot, "GGE Biplot")
img <<- tkrplot(winplot, fun = plotFunctiond, hscale = 1.5, vscale = 1.5)
tkpack(img, expand = "TRUE", fill = "both")
tkbind(img, "<B1-Motion>",OnLeftClick.move)
tkbind(img, "<ButtonPress-1>",OnLeftClick.down)
tkbind(img, "<ButtonRelease-1>",OnLeftClick.up)
tkbind(img, "<Button-3>",OnRightClick)
# Menu-bar skeleton: one tkmenu per top-level entry or cascade.
topMenu <- tkmenu(winplot)
tkconfigure(winplot, menu = topMenu)
menuFile <- tkmenu(topMenu, tearoff = FALSE)
menuView <- tkmenu(topMenu, tearoff = FALSE)
menuBiplotTools <- tkmenu(topMenu, tearoff = FALSE)
menuFormat <- tkmenu(topMenu, tearoff = FALSE)
menuChangeColor <- tkmenu(topMenu, tearoff = FALSE)
menuChangeFont <- tkmenu(topMenu, tearoff = FALSE)
menuRank <- tkmenu(topMenu, tearoff = FALSE)
menuModels <- tkmenu(topMenu, tearoff = FALSE)
menuBiplot <- tkmenu(topMenu, tearoff = FALSE)
menuDividedBy <- tkmenu(topMenu, tearoff = FALSE)
menuCenteredBy <- tkmenu(topMenu, tearoff = FALSE)
menuSVP <- tkmenu(topMenu, tearoff = FALSE)
menuSaveAs <- tkmenu(topMenu, tearoff = FALSE)
# ---- File menu ----
tkadd(menuFile, "command", label = "Open log file",
command = function()
{
Addfile()
})
tkadd(menuFile, "separator")
tkadd(menuFile, "command", label = "Copy image",
command = function()
{
tkrreplot(img)
})
tkadd(menuFile, "cascade", label = "Save image", menu = menuSaveAs)
tkadd(menuSaveAs, "command", label = "PDF file",
command = function()
{
SaveFilePDF()
})
tkadd(menuSaveAs, "command", label = "Postscript file",
command = function()
{
SaveFilePostscript()
})
tkadd(menuSaveAs, "command", label = "Bmp file",
command = function()
{
SaveFileBmp()
})
tkadd(menuSaveAs, "command", label = "Png file",
command = function()
{
SaveFilePng()
})
tkadd(menuSaveAs, "command", label = "Jpg/Jpeg file",
command = function()
{
SaveFileJPG()
})
tkadd(menuSaveAs, "command", label = "TeX file",
command = function()
{
SaveFileTeX()
})
tkadd(menuFile, "separator")
tkadd(menuFile, "command", label = "Exit",
command = function()
{
tkdestroy(winplot)
})
# ---- Biplot menu: which pair of principal components is plotted ----
tkadd(menuBiplot, "radiobutton", label = "PC1 vs. PC2 (Primary)",variable = vaxis, value = "0",
command = function()
{
dimension1 <<- 1
dimension2 <<- 2
tkrreplot(img)
})
tkadd(menuBiplot, "radiobutton", label = "PC3 vs. PC4", variable = vaxis, value = "1",
command = function()
{
dimension1 <<- 3
dimension2 <<- 4
tkrreplot(img)
})
tkadd(menuBiplot, "radiobutton", label = "PC5 vs. PC6", variable = vaxis, value = "2",
command = function()
{
dimension1 <<- 5
dimension2 <<- 6
tkrreplot(img)
})
tkadd(menuBiplot, "separator")
tkadd(menuBiplot, "radiobutton", label = "PC1 vs. PC3", variable = vaxis, value = "3",
command = function()
{
dimension1 <<- 1
dimension2 <<- 3
tkrreplot(img)
})
tkadd(menuBiplot, "radiobutton", label = "PC2 vs. PC3",variable = vaxis, value = "4",
command = function()
{
dimension1 <<- 2
dimension2 <<- 3
tkrreplot(img)
})
tkadd(menuBiplot, "separator")
tkadd(menuBiplot, "command", label = "Biplot 3D",
command = function()
{
Biplot3D()
})
# ---- View menu: what is shown and basic toggles ----
tkadd(menuView, "radiobutton", label = "Show Both", variable = showboth, value = "0",
command = function()
{
tkrreplot(img)
})
tkadd(menuView, "radiobutton", label = "Show Genotypes", variable = showboth, value = "1",
command = function()
{
tkrreplot(img)
})
tkadd(menuView, "radiobutton", label = "Show Environments", variable = showboth, value = "2",
command = function()
{
tkrreplot(img)
})
tkadd(menuView, "separator")
# ---- Models submenus: scaling / centering / SVP choices ----
tkadd(menuDividedBy, "radiobutton", label = "0.No scaling", variable = scaling, value = "0",
command = function()
{
optionscaling <<- "0.No scaling"
Models()
tkrreplot(img)
})
tkadd(menuDividedBy, "radiobutton", label = "1.Std Deviation (SD)", variable = scaling, value = "1",
command = function()
{
optionscaling <<- "1.Std Deviation (SD)"
Models()
tkrreplot(img)
})
tkadd(menuCenteredBy, "radiobutton", label = "0.No centering", variable = centering, value = "0",
command = function()
{
optioncentering <<- "0.No centering"
Models()
tkrreplot(img)
})
tkadd(menuCenteredBy, "radiobutton", label = "1.Global-Centered E+G+GE", variable = centering, value = "1",
command = function()
{
optioncentering <<- "1.Global-Centered E+G+GE"
Models()
tkrreplot(img)
})
tkadd(menuCenteredBy, "radiobutton", label = "2.Tester-Centered G+GE", variable = centering, value = "2",
command = function()
{
optioncentering <<- "2.Tester-Centered G+GE"
Models()
tkrreplot(img)
})
tkadd(menuCenteredBy, "radiobutton", label = "3.Double-Centered GE", variable = centering, value = "3",
command = function()
{
optioncentering <<- "3.Double-Centered GE"
Models()
tkrreplot(img)
})
tkadd(menuSVP, "radiobutton", label = "JK -(Row Metric Preserving)", variable = svp, value = "0",
command = function()
{
optionSVP <<- "JK -(Row Metric Preserving)"
Models()
tkrreplot(img)
})
tkadd(menuSVP, "radiobutton", label = "GH -(Column Metric Preserving)", variable = svp, value = "1",
command = function()
{
optionSVP <<- "GH -(Column Metric Preserving)"
Models()
tkrreplot(img)
})
tkadd(menuSVP, "radiobutton", label = "HJ -(Dual Metric Preserving)", variable = svp, value = "2",
command = function()
{
optionSVP <<- "HJ -(Dual Metric Preserving)"
Models()
tkrreplot(img)
})
tkadd(menuSVP, "radiobutton", label = "SQ - Symmetrical", variable = svp, value = "3",
command = function()
{
optionSVP <<- "SQ - Symmetrical"
Models()
tkrreplot(img)
})
tkadd(menuView, "checkbutton", label = "Show/Hide Title", variable = showtitle,
command = function()
{
if (tclvalue(showtitle) == "1") wintitle <<- "GGE Biplot"
if (tclvalue(showtitle) == "0") wintitle <<- NULL
tkrreplot(img)
})
# Label typo fixed: "Gidelines" -> "Guidelines".
tkadd(menuView, "checkbutton", label = "Show/Hide Guidelines", variable = showguidelines,
command = function()
{
tkrreplot(img)
})
tkadd(menuView, "checkbutton", label = "Add/Remove Symbols",variable = showsymbols,
command = function()
{
if (tclvalue(showsymbols) == "1")
{
symbol_gen <<- 20
symbol_env <<- 18
}
if (tclvalue(showsymbols) == "0")
{
symbol_gen <<- NA_integer_
symbol_env <<- NA_integer_
}
tkrreplot(img)
})
# ---- Biplot Tools menu: each entry switches TypeGraph and redraws ----
tkadd(menuBiplotTools, "command", label = "Examine a Genotype",
command = function()
{
SelectGenotype()
if (vgenotype == -1)
{
}
else
{
if (tclvalue(showtitle) == "1") wintitle <<- "Examine a Genotype"
TypeGraph <<- 3
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
}
})
tkadd(menuBiplotTools, "command", label = "Examine an Environment",
command = function()
{
SelectEnvironment()
if (venvironment == -1)
{
}
else
{
TypeGraph <<- 2
if (tclvalue(showtitle) == "1") wintitle <<- "Examine an Environment"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
}
})
tkadd(menuBiplotTools, "separator")
tkadd(menuBiplotTools, "command", label = "Relation among Environments",
command = function()
{
TypeGraph <<- 4
# NOTE(review): this replaces the shared tclVar with a fresh one —
# confirm no widget is still bound to the old variable.
showcircles <<- tclVar("1")
if (tclvalue(showtitle) == "1") wintitle <<- "Relationship among environments"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
tkadd(menuBiplotTools, "separator")
tkadd(menuBiplotTools, "command", label = "Compare two Genotypes",
command = function()
{
SelectTwoGenotype()
TypeGraph <<- 5
if (tclvalue(showtitle) == "1") wintitle <<- "Compare two Genotypes"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
tkadd(menuBiplotTools, "separator")
tkadd(menuBiplotTools, "command", label = "Which Won Where/What",
command = function()
{
TypeGraph <<- 6
if (tclvalue(showtitle) == "1") wintitle <<- "Which Won Where/What"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
# Typos fixed in label and title ("Discrimitiveness"/"representativenss").
tkadd(menuBiplotTools, "command", label = "Discriminativeness vs. representativeness",
command = function()
{
TypeGraph <<- 7
if (tclvalue(showtitle) == "1") wintitle <<- "Discriminativeness vs. representativeness"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
tkadd(menuBiplotTools, "command", label = "Mean vs. Stability",
command = function()
{
if (tclvalue(showtitle) == "1") wintitle <<- "Mean vs. Stability"
TypeGraph <<- 9
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
tkadd(menuBiplotTools, "separator")
tkadd(menuBiplotTools, "cascade", label = "Rank Environment/Genotypes",
menu = menuRank)
tkadd(menuRank, "radiobutton", label = "with ref.to the 'Ideal' Environment",variable = vrank, value = "1",
command = function()
{
TypeGraph <<- 8
if (tclvalue(showtitle) == "1") wintitle <<- "Ranking Environments"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
tkadd(menuRank, "radiobutton", label = "with ref.to the 'Ideal' Genotype", variable = vrank, value = "2",
command = function()
{
TypeGraph <<- 10
if (tclvalue(showtitle) == "1") wintitle <<- "Ranking Genotypes"
tkentryconfigure(menuView, 2, state = "disabled")
tkentryconfigure(menuView, 1, state = "disabled")
tkrreplot(img)
})
tkadd(menuBiplotTools, "separator")
tkadd(menuBiplotTools, "command", label = "Back to original data",
command = function()
{
TypeGraph <<- 1
Models()
# Fixed: the original created a NEW local tclVar ("showboth <- tclVar")
# which never reset the View radio buttons; reset the shared variable
# instead.
tclvalue(showboth) <<- "0"
# Fixed: restore the shared window title (was a local `<-`, so the
# global title kept the previous tool's text).
if (tclvalue(showtitle) == "1") wintitle <<- "GGE Biplot"
tkentryconfigure(menuBiplotTools, 0, state = "normal")
tkentryconfigure(menuView, 2, state = "normal")
tkentryconfigure(menuView, 1, state = "normal")
tkrreplot(img)
})
# ---- Format menu: title text, fonts and colours ----
tkadd(menuFormat, "command", label = "Plot Title",
command = function()
{
ReturnVal = modalDialog("GGE Biplot", "Give your biplot a title: ","")
if (ReturnVal == "ID_CANCEL") return()
wintitle <<- ReturnVal
tkrreplot(img)
tkfocus(winplot)
})
tkadd(menuFormat, "separator")
tkadd(menuChangeFont, "command", label = "Default",
command = function()
{
vcex <<- 1
tkrreplot(img)
})
tkadd(menuChangeFont, "command", label = "Larger",
command = function()
{
vcex <<- 1.5
tkrreplot(img)
})
tkadd(menuChangeFont, "command", label = "Smaller",
command = function()
{
vcex <<- 0.5
tkrreplot(img)
})
tkadd(menuChangeColor, "command", label = "Background",
command = function()
{
background <<- ChangeColorv(background)
tkrreplot(img)
})
tkadd(menuChangeColor, "separator")
tkadd(menuChangeColor, "command", label = "Genotype labels",
command = function()
{
colgenotype[] <<- ChangeColorv(colgenotype[1])
tkrreplot(img)
})
tkadd(menuChangeColor, "command", label = "Environment labels",
command = function()
{
colenv[] <<- ChangeColorv(colenv[1])
tkrreplot(img)
})
tkadd(menuChangeColor, "separator")
tkadd(menuChangeColor, "command", label = "Biplot Title",
command = function()
{
coltitle <<- ChangeColorv(coltitle)
tkrreplot(img)
})
# ---- Assemble cascades and the top-level menu bar ----
tkadd(menuFormat, "cascade", label = "Change Color", menu = menuChangeColor)
tkadd(menuFormat, "cascade", label = "Change Font", menu = menuChangeFont)
tkadd(menuModels, "cascade", label = "Scaled (divided) by", menu = menuDividedBy)
tkadd(menuModels, "cascade", label = "Centered by", menu = menuCenteredBy)
tkadd(menuModels, "cascade", label = "S.V.P.", menu = menuSVP)
tkadd(topMenu, "cascade", label = "File", menu = menuFile)
tkadd(topMenu, "cascade", label = "View", menu = menuView)
tkadd(topMenu, "cascade", label = "Biplot Tools", menu = menuBiplotTools)
tkadd(topMenu, "cascade", label = "Format", menu = menuFormat)
tkadd(topMenu, "cascade", label = "Models", menu = menuModels)
tkadd(topMenu, "cascade", label = "Biplot", menu = menuBiplot)
tkfocus(winplot)
if (TypeGraph != "1")
{
# NOTE(review): `for (temp1 in 5)` iterates exactly once with value 5
# (disables the 6th View entry) — confirm a range was not intended.
for (temp1 in 5) tkentryconfigure(menuView,temp1, state = "disabled")
}
}
#
# Find the label nearest to a right-click (all coordinates in image
# pixels) and pop up a context menu that lets the user rename or
# recolour it.  Updates the shared labelsVec/labelgen/labelenv and
# colgenotype/colenv vectors in the enclosing environment and redraws.
labelClosestPoint <- function(xClick,yClick,imgXcoords,imgYcoords)
{
squared.Distance <- (xClick-imgXcoords)^2 + (yClick-imgYcoords)^2
indexClosest <- which.min(squared.Distance)
#
RightClickOnPoint.Menu <- tkmenu(img, tearoff = FALSE)
tkadd(RightClickOnPoint.Menu, "command", label = "Change Label",
command = function() {
# Small editor window with an entry box pre-filled with the label.
mm <-tktoplevel()
tkwm.title(mm, labelsVec[indexClosest])
framemm <-tkframe(mm, relief = "groove", borderwidth = 2,
background = "white")
# NOTE(review): Namei starts as a plain character value and is then
# assigned a tclvalue — confirm this is intended rather than tclVar().
Namei <- labelsVec[indexClosest]
tclvalue(Namei) <- labelsVec[indexClosest]
entry.Namei <-tkentry(framemm,width="11",textvariable=Namei)
OnOKli <- function()
{
NameVali <- tclvalue(Namei)
# Which shared label vectors get the new name depends on the
# current view (TypeGraph) and on what is being shown.
if (TypeGraph == 1)
{
if (tclvalue(showboth) == "0")
{
labelsVec[indexClosest] <<- NameVali
labelgen <<- labelsVec[1:length(colgenotype)]
labelenv <<- labelsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
}
if (tclvalue(showboth) == "1")
{
labelsVec[indexClosest] <<- NameVali
labelgen <<- labelsVec[1:length(colgenotype)]
}
if (tclvalue(showboth) == "2")
{
# Fixed: was a local `<-`, leaving the shared labelsVec out of
# sync with every other branch.
labelsVec[indexClosest] <<- NameVali
labelenv <<- labelsVec[1:length(colenv)]
}
}
if (TypeGraph == 4 || TypeGraph == 8)
{
labelsVec[indexClosest] <<- NameVali
labelenv <<- labelsVec[1:length(colenv)]
}
if (TypeGraph == 5 || TypeGraph == 6 || TypeGraph == 7 || TypeGraph == 9 || TypeGraph == 10)
{
labelsVec[indexClosest] <<- NameVali
labelgen <<- labelsVec[1:length(colgenotype)]
labelenv <<- labelsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
}
tkrreplot(img)
tkdestroy(mm)
}
OK.butli <-tkbutton(framemm,text="Change label",command=OnOKli,width=12)
tkbind(entry.Namei, "<Return>",OnOKli)
tkpack(entry.Namei,OK.butli,expand = "TRUE", side="left", fill = "both")
tkpack(framemm, expand = "TRUE", side="top", fill = "both")
})
tkadd(RightClickOnPoint.Menu, "command", label = "Change Color",
command = function()
{
# The new colour is written into a local copy of colorsVec and then
# propagated to the shared colgenotype/colenv vectors; plotFunctiond
# rebuilds colorsVec itself on the next redraw.
if (TypeGraph == 1)
{
if (tclvalue(showboth) == "0")
{
colorsVec[indexClosest] <- ChangeColorv(colorsVec [indexClosest])
colgenotype <<- colorsVec[1:length(colgenotype)]
colenv <<- colorsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
}
if (tclvalue(showboth) == "1")
{
colorsVec[indexClosest] <- ChangeColorv(colorsVec [indexClosest])
colgenotype <<- colorsVec[1:length(colgenotype)]
}
if (tclvalue(showboth) == "2")
{
colorsVec[indexClosest] <- ChangeColorv(colorsVec [indexClosest])
colenv <<- colorsVec[1:length(colenv)]
}
}
if (TypeGraph == 4 || TypeGraph == 8)
{
colorsVec[indexClosest] <- ChangeColorv(colorsVec [indexClosest])
colenv <<- colorsVec[1:length(colenv)]
}
if (TypeGraph == 5 || TypeGraph == 6 || TypeGraph == 7 || TypeGraph == 9 || TypeGraph == 10)
{
colorsVec[indexClosest] <- ChangeColorv(colorsVec [indexClosest])
colgenotype <<- colorsVec[1:length(colgenotype)]
colenv <<- colorsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
}
tkrreplot(img)
})
# Show the popup at the current pointer position.
tkpopup(RightClickOnPoint.Menu,tclvalue(tkwinfo("pointerx",
img)), tclvalue(tkwinfo("pointery", img)))
}
#
# Right-button handler on the tkrplot canvas: convert the click from
# widget pixels into the plot's pixel coordinate system and delegate to
# labelClosestPoint() to show the rename/recolour popup.  Uses
# parPlotSize / usrCoords / xCoords / yCoords stored by plotFunctiond.
OnRightClick <- function(x,y)
{
# Click position in widget pixels (tk passes coordinates as strings).
xClick <- as.numeric(x) + 0.5
yClick <- as.numeric(y) + 0.5
width = as.numeric(tclvalue(tkwinfo("reqwidth",img)))
height = as.numeric(tclvalue(tkwinfo("reqheight",img)))
# Plot region inside the widget, from par("plt") captured at draw time.
xMin = parPlotSize[1] * width
xMax = parPlotSize[2] * width
yMin = parPlotSize[3] * height
yMax = parPlotSize[4] * height
rangeX = usrCoords[2] - usrCoords[1]
rangeY = usrCoords[4] - usrCoords[3]
# Label anchor points converted from user to pixel coordinates.
imgXcoords = (xCoords-usrCoords[1])*(xMax-xMin)/rangeX + xMin
imgYcoords = (yCoords-usrCoords[3])*(yMax-yMin)/rangeY + yMin
# Tk measures y from the top edge; the plot measures from the bottom.
yClick <- height - yClick
labelClosestPoint(xClick,yClick,imgXcoords,imgYcoords)
}
# Button-release handler: after a drag, ask whether to keep the label at
# its new position ("Yes") or restore the position saved by
# OnLeftClick.down in xAnt/yAnt ("No"), then redraw.  Views 2 and 3
# (single genotype/environment) have no draggable labels.
OnLeftClick.up <- function(x,y)
{
if (TypeGraph != 2 && TypeGraph != 3)
{
msg <- ("-To change the label press Yes.\n-To remove it press No.")
mbval <- tkmessageBox(title="Change of label",message=msg,type="yesno",icon="question")
if (tclvalue(mbval)=="yes")
{
# Keep the dragged position: nothing to undo.
}
if(tclvalue(mbval)=="no")
{
# Restore the pre-drag position of the grabbed label.
if ((TypeGraph == 4) || (TypeGraph == 1 && tclvalue(showboth) == "2") || (TypeGraph == 8))
{
# Environment-only views: label i lives after the genotype slots.
xtext[indexClosest + length(colgenotype)] <<- xAnt
ytext[indexClosest + length(colgenotype)] <<- yAnt
}
else
{
xtext[indexClosest] <<- xAnt
ytext[indexClosest] <<- yAnt
}
}
tkrreplot(img)
}
}
# Drag handler (<B1-Motion>): move the label grabbed by
# OnLeftClick.down so it follows the pointer.  The pointer position is
# converted from widget pixels to plot (user) coordinates and written
# into the shared xtext/ytext vectors, then the canvas is redrawn.
OnLeftClick.move <- function(x,y)
{
width = as.numeric(tclvalue(tkwinfo("reqwidth",img)))
height = as.numeric(tclvalue(tkwinfo("reqheight",img)))
# Plot region inside the widget, from par("plt") captured at draw time.
xMin = parPlotSize[1] * width
xMax = parPlotSize[2] * width
yMin = parPlotSize[3] * height
yMax = parPlotSize[4] * height
rangeX = usrCoords[2] - usrCoords[1]
rangeY = usrCoords[4] - usrCoords[3]
# Pointer in pixels (tk y runs top-down), then in user coordinates.
xClick <- as.numeric(x)+0.5
yClick <- height - (as.numeric(y)+0.5)
xPlotCoord = usrCoords[1]+(xClick-xMin)*rangeX/(xMax-xMin)
yPlotCoord = usrCoords[3]+(yClick-yMin)*rangeY/(yMax-yMin)
if ((TypeGraph == 4) || (TypeGraph == 1 && tclvalue(showboth) == "2") || (TypeGraph == 8))
{
# Environment-only views: label i lives after the genotype slots.
xtext[indexClosest + length(colgenotype)] <<- xPlotCoord
ytext[indexClosest + length(colgenotype)] <<- yPlotCoord
}
else if (TypeGraph == 2 || TypeGraph == 3)
{
# Single genotype/environment views: labels are not draggable.
}
else
{
xtext[indexClosest] <<- xPlotCoord
ytext[indexClosest] <<- yPlotCoord
}
tkrreplot(img)
}
# Button-press handler: record which label is closest to the press
# (shared indexClosest) and remember its current position in xAnt/yAnt
# so OnLeftClick.up can restore it if the user answers "No".
OnLeftClick.down <- function(x,y)
{
width = as.numeric(tclvalue(tkwinfo("reqwidth",img)))
height = as.numeric(tclvalue(tkwinfo("reqheight",img)))
# Plot region inside the widget, from par("plt") captured at draw time.
xMin = parPlotSize[1] * width
xMax = parPlotSize[2] * width
yMin = parPlotSize[3] * height
yMax = parPlotSize[4] * height
rangeX = usrCoords[2] - usrCoords[1]
rangeY = usrCoords[4] - usrCoords[3]
# Label anchor points converted from user to pixel coordinates.
imgXcoords = (xCoords-usrCoords[1])*(xMax-xMin)/rangeX + xMin
imgYcoords = (yCoords-usrCoords[3])*(yMax-yMin)/rangeY + yMin
# Press position in pixels (tk y runs top-down; flip to bottom-up).
xClick <- as.numeric(x)+0.5
yClick <- height - (as.numeric(y)+0.5)
squared.Distance <- (xClick-imgXcoords)^2 + (yClick-imgYcoords)^2
indexClosest <<- which.min(squared.Distance)
if ((TypeGraph == 4) || (TypeGraph == 1 && tclvalue(showboth) == "2") || (TypeGraph == 8))
{
# Environment-only views: label i lives after the genotype slots.
xAnt <<- xtext[indexClosest + length(colgenotype)]
yAnt <<- ytext[indexClosest + length(colgenotype)]
}
else if (TypeGraph == 2 || TypeGraph == 3)
{
# Nothing to grab in these views.
}
else
{
xAnt <<- xtext[indexClosest]
yAnt <<- ytext[indexClosest]
}
}
# Model-selection dialog shown at start-up: three ComboBoxes (SVP,
# centering, scaling) pre-set to the current option values, plus an OK
# button wired to OnOKModelSelection, which builds the main window.
winmodel <- tktoplevel()
tkwm.title(winmodel, "Model Selection")
comboscaling <- tkwidget(winmodel, "ComboBox", editable = FALSE,
values = datascaling, width = 30)
# Pre-select the current scaling option in the ComboBox.
defaultscaling <- tclVar(optionscaling)
tkconfigure(comboscaling, textvariable = defaultscaling)
comboscentering <- tkwidget(winmodel, "ComboBox", editable = FALSE,
values = datacentering, width = 30)
# Pre-select the current centering option.
defaultcentering <- tclVar(optioncentering)
tkconfigure(comboscentering, textvariable = defaultcentering)
comboSVP <- tkwidget(winmodel, "ComboBox", editable = FALSE,
values = dataSVP, width = 30)
# Pre-select the current singular-value-partitioning option.
defaultSVP <- tclVar(optionSVP)
tkconfigure(comboSVP, textvariable = defaultSVP)
OK.modelselection <- tkbutton(winmodel, text = " OK ",command = OnOKModelSelection)
# Grid layout: label above each ComboBox, blank labels as spacers.
tkgrid(tklabel(winmodel, text = "SVP: "),
sticky = "w")
tkgrid(comboSVP)
tkgrid(tklabel(winmodel, text = " "),
sticky = "w")
tkgrid(tklabel(winmodel, text = "Centered By: "),
sticky = "w")
tkgrid(comboscentering)
tkgrid(tklabel(winmodel, text = " "),
sticky = "w")
tkgrid(tklabel(winmodel, text = "Scaled (Divided) By: "),
sticky = "w")
tkgrid(comboscaling)
tkgrid(tklabel(winmodel, text = " "),
sticky = "w")
tkgrid(OK.modelselection)
tkfocus(winmodel)
}
| /GGEBiplotGUI/R/GGEBiplot.R | no_license | ingted/R-Examples | R | false | false | 71,377 | r | GGEBiplot <- function (Data)
{
tclRequire("BWidget")
#######################################################################
# Shared state of the GUI. These variables live in the environment of
# GGEBiplot and are read/written (often via <<-) by the nested handler
# and helper functions defined below.
#######################################################################
# Current model options (text labels matching the combo-box entries).
optioncentering <- "2.Tester-Centered G+GE"
optionscaling <- "0.No scaling"
optionSVP <- "GH -(Column Metric Preserving)"
# Available choices offered in the model-selection dialog.
datascaling <- c("0.No scaling", "1.Std Deviation (SD)")
datacentering <- c("0.No centering", "1.Global-Centered E+G+GE",
"2.Tester-Centered G+GE", "3.Double-Centered GE")
dataSVP <- c("JK -(Row Metric Preserving)", "GH -(Column Metric Preserving)",
"HJ -(Dual Metric Preserving)", "SQ - Symmetrical")
# Plot appearance.
wintitle <- "GGE Biplot"
coltitle <- "black"
background <- "white"
centro <- c(0, 0)          # biplot origin used for arrows/segments
symbol = NA_integer_
symbol_gen = NA_integer_   # plotting symbol for genotypes
symbol_env = NA_integer_   # plotting symbol for environments
subtitle <- NULL
ejes <- array()            # axis labels ("AXIS1", "AXIS2", ...)
# Tcl-linked option flags toggled from the menus ("0"/"1"/"2").
showtitle <- tclVar("1")
showboth <- tclVar("0")
showsymbols <- tclVar("0")
vaxis <- tclVar("0")
showguidelines <- tclVar("1")
showcircles <- tclVar("0")
scaling <- tclVar("0")
centering <- tclVar("2")
svp <- tclVar("1")
vrank <- tclVar("1")
# Which of the biplot views (1..10) is currently displayed.
TypeGraph <- 1
# Model results, filled in by Models().
matrixdata <- NULL         # numeric copy of Data after centering/scaling
desviation <- NULL         # per-column SD when SD scaling is selected
colgenotype <- NULL        # genotype label colors
colenv <- NULL             # environment label colors
labelgen <- NULL
labelenv <- NULL
coordgenotype <- NULL      # genotype scores from the SVD
coordenviroment <- NULL    # environment scores from the SVD
# Label positions (genotypes first, then environments) and drag state.
xtext <- NULL
ytext <- NULL
xCoords <- NULL
yCoords <- NULL
xAnt <- NULL
yAnt <- NULL
indexClosest <- NULL
labelsVec <- NULL
colorsVec <- NULL
# Selections made in the pick-a-genotype/environment dialogs (-1 = none).
venvironment <- -1
vgenotype <- -1
vgenotype1 <- -1
vgenotype2 <- -1
# Axes of the SVD currently displayed, and label magnification.
dimension1 <- 1
dimension2 <- 2
vcex <- 1
# tkrplot image handle and plot-geometry captured at each redraw.
img <- NULL
parPlotSize <- NULL
usrCoords <- NULL
##########################################################
# Modal text-entry dialog.
#
# title: window title.
# question: prompt shown next to the entry field.
# entryInit: initial text in the entry field.
# entryWidth: entry width in characters.
# returnValOnCancel: value returned when the user cancels.
#
# Returns the entered text, or returnValOnCancel on cancel.
#
# Fix: the OK callback previously did `ReturnVal = tclvalue(...)`,
# which created a variable local to the callback, so the dialog
# always returned returnValOnCancel. Both callbacks now write to the
# enclosing ReturnVal with `<<-`.
##########################################################
modalDialog <- function(title, question, entryInit, entryWidth = 20,
    returnValOnCancel = "ID_CANCEL") {
    dlg <- tktoplevel()
    tkwm.deiconify(dlg)
    tkgrab.set(dlg)    # make the dialog modal
    tkfocus(dlg)
    tkwm.title(dlg, title)
    textEntryVarTcl <- tclVar(paste(entryInit))
    textEntryWidget <- tkentry(dlg, width = paste(entryWidth),
        textvariable = textEntryVarTcl)
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = question), textEntryWidget)
    tkgrid(tklabel(dlg, text = "    "))
    ReturnVal <- returnValOnCancel
    onOK <- function() {
        # Propagate the entered value to the enclosing frame.
        ReturnVal <<- tclvalue(textEntryVarTcl)
        tkgrab.release(dlg)
        tkdestroy(dlg)
    }
    onCancel <- function() {
        ReturnVal <<- returnValOnCancel
        tkgrab.release(dlg)
        tkdestroy(dlg)
    }
    OK.but <- tkbutton(dlg, text = "   OK   ", command = onOK)
    Cancel.but <- tkbutton(dlg, text = " Cancel ", command = onCancel)
    tkgrid(OK.but, Cancel.but)
    tkgrid(tklabel(dlg, text = "    "))
    tkfocus(dlg)
    tkwait.window(dlg)    # block until the dialog is destroyed
    return(ReturnVal)
}
##########################################################
# Open the Tk color chooser seeded with `color`; give back the
# chosen color string, or NULL (invisibly) when the user dismisses
# the dialog without choosing.
##########################################################
ChangeColorv <- function(color) {
    chosen <- tclvalue(tcl("tk_chooseColor", initialcolor = color,
        title = "Choose a color"))
    if (nchar(chosen) > 0) {
        return(chosen)
    }
}
##########################################################
# Font-selection dialog: shows a list of five font styles with a
# scrollbar and an OK button.
#
# Fix: the OK callback executed `return(vfont)` before tkdestroy(),
# so the tkdestroy() call was unreachable and the dialog window was
# never closed. The destroy now happens before the callback returns.
#
# NOTE(review): the selected index is still not propagated anywhere --
# a Tk callback's return value is discarded. Presumably it was meant
# to be stored in an enclosing variable with `<<-`; confirm intended
# use before wiring it up.
##########################################################
ChangeSize <- function()
{
    tt <- tktoplevel()
    tkwm.title(tt, "Font")
    scr <- tkscrollbar(tt, repeatinterval=5, command=function(...)tkyview(tl,...))
    tl <- tklistbox(tt,height=4,selectmode="single",yscrollcommand=function(...)tkset(scr,...),background="white")
    tkgrid(tklabel(tt,text="Font"))
    tkgrid(tl,scr)
    tkgrid.configure(scr,rowspan=4,sticky="nsw")
    fonts <- c("Plain","Bold","Italic","Bold Italic","Symbol")
    for (i in (1:5))
    {
        tkinsert(tl,"end",fonts[i])
    }
    tkselection.set(tl,1)  # default selection is the plain font
    OnOK <- function()
    {
        vfont <- as.numeric(tkcurselection(tl))+1
        tkdestroy(tt)      # close the dialog before leaving the callback
        invisible(vfont)
    }
    OK.but <- tkbutton(tt,text="   OK   ",command=OnOK)
    tkgrid(OK.but)
    tkfocus(tt)
}
######################################################################
# Build the model: copy Data into a plain numeric matrix, apply the
# selected centering and scaling, run the SVD and compute genotype /
# environment coordinates under the selected singular value
# partitioning (SVP). All results are written into variables of the
# enclosing GGEBiplot environment via <<-.
#
# NOTE(review): svd(matrixdata) is recomputed several times below;
# caching one decomposition would avoid redundant work -- confirm no
# intermediate mutation is expected between calls before refactoring.
######################################################################
Models <- function() {
labelgen <<- rownames(Data)
labelenv <<- colnames(Data)
# Copy Data cell by cell into a fresh numeric matrix.
matrixdata <<- matrix(, nrow(Data), ncol(Data))
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- Data[i, j]
# Default label colors: genotypes green, environments blue.
colgenotype <<- rep("green4",dim(matrixdata)[1])
colenv <<- rep("blue",dim(matrixdata)[2])
# One axis label per singular value ("AXIS1", "AXIS2", ...).
for (i in 1:ncol(diag(svd(matrixdata)$d))) ejes[i] <<- paste("AXIS",
i, sep = "")
# Centering option
switch(optioncentering,
"0.No centering" = {
centering <<- tclVar("0")
},
"1.Global-Centered E+G+GE" = {
# Subtract the grand mean.
meanData = mean(matrixdata)
matrixdata <<- matrixdata - meanData
centering <<- tclVar("1")
},
"2.Tester-Centered G+GE" = {
# Subtract each column (environment) mean.
meancolData = colMeans(matrixdata)
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- matrixdata[i, j] - meancolData[j]
centering <<- tclVar("2")
},
"3.Double-Centered GE" = {
# Remove row and column means (add back the grand mean).
meanData = mean(matrixdata)
meancolData = colMeans(matrixdata)
meanrowData = rowMeans(matrixdata)
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- matrixdata[i, j] + meanData - meancolData[j] -
meanrowData[i]
centering <<- tclVar("3")
})
# Scaling option
switch (optionscaling,
"0.No scaling" = {
scaling <<- tclVar("0")
},
"1.Std Deviation (SD)" = {
# Divide each column by its standard deviation.
scaling <<- tclVar("1")
desviation <<- array(, dim = ncol(matrixdata))
for (j in 1:ncol(matrixdata)) desviation[j] <<- sqrt(var(matrixdata[,
j]))
for (i in 1:nrow(matrixdata)) for (j in 1:ncol(matrixdata)) matrixdata[i,
j] <<- matrixdata[i, j]/desviation[j]
})
# Singular value partitioning (SVP) option
switch (optionSVP,
"JK -(Row Metric Preserving)" = {
# Singular values absorbed by the genotype (row) scores; the
# environment scores are rescaled to a comparable range.
coordgenotype <<- svd(matrixdata)$u %*% diag(svd(matrixdata)$d)
coordenviroment <<- svd(matrixdata)$v
d1 = (max(coordenviroment[, dimension1]) - min(coordenviroment[,
dimension1]))/(max(coordgenotype[, dimension1]) -
min(coordgenotype[, dimension1]))
d2 = (max(coordenviroment[, dimension2]) - min(coordenviroment[,
dimension2]))/(max(coordgenotype[, dimension2]) -
min(coordgenotype[, dimension2]))
d = max(d1, d2)
coordenviroment <<- coordenviroment/d
svp <<- tclVar("0")
},
"GH -(Column Metric Preserving)" = {
# Singular values absorbed by the environment (column) scores.
coordgenotype <<- svd(matrixdata)$u
coordenviroment <<- svd(matrixdata)$v %*% diag(svd(matrixdata)$d)
d1 = (max(coordgenotype[, dimension1]) - min(coordgenotype[,
dimension1]))/(max(coordenviroment[, dimension1]) -
min(coordenviroment[, dimension1]))
d2 = (max(coordgenotype[, dimension2]) - min(coordgenotype[,
dimension2]))/(max(coordenviroment[, dimension2]) -
min(coordenviroment[, dimension2]))
d = max(d1, d2)
coordgenotype <<- coordgenotype/d
svp <<- tclVar("1")
},
"SQ - Symmetrical" = {
# Singular values split evenly (square root on both sides).
coordgenotype <<- svd(matrixdata)$u %*% diag(sqrt(svd(matrixdata)$d))
coordenviroment <<- svd(matrixdata)$v %*% diag(sqrt(svd(matrixdata)$d))
svp <<- tclVar("3")
},
"HJ -(Dual Metric Preserving)" = {
# Singular values absorbed by both sides.
coordgenotype <<- svd(matrixdata)$u %*% diag(svd(matrixdata)$d)
coordenviroment <<- svd(matrixdata)$v %*% diag(svd(matrixdata)$d)
svp <<- tclVar("2")
})
# Initial label positions: genotypes first, then environments.
xtext <<- rbind(coordgenotype,coordenviroment)[,dimension1]
ytext <<- rbind(coordgenotype,coordenviroment)[,dimension2]
}
######################################################################
# Build the log file "Results1.xls" describing the current model:
# the options used, the eigenvalues, the row/column coordinates on
# the displayed axes, and the relative contributions; then open the
# file for the user.
#
# Fixes over the previous version:
# - removed the dead `valorespropios =- svd(matrixdata)$d` block: the
#   `=-` typo negated the singular values, and the three derived
#   variables were never used;
# - envfile is now a local variable (it was leaked into the enclosing
#   environment with `<<-`, unlike its genfile counterpart);
# - the row-contribution table writes its values into columns 1 and 2,
#   so it is now subset with [, 1:2] like the column table (the old
#   [, dimension1:dimension2] subset returned raw squared coordinates
#   whenever dimension1 != 1);
# - the repeated cat()/file.append() pairs are factored into helpers,
#   and the initial append of a possibly stale/nonexistent temp.xls
#   was dropped.
######################################################################
Addfile <- function() {
    # Append a single text fragment to the log via the temp file.
    log_text <- function(txt) {
        cat(txt, file = "temp.xls")
        file.append("Results1.xls", "temp.xls")
    }
    # Append a tab-separated table (decimal comma) to the log.
    log_table <- function(x) {
        write.table(x, file = "temp.xls", sep = "\t", dec = ",")
        file.append("Results1.xls", "temp.xls")
    }
    # Genotype (row) coordinates on the two displayed axes.
    genfile <- as.data.frame(coordgenotype[, dimension1:dimension2])
    rownames(genfile) <- labelgen
    colnames(genfile) <- ejes[dimension1:dimension2]
    # Environment (column) coordinates on the two displayed axes.
    envfile <- as.data.frame(coordenviroment[, dimension1:dimension2])
    rownames(envfile) <- labelenv
    colnames(envfile) <- ejes[dimension1:dimension2]
    # Relative contribution (per mil) of each displayed axis to each row.
    coordgencuad <- coordgenotype^2
    CRFqEi <- coordgencuad
    sumacuagen <- rowSums(coordgencuad)
    CRFqEi[, 1] <- round(((coordgencuad)[, dimension1] * 1000)/sumacuagen, 0)
    CRFqEi[, 2] <- round(((coordgencuad)[, dimension2] * 1000)/sumacuagen, 0)
    CRFqEi <- as.data.frame(CRFqEi[, 1:2])
    rownames(CRFqEi) <- labelgen
    colnames(CRFqEi) <- ejes[dimension1:dimension2]
    # Relative contribution (per mil) of each displayed axis to each column.
    coordenvcuad <- coordenviroment^2
    CRFqEj <- coordenvcuad
    sumacuaenv <- rowSums(coordenvcuad)
    CRFqEj[, 1] <- round(((coordenvcuad)[, dimension1] * 1000)/(sumacuaenv), 0)
    CRFqEj[, 2] <- round(((coordenvcuad)[, dimension2] * 1000)/(sumacuaenv), 0)
    CRFqEj <- as.data.frame(CRFqEj[, 1:2])
    rownames(CRFqEj) <- labelenv
    colnames(CRFqEj) <- ejes[dimension1:dimension2]
    # Write the report. The first cat() creates/overwrites the log file.
    cat("GGE BIPLOT", file = "Results1.xls")
    log_text("\n")
    log_text("Centered by: ")
    log_text(optioncentering)
    log_text("\n")
    log_text("Scaled (Divided) by: ")
    log_text(optionscaling)
    log_text("\n")
    log_text("SVP: ")
    log_text(optionSVP)
    log_text("\n")
    log_text("\n")
    log_text("Eigenvalues and variance explained")
    log_table(round(svd(matrixdata)$d, 3))
    log_text("\n")
    log_text("Row coordinates:")
    log_text("\n")
    log_table(round(genfile, 3))
    log_text("\n")
    log_text("Column coordinates:")
    log_text("\n")
    log_table(round(envfile, 3))
    log_text("\n")
    log_text("\n")
    log_text("RELATIVE CONTRIBUTIONS OF THE FACTOR TO THE ELEMENT:")
    log_text("\n")
    log_text("Row Contributions ----------")
    log_text("\n")
    log_table(CRFqEi)
    log_text("\n")
    log_text("Column Contributions ----------")
    log_text("\n")
    log_table(CRFqEj)
    log_text("\n")
    # Show the report and clean up the scratch file.
    file.show("Results1.xls")
    file.remove("temp.xls")
}
######################################################################
# Redraw the current biplot view (called by tkrplot for every redraw).
#
# screen: TRUE when drawing to the on-screen tkrplot device; FALSE
#         when called by the Save*/print helpers on a file device.
#         (Currently the flag does not change behavior here.)
#
# Dispatches on TypeGraph (1..10) to draw one of the biplot views,
# then draws all labels at xCoords/yCoords and captures the plot
# geometry (par("plt"), par("usr")) so the mouse handlers can map
# pixel clicks back to user coordinates.
# Side effects (via <<-): xCoords, yCoords, labelsVec, colorsVec,
# parPlotSize, usrCoords (and i in view 6).
######################################################################
plotFunctiond <- function(screen = TRUE) {
# Percentage of variance explained by each axis, for the axis titles.
valorespropios = svd(matrixdata)$d
vartotal = round(as.numeric(sum(valorespropios^2)),
2)
varexpl = round(as.numeric((valorespropios^2/vartotal) *
100), 2)
params <- par(bg = background)
# Empty plot sized to hold all genotype and environment points.
# NOTE(review): `sub = subtitle` is inside the paste() call, so it is
# pasted into the x-axis label (a no-op while subtitle is NULL) rather
# than passed to plot() -- presumably it was meant as plot(sub = ...).
plot(rbind(coordgenotype, coordenviroment), main = wintitle,
type = "n", asp = 1, col.main = coltitle, xlab = paste(ejes[dimension1],
varexpl[dimension1], "%", sep = " ", sub = subtitle),
ylab = paste(ejes[dimension2], varexpl[dimension2],
"%", sep = " "))
if (tclvalue(showguidelines) == "1")
abline(h = 0, v = 0, lty = "dotted")
indexLabeledaux<-c()
labeledPoints <- list()
# Graph type: TypeGraph is numeric, so switch() selects by position;
# the case names happen to match the positions.
#
switch(TypeGraph,
# 1: plain biplot (genotypes, environments, or both)
"1" = {
if (tclvalue(showboth) == "0" || tclvalue(showboth) ==
"1")
{
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
}
if (tclvalue(showboth) == "0" || tclvalue(showboth) ==
"2")
{
arrows(centro[1], centro[2], coordenviroment[,
dimension1], coordenviroment[, dimension2],
col = colenv, lty = "dotted", length = 0.05)
points(centro[1], centro[2], pch = 18, col = "black")
}
# Expose only the labels of the entities being shown.
if (tclvalue(showboth) == "0")
{
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
}
if (tclvalue(showboth) == "1")
{
xCoords <<- xtext[1:length(colgenotype)]
yCoords <<- ytext[1:length(colgenotype)]
labelsVec <<- labelgen
colorsVec <<- colgenotype
}
if (tclvalue(showboth) == "2")
{
xCoords <<- xtext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
yCoords <<- ytext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
labelsVec <<- labelenv
colorsVec <<- colenv
}
},
# 2: examine one environment -- its axis, the perpendicular, and the
# projection of every genotype onto the environment axis
"2" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
abline(a = 0, b = coordenviroment[venvironment, dimension2]/coordenviroment[venvironment,
dimension1], col = colenv[venvironment], lty = "solid",lwd = 2.5)
abline(a = 0, b = -coordenviroment[venvironment,
dimension1]/coordenviroment[venvironment, dimension2],
col = colenv[venvironment], lty = "solid",lwd = 2.5)
arrows(centro[1], centro[2], coordenviroment[venvironment,
dimension1], coordenviroment[venvironment, dimension2],
col = colenv[venvironment], lty = "solid", length = 0.1)
xCoords <<- c(xtext[1:length(colgenotype)],xtext[length(colgenotype)+venvironment])
yCoords <<- c(ytext[1:length(colgenotype)],ytext[length(colgenotype)+venvironment])
labelsVec <<- c(labelgen,labelenv[venvironment])
colorsVec <<- c(colgenotype,colenv[venvironment])
for (i in 1:nrow(matrixdata))
{
# Orthogonal projection of genotype i onto the environment axis.
x <- solve(matrix(c(-coordenviroment[venvironment,
dimension2], coordenviroment[venvironment,
dimension1], coordenviroment[venvironment,
dimension1], coordenviroment[venvironment,
dimension2]), nrow = 2), matrix(c(0, coordenviroment[venvironment,
dimension1] * coordgenotype[i, dimension1] +
coordenviroment[venvironment, dimension2] *
coordgenotype[i, dimension2]), ncol = 1))
segments(coordgenotype[i, dimension1], coordgenotype[i,
dimension2], x[1], x[2], lty = "dotted")
}
},
# 3: examine one genotype -- mirror image of view 2
"3" = {
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
abline(a = 0, b = coordgenotype[vgenotype, dimension2]/coordgenotype[vgenotype,
dimension1], col = colgenotype[vgenotype], lty = "solid" , lwd = 2.5)
abline(a = 0, b = -coordgenotype[vgenotype, dimension1]/coordgenotype[vgenotype,
dimension2], col = colgenotype[vgenotype], lty = "solid", lwd = 2.5 )
arrows(centro[1], centro[2], coordgenotype[vgenotype,
dimension1], coordgenotype[vgenotype, dimension2],
col = colgenotype[vgenotype], lty = "solid", length = 0.1)
xCoords <<- rbind(coordgenotype[vgenotype,], coordenviroment)[,dimension1]
yCoords <<- rbind(coordgenotype[vgenotype,], coordenviroment)[,dimension2]
labelsVec <<- c(labelgen[vgenotype],labelenv)
colorsVec <<- c(colgenotype[vgenotype],colenv)
for (i in 1:ncol(matrixdata))
{
# Orthogonal projection of environment i onto the genotype axis.
x <- solve(matrix(c(-coordgenotype[vgenotype,
dimension2], coordgenotype[vgenotype, dimension1],
coordgenotype[vgenotype, dimension1], coordgenotype[vgenotype,
dimension2]), nrow = 2), matrix(c(0, coordgenotype[vgenotype,
dimension1] * coordenviroment[i, dimension1] +
coordgenotype[vgenotype, dimension2] * coordenviroment[i,
dimension2]), ncol = 1))
segments(coordenviroment[i, dimension1], coordenviroment[i,
dimension2], x[1], x[2], lty = "dotted")
}
},
# 4: relation among environments (vectors from the origin)
"4" = {
arrows(centro[1], centro[2], coordenviroment[, dimension1],
coordenviroment[, dimension2], col = colenv,
lty = "solid", length = 0.05)
points(centro[1], centro[2], pch = 18, col = "black")
if (tclvalue(showcircles) == "1")
{
# NOTE(review): coordenviroment[dimension1, ] indexes a ROW by
# the axis number; the column form coordenviroment[, dimension1]
# (as used elsewhere) looks intended -- confirm.
radio = max((max(coordenviroment[dimension1,
]) - min(coordenviroment[dimension1, ])), (max(coordenviroment[dimension2,
]) - min(coordenviroment[dimension2, ])))/10
for (i in 1:5) symbols(0, 0, circles = radio *
i, add = TRUE, inches = FALSE, fg = "black")
}
xCoords <<- xtext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
yCoords <<- ytext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
labelsVec <<- c(labelenv)
colorsVec <<- c(colenv)
},
# 5: compare two genotypes -- connector plus its perpendicular
# bisector through the origin
"5" = {
symbols(coordgenotype[vgenotype1, dimension1], coordgenotype[vgenotype1,
dimension2], circles = 0.2, add = TRUE, inches = FALSE,
fg = colgenotype)
symbols(coordgenotype[vgenotype2, dimension1], coordgenotype[vgenotype2,
dimension2], circles = 0.2, add = TRUE, inches = FALSE,
fg = colgenotype)
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
segments(coordgenotype[vgenotype1, dimension1], coordgenotype[vgenotype1,
dimension2], coordgenotype[vgenotype2, dimension1],
coordgenotype[vgenotype2, dimension2], col = "red",
lty = "solid", lwd = 2.5)
abline(a = 0, b = -(coordgenotype[vgenotype1, dimension1] -
coordgenotype[vgenotype2, dimension1])/(coordgenotype[vgenotype1,
dimension2] - coordgenotype[vgenotype2, dimension2]),
col = "red", lty = "solid",lwd = 2.5)
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# 6: which-won-where -- convex hull of genotypes with sector rays
"6" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
points(centro[1], centro[2], pch = 18, col = "black")
indice = c(chull(coordgenotype[, dimension1], coordgenotype[,
dimension2]))
polygon(coordgenotype[indice, dimension1], coordgenotype[indice,
dimension2], border = "black")
# NOTE(review): i is written with <<-, clobbering any enclosing i.
i <<- 1
while (is.na(indice[i + 1]) == FALSE)
{
# Ray perpendicular to each hull edge, drawn from the origin
# out to the plot boundary.
m<-(coordgenotype[indice[i], dimension2] - coordgenotype[indice[i + 1], dimension2])/(coordgenotype[indice[i],dimension1]-coordgenotype[indice[i + 1],dimension1])
mperp<--1/m
c2<-coordgenotype[indice[i + 1], dimension2] - m*coordgenotype[indice[i + 1],dimension1]
xint<--c2/(m-mperp)
xint<-ifelse(xint<0,min(coordenviroment[, dimension1],coordgenotype[, dimension1]), max(coordenviroment[, dimension1],coordgenotype[, dimension1]))
yint<-mperp*xint
segments(0,0, xint,yint, col="red", lty="solid",lwd=2.5)
i <<- i + 1
}
# Closing edge of the hull (last vertex back to the first).
m<-(coordgenotype[indice[i], dimension2] - coordgenotype[indice[1], dimension2])/(coordgenotype[indice[i],dimension1]-coordgenotype[indice[1],dimension1])
mperp<--1/m
c2<-coordgenotype[indice[i], dimension2] - m*coordgenotype[indice[i],dimension1]
xint<--c2/(m-mperp)
xint<-ifelse(xint<0,min(coordenviroment[, dimension1],coordgenotype[, dimension1]), max(coordenviroment[, dimension1],coordgenotype[, dimension1]))
yint<-mperp*xint
segments(0,0, xint,yint, col="red", lty="solid",lwd=2.5)
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# 7: discrimitiveness vs. representativeness -- average-environment
# axis plus concentric circles
"7" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2], pch = symbol_gen, col = colgenotype)
segments(centro[1], centro[2], coordenviroment[,
dimension1], coordenviroment[, dimension2], col = colenv,
lty = "dotted")
points(centro[1], centro[2], pch = 18, col = "black")
arrows(centro[1], centro[2], mean(coordenviroment[,
dimension1]), mean(coordenviroment[, dimension2]),
col = colenv, lty = "solid", length = 0.1)
symbols(mean(coordenviroment[, dimension1]), mean(coordenviroment[,
dimension2]), circles = 0.1, add = TRUE, inches = FALSE,
fg = colenv)
abline(a = 0, b = mean(coordenviroment[, dimension2])/mean(coordenviroment[,
dimension1]), col = colenv, lty = "solid", lwd = 2.5)
# NOTE(review): same row-vs-column indexing doubt as in view 4.
radio = max((max(coordenviroment[dimension1, ]) -
min(coordenviroment[dimension1, ])), (max(coordenviroment[dimension2,
]) - min(coordenviroment[dimension2, ])))/10
for (i in 1:5) symbols(0, 0, circles = radio * i,
add = TRUE, inches = FALSE, fg = "black")
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# 8: ranking environments relative to the "ideal" environment
"8" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2],pch = symbol_gen, col = colgenotype, cex = vcex)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
points(centro[1], centro[2], pch = 18, col = "black")
med1 = mean(coordenviroment[, dimension1])
med2 = mean(coordenviroment[, dimension2])
abline(a = 0, b = med2/med1, col = colenv, lty = "solid",
lwd = 2.5)
abline(a = 0, b = -med1/med2, col = colenv, lty = "solid",
lwd = 2.5)
symbols(med1, med2, circles = 0.1, add = TRUE, inches = FALSE,
fg = colenv)
# Ideal environment: on the average-environment axis, at the
# length of the longest environment vector.
mod = max((coordenviroment[, dimension1]^2 + coordenviroment[,
dimension2]^2)^0.5)
xcoord = sign(med1) * (mod^2/(1 + med2^2/med1^2))^0.5
ycoord = (med2/med1) * xcoord
arrows(centro[1], centro[2], xcoord, ycoord, col = colenv,
lty = "solid", length = 0.1)
radio = ((xcoord - med1)^2 + (ycoord - med2)^2)^0.5/3
for (i in 1:8) symbols(xcoord, ycoord, circles = radio *
i, add = TRUE, inches = FALSE, fg = "gray")
xCoords <<- xtext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
yCoords <<- ytext[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
labelsVec <<- labelenv
colorsVec <<- colenv
},
# 9: mean vs. stability -- genotype projections onto the
# average-environment axis
"9" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2],pch = symbol_gen, col = colgenotype, cex = vcex)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
med1 = mean(coordenviroment[, dimension1])
med2 = mean(coordenviroment[, dimension2])
abline(a = 0, b = med2/med1, col = colgenotype, lty = "solid",
lwd = 2.5)
abline(a = 0, b = -med1/med2, col = colgenotype,
lty = "solid", lwd = 2.5)
arrows(centro[1], centro[2], med1, med2, col = colgenotype,
lty = "solid", length = 0.1)
symbols(med1, med2, circles = 0.1, add = TRUE, inches = FALSE,
fg = colenv)
for (i in 1:nrow(matrixdata))
{
# Projection of genotype i onto the average-environment axis.
x <- solve(matrix(c(-med2, med1, med1, med2),
nrow = 2), matrix(c(0, med2 * coordgenotype[i,
dimension2] + med1 * coordgenotype[i, dimension1]),
ncol = 1))
segments(coordgenotype[i, dimension1], coordgenotype[i,
dimension2], x[1], x[2], lty = "dotted")
}
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
},
# 10: ranking genotypes relative to the "ideal" genotype
"10" = {
points(coordgenotype[, dimension1], coordgenotype[,
dimension2],pch = symbol_gen, col = colgenotype, cex = vcex)
points(coordenviroment[, dimension1], coordenviroment[,
dimension2], pch = symbol_env, col = colenv)
med1 = mean(coordenviroment[, dimension1])
med2 = mean(coordenviroment[, dimension2])
abline(a = 0, b = med2/med1, col = colgenotype, lty = "solid",
lwd = 2.5)
abline(a = 0, b = -med1/med2, col = colgenotype,
lty = "solid", lwd = 2.5)
# Ideal genotype: the farthest projection (on the mean side)
# of any genotype onto the average-environment axis.
coordx <<- 0
coordy <<- 0
for (i in 1:nrow(matrixdata)) {
x <- solve(matrix(c(-med2, med1, med1, med2),
nrow = 2), matrix(c(0, med2 * coordgenotype[i,
dimension2] + med1 * coordgenotype[i, dimension1]),
ncol = 1))
if (sign(x[1]) == sign(med1)) {
if (abs(x[1]) > abs(coordx)) {
coordx <- x[1]
coordy <- x[2]
}
}
}
arrows(centro[1], centro[2], coordx, coordy, col = colgenotype,
lty = "solid", length = 0.1)
radio = ((coordx - med1)^2 + (coordy - med2)^2)^0.5/3
for (i in 1:10) symbols(coordx, coordy, circles = radio *
i, add = TRUE, inches = FALSE, fg = "gray")
xCoords <<- xtext
yCoords <<- ytext
labelsVec <<- c(labelgen,labelenv)
colorsVec <<- c(colgenotype,colenv)
}
)
# Draw every label at its (possibly user-dragged) position.
indexLabeled <- c(1:length(xCoords))
if (length(indexLabeled)>0)
for (i in (1:length(indexLabeled)))
{
indexClosest <- indexLabeled[i]
text(xCoords[indexClosest],yCoords[indexClosest],
labels=labelsVec[indexClosest], col= colorsVec[indexClosest], cex= vcex)
}
# Record plot geometry for the pixel<->user coordinate mapping used by
# the mouse handlers.
parPlotSize <<- par("plt")
usrCoords <<- par("usr")
#
}
######################################################################
# Three-dimensional biplot on the first three axes, rendered with rgl,
# followed by a one-revolution fly-around animation.
#
# Fix: the initial 0.75 s pause was implemented as an empty
# `while (proc.time()...) {}` busy-wait that pegged a CPU core; it is
# now Sys.sleep(0.75). The animation loop below still polls
# proc.time() deliberately, because the elapsed time drives the
# viewpoint angle.
# NOTE(review): rgl.clear/rgl.bg/rgl.light/rgl.viewpoint are the
# legacy rgl API -- kept for compatibility with the rest of the file.
######################################################################
Biplot3D <- function() {
    dimensions <- 1:3
    rgl.clear("all")
    rgl.bg(sphere = TRUE, color = c("whitesmoke", "gray90"),
        lit = FALSE)
    rgl.light()
    # Genotype points and labels, environment labels.
    points3d(coordgenotype[, 1], coordgenotype[, 2], coordgenotype[,
        3], pch = symbol_gen, col = colgenotype)
    text3d(coordgenotype[, 1], coordgenotype[, 2], coordgenotype[,
        3], labelgen, col = colgenotype, cex = vcex)
    text3d(coordenviroment[, 1], coordenviroment[, 2], coordenviroment[,
        3], labelenv, col = colenv, cex = vcex)
    aspect3d("iso")
    # Draw the three coordinate axes from the bounding-box corner and
    # label them at their midpoints.
    lims <- par3d("bbox")
    segments3d(matrix(c(lims[1], lims[3], lims[5], lims[2],
        lims[3], lims[5], lims[1], lims[3], lims[5], lims[1],
        lims[4], lims[5], lims[1], lims[3], lims[5], lims[1],
        lims[3], lims[6]), byrow = TRUE, ncol = 3), col = "gray60")
    text3d(matrix(c((lims[1] + lims[2])/2, lims[3], lims[5],
        lims[1], (lims[3] + lims[4])/2, lims[5], lims[1],
        lims[3], (lims[5] + lims[6])/2), byrow = TRUE, nrow = 3),
        texts = paste("Dimension ", dimensions), col = "gray60",
        family = "sans", font = 1, cex = vcex)
    if (tclvalue(showguidelines) == "1")
        axes3d()
    # Environment vectors from the origin.
    for (i in 1:(dim(coordenviroment)[1])) {
        linea <- rbind(coordenviroment[i, ], c(0, 0, 0))
        segments3d(linea[, 1], linea[, 2], linea[, 3], col = colenv)
    }
    if (tclvalue(showtitle) == "1")
        title3d(wintitle, color = "black", family = "sans",
            font = 2, cex = vcex)
    # Short pause so the static scene is visible before the animation.
    Sys.sleep(0.75)
    # Fly around the scene: elapsed wall-clock time drives the angle
    # (36 degrees per second), zooming in and back out.
    start <- proc.time()[3]
    while ((i <- 36 * (proc.time()[3] - start)) < 360) rgl.viewpoint(i,
        15 - (i - 90)/4, zoom = (if (i < 180)
            (i + 1)^-0.5
        else (360 - i + 1)^-0.5))
    rgl.viewpoint(zoom = 1)
}
######################################################################
# Modal dialog asking the user to pick one genotype. Stores the
# 1-based index of the chosen genotype in the enclosing variable
# `vgenotype` (-1 on cancel) and blocks until the dialog is closed.
######################################################################
SelectGenotype <- function() {
    dlg <- tktoplevel()
    tkwm.title(dlg, "Select a Genotype")
    picker <- tkwidget(dlg, "ComboBox", editable = FALSE,
        values = labelgen, width = 20)
    accept <- function() {
        # ComboBox "getvalue" is 0-based; convert to a 1-based index.
        vgenotype <<- as.numeric(tclvalue(tcl(picker,
            "getvalue"))) + 1
        tkdestroy(dlg)
    }
    dismiss <- function() {
        vgenotype <<- -1
        tkdestroy(dlg)
    }
    ok_btn <- tkbutton(dlg, text = "   OK   ", command = accept)
    cancel_btn <- tkbutton(dlg, text = " Cancel ",
        command = dismiss)
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = "Select a Genotype: "),
        picker)
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(ok_btn, cancel_btn)
    tkgrid(tklabel(dlg, text = "    "))
    tkfocus(dlg)
    tkwait.window(dlg)
}
######################################################################
# Modal dialog asking the user to pick one environment. Stores the
# 1-based index of the chosen environment in the enclosing variable
# `venvironment` (-1 on cancel) and blocks until the dialog is closed.
######################################################################
SelectEnvironment <- function() {
    dlg <- tktoplevel()
    tkwm.title(dlg, "Select an Environment")
    picker <- tkwidget(dlg, "ComboBox",
        editable = FALSE, values = labelenv, width = 20)
    accept <- function() {
        # ComboBox "getvalue" is 0-based; convert to a 1-based index.
        venvironment <<- as.numeric(tclvalue(tcl(picker,
            "getvalue"))) + 1
        tkdestroy(dlg)
    }
    dismiss <- function() {
        venvironment <<- -1
        tkdestroy(dlg)
    }
    ok_btn <- tkbutton(dlg, text = "   OK   ",
        command = accept)
    cancel_btn <- tkbutton(dlg, text = " Cancel ",
        command = dismiss)
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = "Select an Environment: "),
        picker)
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(ok_btn, cancel_btn)
    tkgrid(tklabel(dlg, text = "    "))
    tkfocus(dlg)
    tkwait.window(dlg)
}
######################################################################
# Modal dialog asking the user to pick two genotypes to compare.
# Stores the 1-based indices in the enclosing variables `vgenotype1`
# and `vgenotype2` (-1 on cancel) and blocks until the dialog closes.
######################################################################
SelectTwoGenotype <- function() {
    dlg <- tktoplevel()
    tkwm.title(dlg, "Select Genotypes")
    # Reset the selection before the user interacts with the dialog.
    vgenotype1 <<- -1
    vgenotype2 <<- -1
    picker1 <- tkwidget(dlg, "ComboBox", editable = FALSE,
        values = labelgen, width = 20)
    picker2 <- tkwidget(dlg, "ComboBox", editable = FALSE,
        values = labelgen, width = 20)
    accept <- function() {
        # ComboBox "getvalue" is 0-based; convert to 1-based indices.
        vgenotype1 <<- as.numeric(tclvalue(tcl(picker1,
            "getvalue"))) + 1
        vgenotype2 <<- as.numeric(tclvalue(tcl(picker2,
            "getvalue"))) + 1
        tkdestroy(dlg)
    }
    dismiss <- function() {
        vgenotype1 <<- -1
        vgenotype2 <<- -1
        tkdestroy(dlg)
    }
    ok_btn <- tkbutton(dlg, text = "   OK   ", command = accept)
    cancel_btn <- tkbutton(dlg, text = " Cancel ",
        command = dismiss)
    tkgrid(tklabel(dlg, text = "Select two genotypes to compare: "))
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(tklabel(dlg, text = "Genotype 1: "), picker1)
    tkgrid(tklabel(dlg, text = "Genotype 2: "), picker2)
    tkgrid(tklabel(dlg, text = "    "))
    tkgrid(ok_btn, cancel_btn)
    tkgrid(tklabel(dlg, text = "    "))
    tkfocus(dlg)
    tkwait.window(dlg)
}
######################################################################
# Save the current biplot to a JPEG file chosen via a save dialog.
#
# Fix: the old suffix test only recognized ".jpg", so a name such as
# "plot.jpeg" (a spelling the file-type filter itself offers) was
# silently renamed to "plot.jpeg.jpg". Both spellings are now
# accepted, case-insensitively.
# NOTE(review): restoreConsole is accepted only by Windows bitmap
# devices -- confirm the target platform before porting.
######################################################################
SaveFileJPG <- function() {
    FileName <- tclvalue(tkgetSaveFile(filetypes = "{{Jpeg files} {.jpg .jpeg}} {{All files} *}"))
    if (nchar(FileName)) {
        # Append a default extension unless one is already present.
        if (!grepl("\\.jpe?g$", FileName, ignore.case = TRUE))
            FileName <- paste(FileName, ".jpg", sep = "")
        jpeg(FileName, width = 8, height = 8, units = "in",
            restoreConsole = FALSE, res = 96, quality = 50)
        plotFunctiond(screen = FALSE)
        dev.off()
    }
}
#######################################
# Guarda la imagen con formato Metafile
#######################################
# SaveFileMetafile <- function() {
# FileName <- tclvalue(tkgetSaveFile(filetypes = "{{Metafiles} {.wmf}} {{All files} *}"))
# if (nchar(FileName)) {
# nn <- nchar(FileName)
# if (nn < 5 || substr(FileName, nn - 3, nn) != ".wmf")
# FileName <- paste(FileName, ".wmf", sep = "")
# win.metafile(FileName, width = 8, height = 8, restoreConsole = FALSE)
# plotFunctiond(screen = FALSE)
# dev.off()
# }
# }
######################################################################
# Save the current biplot to a PostScript file chosen via a save
# dialog; a ".ps" extension is appended when missing.
######################################################################
SaveFilePostscript <- function() {
    target <- tclvalue(tkgetSaveFile(filetypes = "{{Postscript files} {.ps}} {{All files} *}"))
    if (!nzchar(target))
        return(invisible(NULL))
    len <- nchar(target)
    if (len < 4 || substr(target, len - 2, len) != ".ps")
        target <- paste0(target, ".ps")
    postscript(file = target, width = 8, height = 8,
        horizontal = FALSE, onefile = FALSE, paper = "default",
        family = "URWHelvetica")
    plotFunctiond(screen = FALSE)
    dev.off()
}
######################################################################
# Save the current biplot to a PDF file chosen via a save dialog;
# a ".pdf" extension is appended when missing.
######################################################################
SaveFilePDF <- function() {
    target <- tclvalue(tkgetSaveFile(filetypes = "{{PDF files} {.pdf}} {{All files} *}"))
    if (!nzchar(target))
        return(invisible(NULL))
    len <- nchar(target)
    if (len < 5 || substr(target, len - 3, len) != ".pdf")
        target <- paste0(target, ".pdf")
    pdf(target, width = 7, height = 7)
    plotFunctiond(screen = FALSE)
    dev.off()
}
######################################################################
# Save the current biplot to a BMP file chosen via a save dialog;
# a ".bmp" extension is appended when missing.
######################################################################
SaveFileBmp <- function() {
    target <- tclvalue(tkgetSaveFile(filetypes = "{{Bitmap files} {.bmp}} {{All files} *}"))
    if (!nzchar(target))
        return(invisible(NULL))
    len <- nchar(target)
    if (len < 5 || substr(target, len - 3, len) != ".bmp")
        target <- paste0(target, ".bmp")
    bmp(target, width = 8, height = 8, units = "in",
        restoreConsole = FALSE, res = 96)
    plotFunctiond(screen = FALSE)
    dev.off()
}
######################################################################
# Save the current biplot to a PNG file chosen via a save dialog;
# a ".png" extension is appended when missing.
######################################################################
SaveFilePng <- function() {
    target <- tclvalue(tkgetSaveFile(filetypes = "{{Png files} {.png}} {{All files} *}"))
    if (!nzchar(target))
        return(invisible(NULL))
    len <- nchar(target)
    if (len < 5 || substr(target, len - 3, len) != ".png")
        target <- paste0(target, ".png")
    png(target, width = 8, height = 8, units = "in",
        restoreConsole = FALSE, res = 96)
    plotFunctiond(screen = FALSE)
    dev.off()
}
######################################################################
# Save the current biplot as PicTeX source chosen via a save dialog;
# a ".tex" extension is appended when missing.
######################################################################
SaveFileTeX <- function() {
    target <- tclvalue(tkgetSaveFile(filetypes = "{{TeX files} {.tex}} {{All files} *}"))
    if (!nzchar(target))
        return(invisible(NULL))
    len <- nchar(target)
    if (len < 5 || substr(target, len - 3, len) != ".tex")
        target <- paste0(target, ".tex")
    pictex(target, width = 8, height = 8, debug = FALSE,
        bg = "white", fg = "black")
    plotFunctiond(screen = FALSE)
    dev.off()
}
# Print <- function() {
#try(win.print(), silent = TRUE)
#if (geterrmessage() != "Error in win.print() : unable to start device devWindows\n") {
# plotFunctiond(screen = FALSE)
# dev.off()
#}
# }
#
# Pantalla principal
#
OnOKModelSelection <- function()
{
  ## Callback for the OK button of the "Model Selection" dialog: record the
  ## chosen model options, refit the model, close the dialog and build the
  ## main interactive biplot window with its full menu system.
  ##
  ## Fixes in this revision:
  ##   * "Back to original data" used local `<-` for the view selection and
  ##     window title, so neither was actually reset (siblings use `<<-`).
  ##   * UI typo "Gidelines" -> "Guidelines".
  ##   * wintitle "representativenss" made consistent with its menu label.

  ## ComboBox "getvalue" returns a 0-based index, hence the +1 for R.
  optioncentering <<- datacentering[as.numeric(tclvalue(tcl(comboscentering,
                                                            "getvalue"))) + 1]
  optionscaling <<- datascaling[as.numeric(tclvalue(tcl(comboscaling,
                                                        "getvalue"))) + 1]
  optionSVP <<- dataSVP[as.numeric(tclvalue(tcl(comboSVP,
                                                "getvalue"))) + 1]
  Models()
  tkdestroy(winmodel)

  ## Main window holding the biplot image.
  winplot <- tktoplevel()
  tkwm.title(winplot, "GGE Biplot")
  img <<- tkrplot(winplot, fun = plotFunctiond, hscale = 1.5, vscale = 1.5)
  tkpack(img, expand = "TRUE", fill = "both")
  ## Left button drags point labels; right button opens a context menu.
  tkbind(img, "<B1-Motion>", OnLeftClick.move)
  tkbind(img, "<ButtonPress-1>", OnLeftClick.down)
  tkbind(img, "<ButtonRelease-1>", OnLeftClick.up)
  tkbind(img, "<Button-3>", OnRightClick)

  ## Menu bar and its submenus.
  topMenu <- tkmenu(winplot)
  tkconfigure(winplot, menu = topMenu)
  menuFile <- tkmenu(topMenu, tearoff = FALSE)
  menuView <- tkmenu(topMenu, tearoff = FALSE)
  menuBiplotTools <- tkmenu(topMenu, tearoff = FALSE)
  menuFormat <- tkmenu(topMenu, tearoff = FALSE)
  menuChangeColor <- tkmenu(topMenu, tearoff = FALSE)
  menuChangeFont <- tkmenu(topMenu, tearoff = FALSE)
  menuRank <- tkmenu(topMenu, tearoff = FALSE)
  menuModels <- tkmenu(topMenu, tearoff = FALSE)
  menuBiplot <- tkmenu(topMenu, tearoff = FALSE)
  menuDividedBy <- tkmenu(topMenu, tearoff = FALSE)
  menuCenteredBy <- tkmenu(topMenu, tearoff = FALSE)
  menuSVP <- tkmenu(topMenu, tearoff = FALSE)
  menuSaveAs <- tkmenu(topMenu, tearoff = FALSE)

  ## File menu: log file, copy/save the image, exit.
  tkadd(menuFile, "command", label = "Open log file",
        command = function() Addfile())
  tkadd(menuFile, "separator")
  tkadd(menuFile, "command", label = "Copy image",
        command = function() tkrreplot(img))
  tkadd(menuFile, "cascade", label = "Save image", menu = menuSaveAs)
  tkadd(menuSaveAs, "command", label = "PDF file",
        command = function() SaveFilePDF())
  tkadd(menuSaveAs, "command", label = "Postscript file",
        command = function() SaveFilePostscript())
  # tkadd(menuSaveAs, "command", label = "Metafile",
  #       command = function() SaveFileMetafile())
  tkadd(menuSaveAs, "command", label = "Bmp file",
        command = function() SaveFileBmp())
  tkadd(menuSaveAs, "command", label = "Png file",
        command = function() SaveFilePng())
  tkadd(menuSaveAs, "command", label = "Jpg/Jpeg file",
        command = function() SaveFileJPG())
  tkadd(menuSaveAs, "command", label = "TeX file",
        command = function() SaveFileTeX())
  # tkadd(menuFile, "command", label = "Print image",
  #       command = function() Print())
  tkadd(menuFile, "separator")
  tkadd(menuFile, "command", label = "Exit",
        command = function() tkdestroy(winplot))

  ## Biplot menu: which pair of principal components is displayed.
  tkadd(menuBiplot, "radiobutton", label = "PC1 vs. PC2 (Primary)", variable = vaxis, value = "0",
        command = function()
        {
          dimension1 <<- 1
          dimension2 <<- 2
          tkrreplot(img)
        })
  tkadd(menuBiplot, "radiobutton", label = "PC3 vs. PC4", variable = vaxis, value = "1",
        command = function()
        {
          dimension1 <<- 3
          dimension2 <<- 4
          tkrreplot(img)
        })
  tkadd(menuBiplot, "radiobutton", label = "PC5 vs. PC6", variable = vaxis, value = "2",
        command = function()
        {
          dimension1 <<- 5
          dimension2 <<- 6
          tkrreplot(img)
        })
  tkadd(menuBiplot, "separator")
  tkadd(menuBiplot, "radiobutton", label = "PC1 vs. PC3", variable = vaxis, value = "3",
        command = function()
        {
          dimension1 <<- 1
          dimension2 <<- 3
          tkrreplot(img)
        })
  tkadd(menuBiplot, "radiobutton", label = "PC2 vs. PC3", variable = vaxis, value = "4",
        command = function()
        {
          dimension1 <<- 2
          dimension2 <<- 3
          tkrreplot(img)
        })
  tkadd(menuBiplot, "separator")
  tkadd(menuBiplot, "command", label = "Biplot 3D",
        command = function() Biplot3D())

  ## View menu: radiobuttons selecting what is plotted (entries 0-2 are
  ## enabled/disabled from the Biplot Tools callbacks below).
  tkadd(menuView, "radiobutton", label = "Show Both", variable = showboth, value = "0",
        command = function() tkrreplot(img))
  tkadd(menuView, "radiobutton", label = "Show Genotypes", variable = showboth, value = "1",
        command = function() tkrreplot(img))
  tkadd(menuView, "radiobutton", label = "Show Environments", variable = showboth, value = "2",
        command = function() tkrreplot(img))
  tkadd(menuView, "separator")

  ## Model menus: scaling, centering and singular value partitioning.
  tkadd(menuDividedBy, "radiobutton", label = "0.No scaling", variable = scaling, value = "0",
        command = function()
        {
          optionscaling <<- "0.No scaling"
          Models()
          tkrreplot(img)
        })
  tkadd(menuDividedBy, "radiobutton", label = "1.Std Deviation (SD)", variable = scaling, value = "1",
        command = function()
        {
          optionscaling <<- "1.Std Deviation (SD)"
          Models()
          tkrreplot(img)
        })
  tkadd(menuCenteredBy, "radiobutton", label = "0.No centering", variable = centering, value = "0",
        command = function()
        {
          optioncentering <<- "0.No centering"
          Models()
          tkrreplot(img)
        })
  tkadd(menuCenteredBy, "radiobutton", label = "1.Global-Centered E+G+GE", variable = centering, value = "1",
        command = function()
        {
          optioncentering <<- "1.Global-Centered E+G+GE"
          Models()
          tkrreplot(img)
        })
  tkadd(menuCenteredBy, "radiobutton", label = "2.Tester-Centered G+GE", variable = centering, value = "2",
        command = function()
        {
          optioncentering <<- "2.Tester-Centered G+GE"
          Models()
          tkrreplot(img)
        })
  tkadd(menuCenteredBy, "radiobutton", label = "3.Double-Centered GE", variable = centering, value = "3",
        command = function()
        {
          optioncentering <<- "3.Double-Centered GE"
          Models()
          tkrreplot(img)
        })
  tkadd(menuSVP, "radiobutton", label = "JK -(Row Metric Preserving)", variable = svp, value = "0",
        command = function()
        {
          optionSVP <<- "JK -(Row Metric Preserving)"
          Models()
          tkrreplot(img)
        })
  tkadd(menuSVP, "radiobutton", label = "GH -(Column Metric Preserving)", variable = svp, value = "1",
        command = function()
        {
          optionSVP <<- "GH -(Column Metric Preserving)"
          Models()
          tkrreplot(img)
        })
  tkadd(menuSVP, "radiobutton", label = "HJ -(Dual Metric Preserving)", variable = svp, value = "2",
        command = function()
        {
          optionSVP <<- "HJ -(Dual Metric Preserving)"
          Models()
          tkrreplot(img)
        })
  tkadd(menuSVP, "radiobutton", label = "SQ - Symmetrical", variable = svp, value = "3",
        command = function()
        {
          optionSVP <<- "SQ - Symmetrical"
          Models()
          tkrreplot(img)
        })

  ## View menu toggles (entries 4-6).
  tkadd(menuView, "checkbutton", label = "Show/Hide Title", variable = showtitle,
        command = function()
        {
          if (tclvalue(showtitle) == "1") wintitle <<- "GGE Biplot"
          if (tclvalue(showtitle) == "0") wintitle <<- NULL
          tkrreplot(img)
        })
  ## Typo fix: was "Show/Hide Gidelines".
  tkadd(menuView, "checkbutton", label = "Show/Hide Guidelines", variable = showguidelines,
        command = function() tkrreplot(img))
  tkadd(menuView, "checkbutton", label = "Add/Remove Symbols", variable = showsymbols,
        command = function()
        {
          if (tclvalue(showsymbols) == "1")
          {
            symbol_gen <<- 20
            symbol_env <<- 18
          }
          if (tclvalue(showsymbols) == "0")
          {
            symbol_gen <<- NA_integer_
            symbol_env <<- NA_integer_
          }
          tkrreplot(img)
        })

  ## Biplot Tools menu: specialised views. Each one sets TypeGraph and
  ## disables the "Show Genotypes"/"Show Environments" radios (entries 1, 2).
  tkadd(menuBiplotTools, "command", label = "Examine a Genotype",
        command = function()
        {
          SelectGenotype()
          if (vgenotype != -1)   # -1 means the selection was cancelled
          {
            if (tclvalue(showtitle) == "1") wintitle <<- "Examine a Genotype"
            TypeGraph <<- 3
            tkentryconfigure(menuView, 2, state = "disabled")
            tkentryconfigure(menuView, 1, state = "disabled")
            tkrreplot(img)
          }
        })
  tkadd(menuBiplotTools, "command", label = "Examine an Environment",
        command = function()
        {
          SelectEnvironment()
          if (venvironment != -1)   # -1 means the selection was cancelled
          {
            TypeGraph <<- 2
            if (tclvalue(showtitle) == "1") wintitle <<- "Examine an Environment"
            tkentryconfigure(menuView, 2, state = "disabled")
            tkentryconfigure(menuView, 1, state = "disabled")
            tkrreplot(img)
          }
        })
  tkadd(menuBiplotTools, "separator")
  tkadd(menuBiplotTools, "command", label = "Relation among Environments",
        command = function()
        {
          TypeGraph <<- 4
          showcircles <<- tclVar("1")
          if (tclvalue(showtitle) == "1") wintitle <<- "Relationship among environments"
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuBiplotTools, "separator")
  tkadd(menuBiplotTools, "command", label = "Compare two Genotypes",
        command = function()
        {
          SelectTwoGenotype()
          TypeGraph <<- 5
          if (tclvalue(showtitle) == "1") wintitle <<- "Compare two Genotypes"
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuBiplotTools, "separator")
  tkadd(menuBiplotTools, "command", label = "Which Won Where/What",
        command = function()
        {
          TypeGraph <<- 6
          if (tclvalue(showtitle) == "1") wintitle <<- "Which Won Where/What"
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuBiplotTools, "command", label = "Discrimitiveness vs. representativeness",
        command = function()
        {
          TypeGraph <<- 7
          ## Title spelling made consistent with the menu label (was
          ## "representativenss").
          if (tclvalue(showtitle) == "1") wintitle <<- "Discrimitiveness vs. representativeness"
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuBiplotTools, "command", label = "Mean vs. Stability",
        command = function()
        {
          if (tclvalue(showtitle) == "1") wintitle <<- "Mean vs. Stability"
          TypeGraph <<- 9
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuBiplotTools, "separator")
  tkadd(menuBiplotTools, "cascade", label = "Rank Environment/Genotypes",
        menu = menuRank)
  tkadd(menuRank, "radiobutton", label = "with ref.to the 'Ideal' Environment", variable = vrank, value = "1",
        command = function()
        {
          TypeGraph <<- 8
          if (tclvalue(showtitle) == "1") wintitle <<- "Ranking Environments"
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuRank, "radiobutton", label = "with ref.to the 'Ideal' Genotype", variable = vrank, value = "2",
        command = function()
        {
          TypeGraph <<- 10
          if (tclvalue(showtitle) == "1") wintitle <<- "Ranking Genotypes"
          tkentryconfigure(menuView, 2, state = "disabled")
          tkentryconfigure(menuView, 1, state = "disabled")
          tkrreplot(img)
        })
  tkadd(menuBiplotTools, "separator")
  tkadd(menuBiplotTools, "command", label = "Back to original data",
        command = function()
        {
          TypeGraph <<- 1
          Models()
          ## BUG FIX: both resets below previously used plain local `<-`,
          ## so the view never returned to "Show Both" and the title was
          ## never restored. Resetting the Tcl variable's *value* keeps the
          ## existing radiobuttons attached to it.
          tclvalue(showboth) <- "0"
          if (tclvalue(showtitle) == "1") wintitle <<- "GGE Biplot"
          tkentryconfigure(menuBiplotTools, 0, state = "normal")
          tkentryconfigure(menuView, 2, state = "normal")
          tkentryconfigure(menuView, 1, state = "normal")
          tkrreplot(img)
        })

  ## Format menu: title text, colours and font size.
  tkadd(menuFormat, "command", label = "Plot Title",
        command = function()
        {
          ReturnVal <- modalDialog("GGE Biplot", "Give your biplot a title: ", "")
          if (ReturnVal == "ID_CANCEL") return()
          wintitle <<- ReturnVal
          tkrreplot(img)
          tkfocus(winplot)
        })
  tkadd(menuFormat, "separator")
  tkadd(menuChangeFont, "command", label = "Default",
        command = function()
        {
          vcex <<- 1
          tkrreplot(img)
        })
  tkadd(menuChangeFont, "command", label = "Larger",
        command = function()
        {
          vcex <<- 1.5
          tkrreplot(img)
        })
  tkadd(menuChangeFont, "command", label = "Smaller",
        command = function()
        {
          vcex <<- 0.5
          tkrreplot(img)
        })
  tkadd(menuChangeColor, "command", label = "Background",
        command = function()
        {
          background <<- ChangeColorv(background)
          tkrreplot(img)
        })
  tkadd(menuChangeColor, "separator")
  tkadd(menuChangeColor, "command", label = "Genotype labels",
        command = function()
        {
          ## [] keeps the vector shape while recolouring every genotype label.
          colgenotype[] <<- ChangeColorv(colgenotype[1])
          tkrreplot(img)
        })
  tkadd(menuChangeColor, "command", label = "Environment labels",
        command = function()
        {
          colenv[] <<- ChangeColorv(colenv[1])
          tkrreplot(img)
        })
  tkadd(menuChangeColor, "separator")
  tkadd(menuChangeColor, "command", label = "Biplot Title",
        command = function()
        {
          coltitle <<- ChangeColorv(coltitle)
          tkrreplot(img)
        })
  tkadd(menuFormat, "cascade", label = "Change Color", menu = menuChangeColor)
  tkadd(menuFormat, "cascade", label = "Change Font", menu = menuChangeFont)
  tkadd(menuModels, "cascade", label = "Scaled (divided) by", menu = menuDividedBy)
  tkadd(menuModels, "cascade", label = "Centered by", menu = menuCenteredBy)
  tkadd(menuModels, "cascade", label = "S.V.P.", menu = menuSVP)
  tkadd(topMenu, "cascade", label = "File", menu = menuFile)
  tkadd(topMenu, "cascade", label = "View", menu = menuView)
  tkadd(topMenu, "cascade", label = "Biplot Tools", menu = menuBiplotTools)
  tkadd(topMenu, "cascade", label = "Format", menu = menuFormat)
  tkadd(topMenu, "cascade", label = "Models", menu = menuModels)
  tkadd(topMenu, "cascade", label = "Biplot", menu = menuBiplot)
  tkfocus(winplot)
  ## When re-opened in a specialised view, disable the guidelines toggle
  ## (menuView entry 5). Was a one-iteration for-loop over the constant 5.
  if (TypeGraph != "1")
  {
    tkentryconfigure(menuView, 5, state = "disabled")
  }
}
#
labelClosestPoint <- function(xClick, yClick, imgXcoords, imgYcoords)
{
  ## Locate the plotted point nearest the right-click (all arguments are in
  ## image pixel coordinates) and pop up a context menu offering to change
  ## that point's label or colour.
  squared.Distance <- (xClick - imgXcoords)^2 + (yClick - imgYcoords)^2
  indexClosest <- which.min(squared.Distance)
  RightClickOnPoint.Menu <- tkmenu(img, tearoff = FALSE)
  tkadd(RightClickOnPoint.Menu, "command", label = "Change Label",
        command = function() {
          ## Small dialog with an entry widget pre-filled with the current label.
          mm <- tktoplevel()
          tkwm.title(mm, labelsVec[indexClosest])
          framemm <- tkframe(mm, relief = "groove", borderwidth = 2,
                             background = "white")
          ## Quirk kept from the original: the current label text is used as
          ## the *name* of the Tcl variable backing the entry widget -- TODO
          ## confirm this is intended (it works because tclvalue<- and
          ## textvariable both accept a variable name).
          Namei <- labelsVec[indexClosest]
          tclvalue(Namei) <- labelsVec[indexClosest]
          entry.Namei <- tkentry(framemm, width = "11", textvariable = Namei)
          NameVali <- entry.Namei
          OnOKli <- function()
          {
            NameVali <- tclvalue(Namei)
            if (TypeGraph == 1)
            {
              if (tclvalue(showboth) == "0")
              {
                ## Both sets shown: genotype labels first, then environments.
                labelsVec[indexClosest] <<- NameVali
                labelgen <<- labelsVec[1:length(colgenotype)]
                labelenv <<- labelsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
              }
              if (tclvalue(showboth) == "1")
              {
                labelsVec[indexClosest] <<- NameVali
                labelgen <<- labelsVec[1:length(colgenotype)]
              }
              if (tclvalue(showboth) == "2")
              {
                ## BUG FIX: was a local `<-`, unlike every sibling branch, so
                ## the new label never persisted in labelsVec.
                labelsVec[indexClosest] <<- NameVali
                labelenv <<- labelsVec[1:length(colenv)]
              }
            }
            if (TypeGraph == 4 || TypeGraph == 8)
            {
              labelsVec[indexClosest] <<- NameVali
              labelenv <<- labelsVec[1:length(colenv)]
            }
            if (TypeGraph == 5 || TypeGraph == 6 || TypeGraph == 7 || TypeGraph == 9 || TypeGraph == 10)
            {
              labelsVec[indexClosest] <<- NameVali
              labelgen <<- labelsVec[1:length(colgenotype)]
              labelenv <<- labelsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
            }
            tkrreplot(img)
            tkdestroy(mm)
          }
          OK.butli <- tkbutton(framemm, text = "Change label", command = OnOKli, width = 12)
          tkbind(entry.Namei, "<Return>", OnOKli)
          tkpack(entry.Namei, OK.butli, expand = "TRUE", side = "left", fill = "both")
          tkpack(framemm, expand = "TRUE", side = "top", fill = "both")
        })
  tkadd(RightClickOnPoint.Menu, "command", label = "Change Color",
        command = function()
        {
          ## NOTE(review): colorsVec is updated with a local `<-` in every
          ## branch, so only colgenotype/colenv persist; colorsVec itself
          ## reverts on the next invocation -- confirm this is intended.
          if (TypeGraph == 1)
          {
            if (tclvalue(showboth) == "0")
            {
              colorsVec[indexClosest] <- ChangeColorv(colorsVec[indexClosest])
              colgenotype <<- colorsVec[1:length(colgenotype)]
              colenv <<- colorsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
            }
            if (tclvalue(showboth) == "1")
            {
              colorsVec[indexClosest] <- ChangeColorv(colorsVec[indexClosest])
              colgenotype <<- colorsVec[1:length(colgenotype)]
            }
            if (tclvalue(showboth) == "2")
            {
              colorsVec[indexClosest] <- ChangeColorv(colorsVec[indexClosest])
              colenv <<- colorsVec[1:length(colenv)]
            }
          }
          if (TypeGraph == 4 || TypeGraph == 8)
          {
            colorsVec[indexClosest] <- ChangeColorv(colorsVec[indexClosest])
            colenv <<- colorsVec[1:length(colenv)]
          }
          if (TypeGraph == 5 || TypeGraph == 6 || TypeGraph == 7 || TypeGraph == 9 || TypeGraph == 10)
          {
            colorsVec[indexClosest] <- ChangeColorv(colorsVec[indexClosest])
            colgenotype <<- colorsVec[1:length(colgenotype)]
            colenv <<- colorsVec[(length(colgenotype)+1):(length(colgenotype)+length(colenv))]
          }
          tkrreplot(img)
        })
  ## Show the menu at the current pointer position.
  tkpopup(RightClickOnPoint.Menu, tclvalue(tkwinfo("pointerx",
          img)), tclvalue(tkwinfo("pointery", img)))
}
#
OnRightClick <- function(x, y)
{
  ## Right-button handler: convert the click (tk pixel coordinates, y axis
  ## running top-down) into the image's pixel frame, project the plotted
  ## points into the same frame, and open the context menu on the nearest
  ## point. The unused xPlotCoord/yPlotCoord computations were removed.
  width <- as.numeric(tclvalue(tkwinfo("reqwidth", img)))
  height <- as.numeric(tclvalue(tkwinfo("reqheight", img)))
  ## Plot region bounds in pixels (parPlotSize holds fractions of the image).
  xMin <- parPlotSize[1] * width
  xMax <- parPlotSize[2] * width
  yMin <- parPlotSize[3] * height
  yMax <- parPlotSize[4] * height
  rangeX <- usrCoords[2] - usrCoords[1]
  rangeY <- usrCoords[4] - usrCoords[3]
  ## Points (xCoords/yCoords, user coordinates) mapped to image pixels.
  imgXcoords <- (xCoords - usrCoords[1]) * (xMax - xMin) / rangeX + xMin
  imgYcoords <- (yCoords - usrCoords[3]) * (yMax - yMin) / rangeY + yMin
  ## +0.5 centres the click on the pixel; flip y because tk counts from top.
  xClick <- as.numeric(x) + 0.5
  yClick <- height - (as.numeric(y) + 0.5)
  labelClosestPoint(xClick, yClick, imgXcoords, imgYcoords)
}
# Button-1 release handler: after a label drag, ask whether to keep the new
# position (Yes) or restore the coordinates saved by OnLeftClick.down (No).
# Views 2 and 3 (single environment/genotype) have no movable labels.
OnLeftClick.up <- function(x,y)
{
if (TypeGraph != 2 && TypeGraph != 3)
{
msg <- ("-To change the label press Yes.\n-To remove it press No.")
mbval <- tkmessageBox(title="Change of label",message=msg,type="yesno",icon="question")
if (tclvalue(mbval)=="yes")
{
}
if(tclvalue(mbval)=="no")
{
# "No": put the label back where it was before the drag (xAnt/yAnt were
# captured on button press). In environment-only views the label lives
# after the genotype block of xtext/ytext, hence the index offset.
if ((TypeGraph == 4) || (TypeGraph == 1 && tclvalue(showboth) == "2") || (TypeGraph == 8))
{
xtext[indexClosest + length(colgenotype)] <<- xAnt
ytext[indexClosest + length(colgenotype)] <<- yAnt
}
else
{
xtext[indexClosest] <<- xAnt
ytext[indexClosest] <<- yAnt
}
}
# Redraw so the kept/restored label position becomes visible.
tkrreplot(img)
}
}
OnLeftClick.move <- function(x, y)
{
  ## Drag handler: while button-1 is held, move the label selected by
  ## OnLeftClick.down so it follows the pointer. The dead imgXcoords /
  ## imgYcoords computations (never used here) were removed.
  width <- as.numeric(tclvalue(tkwinfo("reqwidth", img)))
  height <- as.numeric(tclvalue(tkwinfo("reqheight", img)))
  ## Plot region bounds in pixels (parPlotSize holds fractions of the image).
  xMin <- parPlotSize[1] * width
  xMax <- parPlotSize[2] * width
  yMin <- parPlotSize[3] * height
  yMax <- parPlotSize[4] * height
  rangeX <- usrCoords[2] - usrCoords[1]
  rangeY <- usrCoords[4] - usrCoords[3]
  ## Pointer position in pixels (+0.5 centres on the pixel; flip y since tk
  ## counts from the top), then converted to user (plot) coordinates.
  xClick <- as.numeric(x) + 0.5
  yClick <- height - (as.numeric(y) + 0.5)
  xPlotCoord <- usrCoords[1] + (xClick - xMin) * rangeX / (xMax - xMin)
  yPlotCoord <- usrCoords[3] + (yClick - yMin) * rangeY / (yMax - yMin)
  if ((TypeGraph == 4) || (TypeGraph == 1 && tclvalue(showboth) == "2") || (TypeGraph == 8))
  {
    ## Environment-only views: the label lives after the genotype block.
    xtext[indexClosest + length(colgenotype)] <<- xPlotCoord
    ytext[indexClosest + length(colgenotype)] <<- yPlotCoord
  }
  else if (TypeGraph == 2 || TypeGraph == 3)
  {
    ## Single genotype/environment views: labels are not movable.
  }
  else
  {
    xtext[indexClosest] <<- xPlotCoord
    ytext[indexClosest] <<- yPlotCoord
  }
  tkrreplot(img)
}
OnLeftClick.down <- function(x, y)
{
  ## Button-1 press handler: remember which plotted point is nearest the
  ## click (indexClosest) and save that label's current position (xAnt/yAnt)
  ## so OnLeftClick.up can undo a drag. The unused xPlotCoord/yPlotCoord
  ## computations were removed.
  width <- as.numeric(tclvalue(tkwinfo("reqwidth", img)))
  height <- as.numeric(tclvalue(tkwinfo("reqheight", img)))
  ## Plot region bounds in pixels (parPlotSize holds fractions of the image).
  xMin <- parPlotSize[1] * width
  xMax <- parPlotSize[2] * width
  yMin <- parPlotSize[3] * height
  yMax <- parPlotSize[4] * height
  rangeX <- usrCoords[2] - usrCoords[1]
  rangeY <- usrCoords[4] - usrCoords[3]
  ## Points (user coordinates) projected into the image's pixel frame.
  imgXcoords <- (xCoords - usrCoords[1]) * (xMax - xMin) / rangeX + xMin
  imgYcoords <- (yCoords - usrCoords[3]) * (yMax - yMin) / rangeY + yMin
  ## +0.5 centres the click on the pixel; flip y because tk counts from top.
  xClick <- as.numeric(x) + 0.5
  yClick <- height - (as.numeric(y) + 0.5)
  squared.Distance <- (xClick - imgXcoords)^2 + (yClick - imgYcoords)^2
  indexClosest <<- which.min(squared.Distance)
  if ((TypeGraph == 4) || (TypeGraph == 1 && tclvalue(showboth) == "2") || (TypeGraph == 8))
  {
    ## Environment-only views: the label lives after the genotype block.
    xAnt <<- xtext[indexClosest + length(colgenotype)]
    yAnt <<- ytext[indexClosest + length(colgenotype)]
  }
  else if (TypeGraph == 2 || TypeGraph == 3)
  {
    ## Single genotype/environment views: labels are not movable.
  }
  else
  {
    xAnt <<- xtext[indexClosest]
    yAnt <<- ytext[indexClosest]
  }
}
# --- "Model Selection" start-up dialog ------------------------------------
# Three non-editable drop-down selectors (SVP, centering, scaling), each
# initialised to the current option value via its own Tcl text variable,
# plus an OK button wired to OnOKModelSelection. The "ComboBox" widget
# presumably comes from the BWidget Tcl extension -- TODO confirm.
winmodel <- tktoplevel()
tkwm.title(winmodel, "Model Selection")
comboscaling <- tkwidget(winmodel, "ComboBox", editable = FALSE,
values = datascaling, width = 30)
defaultscaling <- tclVar(optionscaling)
tkconfigure(comboscaling, textvariable = defaultscaling)
comboscentering <- tkwidget(winmodel, "ComboBox", editable = FALSE,
values = datacentering, width = 30)
defaultcentering <- tclVar(optioncentering)
tkconfigure(comboscentering, textvariable = defaultcentering)
comboSVP <- tkwidget(winmodel, "ComboBox", editable = FALSE,
values = dataSVP, width = 30)
defaultSVP <- tclVar(optionSVP)
tkconfigure(comboSVP, textvariable = defaultSVP)
OK.modelselection <- tkbutton(winmodel, text = " OK ",command = OnOKModelSelection)
# Lay the widgets out top to bottom: SVP, centering, scaling, then OK.
# The blank tklabel rows act as vertical spacers.
tkgrid(tklabel(winmodel, text = "SVP: "),
sticky = "w")
tkgrid(comboSVP)
tkgrid(tklabel(winmodel, text = " "),
sticky = "w")
tkgrid(tklabel(winmodel, text = "Centered By: "),
sticky = "w")
tkgrid(comboscentering)
tkgrid(tklabel(winmodel, text = " "),
sticky = "w")
tkgrid(tklabel(winmodel, text = "Scaled (Divided) By: "),
sticky = "w")
tkgrid(comboscaling)
tkgrid(tklabel(winmodel, text = " "),
sticky = "w")
tkgrid(OK.modelselection)
tkfocus(winmodel)
}
|
f <- function(x, y) {
  # Forward the sum of the two arguments to g().
  g(x + y)
}
g <- function(x) {
  # Round the input to the nearest integer before delegating to h().
  h(round(x))
}
h <- function(x) {
  # Reproducibly draw x standard-normal values (fixed seed) and echo them.
  # print() returns its argument invisibly, so the draws are also the
  # function's value. Note rnorm() errors for negative x, which is what
  # triggers the recover() demo below.
  set.seed(1234)
  draws <- rnorm(x)
  print(draws)
}
options(error = recover) # enable debugging mode
f(2, 3)
f(2, -3) # rnorm(-1) fails inside h(), so recover() is entered at this point
## At the recover() prompt, type:
## number - choose a frame to browse (0 exits recover)
## c - quit from that frame and choose another frame
## Q - quit debugging and return to the top level
options(error = NULL) # disable debugging mode
| /r/learning/debug/recover.r | no_license | jk983294/math | R | false | false | 424 | r | f <- function(x, y) {
z <- x + y
g(z)
}
# Round to the nearest integer before delegating to h().
g <- function(x) {
z <- round(x)
h(z)
}
# Draw x standard-normal values and echo them; the fixed seed makes the
# draw reproducible. rnorm() errors for negative x (used by the demo below).
h <- function(x) {
set.seed(1234)
z <- rnorm(x)
print(z)
}
options(error = recover) # enable debugging mode
f(2, 3)
f(2, -3) # rnorm(-1) fails inside h(), so recover() is entered at this point
## At the recover() prompt, type:
## number - choose a frame to browse (0 exits recover)
## c - quit from that frame and choose another frame
## Q - quit debugging and return to the top level
options(error = NULL) # disable debugging mode
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1295127508L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615939078-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1295127508L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
## ----module, echo=FALSE, results="asis"----------------------------------
# Name of this course module; emitted as a LaTeX \Module macro for the slides.
Module <- "DataO"
cat(paste0("\\newcommand{\\Module}{", Module, "}"))
## ----setup, child="mycourse.Rnw"-----------------------------------------
## ----setup_options, include=FALSE----------------------------------------
# knitr chunk defaults for the whole document: figure sizing/placement,
# custom output-truncation options (out.lines/out.truncate, src.top/src.bot
# are consumed by the hooks defined below), and per-module figure/cache/bib
# paths derived from Module.
library(knitr)
library(xtable)
opts_chunk$set(cache=FALSE)
opts_chunk$set(out.width='0.8\\textwidth')
opts_chunk$set(fig.align='center')
opts_chunk$set(src.top=NULL)
opts_chunk$set(src.bot=NULL)
opts_chunk$set(out.lines=4)
opts_chunk$set(out.truncate=80)
opts_chunk$set(fig.path=sprintf("figures/%s/", Module))
opts_chunk$set(cache.path=sprintf("cache/%s/", Module))
opts_chunk$set(bib.file=paste0(Module, ".bib"))
opts_chunk$set(background='#E7E7E7')
# Leave code as I have formatted it.
opts_chunk$set(tidy=FALSE)
# Hooks
# Allow auto crop of base graphics plots when crop=TRUE.
knit_hooks$set(crop=hook_pdfcrop)
# Truncate long lines and long output
# Wrap knitr's default output/source hooks: chunk output is limited to
# out.lines lines of at most out.truncate characters each; source echo is
# limited to src.top leading and src.bot trailing lines, with "...." marking
# any elision.
hook_output <- knit_hooks$get("output")
hook_source <- knit_hooks$get("source")
knit_hooks$set(output=function(x, options)
{
if (options$results != "asis")
{
# Split string into separate lines.
x <- unlist(stringr::str_split(x, "\n"))
# Trim to the number of lines specified.
if (!is.null(n <- options$out.lines))
{
if (length(x) > n)
{
# Truncate the output.
x <- c(head(x, n), "....\n")
}
}
# Truncate each line to length specified.
if (!is.null(m <- options$out.truncate))
{
len <- nchar(x)
x[len>m] <- paste0(substr(x[len>m], 0, m-3), "...")
}
# Paste lines back together.
x <- paste(x, collapse="\n")
# Replace ' = ' with '=' - my preference. Hopefully won't
# affect things inappropriately.
x <- gsub(" = ", "=", x)
}
hook_output(x, options)
},
source=function(x, options)
{
# Split string into separate lines.
x <- unlist(stringr::str_split(x, "\n"))
# Trim to the number of lines specified.
if (!is.null(n <- options$src.top))
{
if (length(x) > n)
{
# Truncate the output.
if (is.null(m <-options$src.bot)) m <- 0
x <- c(head(x, n+1), "\n....\n", tail(x, m+2))
}
}
# Paste lines back together.
x <- paste(x, collapse="\n")
hook_source(x, options)
})
# Optionally allow R Code chunks to be environments so we can refer to them.
# When a chunk sets rcode=TRUE, wrap it in a LaTeX rcode environment whose
# \label is the chunk label, so the text can cross-reference the listing.
knit_hooks$set(rcode=function(before, options, envir)
{
if (before)
sprintf('\\begin{rcode}\\label{%s}\\hfill{}', options$label)
else
'\\end{rcode}'
})
## ----load_packages, message=FALSE----------------------------------------
library(rattle) # The weather dataset and normVarNames().
library(randomForest) # Impute missing values using na.roughfix().
library(tidyr) # Tidy the dataset.
library(ggplot2) # Visualise data.
library(dplyr) # Data preparation and pipes %>%.
library(lubridate) # Handle dates.
library(FSelector) # Feature selection.
## ----additional_dependent_pacakges, echo=FALSE, message=FALSE------------
# These are dependencies that would otherwise be loaded as required.
library(stringr)
## ----common_intro, child='documentation.Rnw', eval=TRUE------------------
## ----help_library, eval=FALSE, tidy=FALSE--------------------------------
## ?read.csv
## ----help_package, eval=FALSE--------------------------------------------
## library(help=rattle)
## ----child-bib, child='generatebib.Rnw', eval=TRUE-----------------------
## ----record_start_time, echo=FALSE---------------------------------------
start.time <- proc.time()
## ----generate_bib, echo=FALSE, message=FALSE, warning=FALSE--------------
# Write all packages in the current session to a bib file
if (is.null(opts_chunk$get("bib.file"))) opts_chunk$set(bib.file="Course.bib")
write_bib(sub("^.*/", "", grep("^/", searchpaths(), value=TRUE)),
file=opts_chunk$get("bib.file"))
# NOTE(review): the system() calls below assume a Unix-like shell with cat
# and perl on the PATH; they will fail silently elsewhere.
system(paste("cat extra.bib >>", opts_chunk$get("bib.file")))
# Fix up specific issues.
# R-earth
system(paste("perl -pi -e 's|. Derived from .*$|},|'",
opts_chunk$get("bib.file")))
# R-randomForest
system(paste("perl -pi -e 's|Fortran original by Leo Breiman",
"and Adele Cutler and R port by|Leo Breiman and",
"Adele Cutler and|'", opts_chunk$get("bib.file")))
# R-C50
system(paste("perl -pi -e 's|. C code for C5.0 by R. Quinlan|",
" and J. Ross Quinlan|'", opts_chunk$get("bib.file")))
# R-caret
system(paste("perl -pi -e 's|. Contributions from|",
" and|'", opts_chunk$get("bib.file")))
# Me
system(paste("perl -pi -e 's|Graham Williams|",
"Graham J Williams|'", opts_chunk$get("bib.file")))
## ----eval=FALSE----------------------------------------------------------
## dspath <- "http://rattle.togaware.com/weather.csv"
## ------------------------------------------------------------------------
# Use the copy of the weather dataset shipped with the rattle package.
dspath <- system.file("csv", "weather.csv", package="rattle")
## ----dataset_load--------------------------------------------------------
weather <- read.csv(dspath)
## ------------------------------------------------------------------------
library(rattle) # weather, normVarNames().
## ----basic_summary_weather-----------------------------------------------
dim(weather)
names(weather)
str(weather)
## ----prepare_the_dataset, out.lines=7------------------------------------
# Work on a generic ds copy so the template applies to any dataset name.
dsname <- "weather"
ds <- get(dsname)
dim(ds)
names(ds)
## ----alternative_dataset_assignment, eval=FALSE--------------------------
## ds <- weather
## ----using_tbl_df--------------------------------------------------------
# NOTE(review): tbl_df() is deprecated in current dplyr; tibble::as_tibble()
# is the modern equivalent.
class(ds)
ds <- tbl_df(ds)
class(ds)
## ----tbl_df_print, out.lines=NULL----------------------------------------
ds
## ----dataset_head, out.lines=10------------------------------------------
head(ds)
## ----dataset_tail, out.lines=10------------------------------------------
tail(ds)
## ----dataset_sample, out.lines=10----------------------------------------
# A random sample of rows gives a less biased peek than head()/tail().
ds[sample(nrow(ds), 6),]
## ----dataset_structure, out.lines=30-------------------------------------
str(ds)
## ----dataset_summary, out.lines=43---------------------------------------
summary(ds)
## ----message=FALSE-------------------------------------------------------
# Normalise the variable names (lower case, underscore separated).
names(ds)
names(ds) <- normVarNames(names(ds))
names(ds)
## ------------------------------------------------------------------------
sapply(ds, class)
## ----convert_date, message=FALSE-----------------------------------------
# Convert the date column from factor to a proper Date.
library(lubridate) # ymd()
head(ds$date)
ds$date <- ymd(as.character(ds$date))
head(ds$date)
## ------------------------------------------------------------------------
sapply(ds, class)
## ----variable_roles, out.lines=NULL--------------------------------------
# Assign modelling roles: the target, the risk (output) variable, and
# identifiers that must not be used as model inputs.
(vars <- names(ds))
target <- "rain_tomorrow"
risk <- "risk_mm"
id <- c("date", "location")
## ------------------------------------------------------------------------
ignore <- union(id, if (exists("risk")) risk)
## ------------------------------------------------------------------------
# Also ignore any variable that is unique per observation (acts as an id).
(ids <- which(sapply(ds, function(x) length(unique(x))) == nrow(ds)))
ignore <- union(ignore, names(ids))
## ----ignore_missing_variables--------------------------------------------
# Ignore variables that are entirely missing.
mvc <- sapply(ds[vars], function(x) sum(is.na(x)))
mvn <- names(which(mvc == nrow(ds)))
ignore <- union(ignore, mvn)
## ----ignore_mostly_missing_variables-------------------------------------
# Ignore variables that are at least 70% missing.
mvn <- names(which(mvc >= 0.7*nrow(ds)))
ignore <- union(ignore, mvn)
## ----ignore_factors_with_many_levels-------------------------------------
factors <- which(sapply(ds[vars], is.factor))
lvls <- sapply(factors, function(x) length(levels(ds[[x]])))
(many <- names(which(lvls > 20)))
ignore <- union(ignore, many)
## ----ignore_variables_constant_values------------------------------------
(constants <- names(which(sapply(ds[vars], function(x) all(x == x[1L])))))
ignore <- union(ignore, constants)
## ------------------------------------------------------------------------
# Pairwise correlations of the numeric variables, reshaped to a long table
# sorted by absolute correlation, to spot near-duplicate inputs.
# NOTE(review): gather() is superseded by tidyr::pivot_longer().
mc <- cor(ds[which(sapply(ds, is.numeric))], use="complete.obs")
mc[upper.tri(mc, diag=TRUE)] <- NA
mc <-
mc %>%
abs() %>%
data.frame() %>%
mutate(var1=row.names(mc)) %>%
gather(var2, cor, -var1) %>%
na.omit()
mc <- mc[order(-abs(mc$cor)),]
mc
## ------------------------------------------------------------------------
# Drop one variable from each highly correlated pair identified above.
ignore <- union(ignore, c("temp_3pm", "pressure_9am", "temp_9am"))
## ------------------------------------------------------------------------
length(vars)
vars <- setdiff(vars, ignore)
length(vars)
## ----out.lines=NULL------------------------------------------------------
# Feature selection measures over the remaining candidate inputs.
library(FSelector) # information.gain()
form <- formula(paste(target, "~ ."))
cfs(form, ds[vars])
information.gain(form, ds[vars])
## ----remove_missing_target-----------------------------------------------
dim(ds)
sum(is.na(ds[target]))
ds <- ds[!is.na(ds[target]),]
sum(is.na(ds[target]))
dim(ds)
## ------------------------------------------------------------------------
ods <- ds
## ----impute_missing_values-----------------------------------------------
dim(ds[vars])
sum(is.na(ds[vars]))
ds[vars] <- na.roughfix(ds[vars])
sum(is.na(ds[vars]))
dim(ds[vars])
## ------------------------------------------------------------------------
ds <- ods
## ------------------------------------------------------------------------
ods <- ds
omit <- NULL
## ----remove_missing_values-----------------------------------------------
dim(ds[vars])
sum(is.na(ds[vars]))
mo <- attr(na.omit(ds[vars]), "na.action")
omit <- union(omit, mo)
if (length(omit)) ds <- ds[-omit,]
sum(is.na(ds[vars]))
dim(ds[vars])
## ------------------------------------------------------------------------
ds <- ods
## ----normalise_factors---------------------------------------------------
# Normalise the level labels of every factor input variable.
# Bug fix: which() returns positions WITHIN ds[vars]; after the earlier
# `vars <- setdiff(vars, ignore)` those positions no longer line up with
# column positions in the full ds, so `ds[[f]]` with a positional index
# could rename the levels of the wrong column.  Iterate over the NAMES of
# the matched columns instead; `factors` itself is unchanged.
factors <- which(sapply(ds[vars], is.factor))
for (f in names(factors)) levels(ds[[f]]) <- normVarNames(levels(ds[[f]]))
## ----ensure_target_is_categoric------------------------------------------
ds[target] <- as.factor(ds[[target]])
table(ds[target])
## ----fig.height=4--------------------------------------------------------
p <- ggplot(ds, aes_string(x=target))
p <- p + geom_bar(width=0.2)
print(p)
## ----identify_variables_inputc-------------------------------------------
inputc <- setdiff(vars, target)
inputc
## ----identify_variables_inputi-------------------------------------------
inputi <- sapply(inputc, function(x) which(x == names(ds)), USE.NAMES=FALSE)
inputi
## ----number_of_observations----------------------------------------------
nobs <- nrow(ds)
nobs
## ----dimensions----------------------------------------------------------
dim(ds)
dim(ds[vars])
dim(ds[inputc])
dim(ds[inputi])
## ----identify_variable_types---------------------------------------------
numi <- intersect(inputi, which(sapply(ds, is.numeric)))
numi
numc <- names(ds)[numi]
numc
cati <- intersect(inputi, which(sapply(ds, is.factor)))
cati
catc <- names(ds)[cati]
catc
## ----eval=FALSE----------------------------------------------------------
## dsdate <- paste0("_", format(Sys.Date(), "%y%m%d"))
## dsrdata <- paste0(dsname, dsdate, ".RData")
## save(ds, dsname, dspath, dsdate, target, risk, id, ignore, vars,
## nobs, omit, inputi, inputc, numi, numc, cati, catc, file=dsrdata)
## ----echo=FALSE----------------------------------------------------------
# Do this so we know what to load into ModelsO.Rnw
dsdate <- paste0("_", "130704")
dsrdata <- paste0(dsname, dsdate, ".RData")
save(ds, dsname, dspath, dsdate, target, risk, id, ignore, vars,
nobs, omit, inputi, inputc, numi, numc, cati, catc, file=dsrdata)
## ------------------------------------------------------------------------
(load(dsrdata))
dsname
dspath
dim(ds)
id
target
risk
ignore
vars
## ----review_load, eval=FALSE---------------------------------------------
## # Required packages
## library(rattle) # The weather dataset and normVarNames().
## library(randomForest) # Impute missing values using na.roughfix().
##
## # Identify the dataset.
## dsname <- "weather"
## dspath <- system.file("csv", "weather.csv", package="rattle")
## weather <- read.csv(dspath)
## ds <- get(dsname) %>% tbl_df()
## names(ds) <- normVarNames(names(ds)) # Optional lower case variable names.
## vars <- names(ds)
## target <- "rain_tomorrow"
## risk <- "risk_mm"
## id <- c("date", "location")
##
## # Summarise
## ds
## dim(ds)
## names(ds)
## str(ds)
## summary(ds)
## ----review_ignore, eval=FALSE-------------------------------------------
## # Ignore the IDs and the risk variable.
## ignore <- union(id, if (exists("risk")) risk)
##
## # Ignore variables that look like identifiers.
## ids <- which(sapply(ds, function(x) length(unique(x))) == nrow(ds))
## ignore <- union(ignore, names(ids))
##
## # Ignore variables which are completely missing.
## mvc <- sapply(ds[vars], function(x) sum(is.na(x))) # Missing value count.
## mvn <- names(ds)[(which(mvc == nrow(ds)))] # Missing var names.
## ignore <- union(ignore, mvn)
##
## # Ignore variables that are mostly missing - e.g., 70% or more missing
## mvn <- names(ds)[(which(mvc >= 0.7*nrow(ds)))]
## ignore <- union(ignore, mvn)
##
## # Ignore variables with many levels.
## factors <- which(sapply(ds[vars], is.factor))
## lvls <- sapply(factors, function(x) length(levels(ds[[x]])))
## many <- names(which(lvls > 20)) # Factors with too many levels.
## ignore <- union(ignore, many)
##
## # Ignore constants.
## constants <- names(which(sapply(ds[vars], function(x) all(x == x[1L]))))
## ignore <- union(ignore, constants)
##
## # Initialise the variables
## vars <- setdiff(vars, ignore)
##
## ----review_finalise, eval=FALSE-----------------------------------------
## # Variable roles.
## inputc <- setdiff(vars, target)
## inputi <- sapply(inputc, function(x) which(x == names(ds)), USE.NAMES=FALSE)
## numi <- intersect(inputi, which(sapply(ds, is.numeric)))
## numc <- names(numi)
## cati <- intersect(inputi, which(sapply(ds, is.factor)))
## catc <- names(cati)
##
## # Remove all observations with a missing target.
## ds <- ds[!is.na(ds[target]),]
##
## # Impute missing values, but do this wisely - understand why missing.
## if (sum(is.na(ds[vars]))) ds[vars] <- na.roughfix(ds[vars])
##
## # Omit observations with missing values.
## omit <- NULL
## mo <- attr(na.omit(ds[vars]), "na.action")
## omit <- union(omit, mo)
## if (length(omit)) ds <- ds[-omit,] # Remove omitted observations.
##
## # Normalise factors.
## factors <- which(sapply(ds[vars], is.factor))
## for (f in factors) levels(ds[[f]]) <- normVarNames(levels(ds[[f]]))
##
## # Ensure the target is categoric.
## ds[target] <- as.factor(ds[[target]])
##
## # Number of observations.
## nobs <- nrow(ds)
##
## # Save the dataset
## dsdate <- paste0("_", format(Sys.Date(), "%y%m%d"))
## dsrdata <- paste0(dsname, dsdate, ".RData")
## save(ds, dsname, dspath, dsdate, target, risk, id, ignore, vars,
## nobs, omit, inputi, inputc, numi, numc, cati, catc, file=dsrdata)
## ----common_outtro, child="finale.Rnw", eval=TRUE------------------------
## ----syinfo, child="sysinfo.Rnw", eval=TRUE------------------------------
## ----echo=FALSE, message=FALSE-------------------------------------------
require(Hmisc)
pkg <- "knitr"
pkg.version <- installed.packages()[pkg, 'Version']
pkg.date <- installed.packages(fields="Date")[pkg, 'Date']
pkg.info <- paste(pkg, pkg.version, pkg.date)
rev <- system("bzr revno", intern=TRUE)
cpu <- system(paste("cat /proc/cpuinfo | grep 'model name' |",
"head -n 1 | cut -d':' -f2"), intern=TRUE)
ram <- system("cat /proc/meminfo | grep MemTotal: | awk '{print $2}'",
intern=TRUE)
ram <- paste0(round(as.integer(ram)/1e6, 1), "GB")
user <- Sys.getenv("LOGNAME")
node <- Sys.info()[["nodename"]]
user.node <- paste0(user, "@", node)
gcc.version <- system("g++ -v 2>&1 | grep 'gcc version' | cut -d' ' -f1-3",
intern=TRUE)
os <- system("lsb_release -d | cut -d: -f2 | sed 's/^[ \t]*//'", intern=TRUE)
| /snippets/R/survivalguide-to-datascience/ch1/DataO.R | no_license | zedoul/air | R | false | false | 16,679 | r |
## ----module, echo=FALSE, results="asis"----------------------------------
# Module name: used below to namespace the knitr figure/cache paths and the
# bibliography file for this chapter.
Module <- "DataO"
# Emit a LaTeX \Module macro (results="asis") so the surrounding document
# can reference the module name.
cat(paste0("\\newcommand{\\Module}{", Module, "}"))
## ----setup, child="mycourse.Rnw"-----------------------------------------
## ----setup_options, include=FALSE----------------------------------------
library(knitr)
library(xtable)
opts_chunk$set(cache=FALSE)
opts_chunk$set(out.width='0.8\\textwidth')
opts_chunk$set(fig.align='center')
opts_chunk$set(src.top=NULL)
opts_chunk$set(src.bot=NULL)
opts_chunk$set(out.lines=4)
opts_chunk$set(out.truncate=80)
opts_chunk$set(fig.path=sprintf("figures/%s/", Module))
opts_chunk$set(cache.path=sprintf("cache/%s/", Module))
opts_chunk$set(bib.file=paste0(Module, ".bib"))
opts_chunk$set(background='#E7E7E7')
# Leave code as I have formatted it.
opts_chunk$set(tidy=FALSE)
# Hooks
# Allow auto crop of base graphics plots when crop=TRUE.
knit_hooks$set(crop=hook_pdfcrop)
# Truncate long lines and long output
hook_output <- knit_hooks$get("output")
hook_source <- knit_hooks$get("source")
knit_hooks$set(output=function(x, options)
{
if (options$results != "asis")
{
# Split string into separate lines.
x <- unlist(stringr::str_split(x, "\n"))
# Trim to the number of lines specified.
if (!is.null(n <- options$out.lines))
{
if (length(x) > n)
{
# Truncate the output.
x <- c(head(x, n), "....\n")
}
}
# Truncate each line to length specified.
if (!is.null(m <- options$out.truncate))
{
len <- nchar(x)
x[len>m] <- paste0(substr(x[len>m], 0, m-3), "...")
}
# Paste lines back together.
x <- paste(x, collapse="\n")
# Replace ' = ' with '=' - my preference. Hopefully won't
# affect things inappropriately.
x <- gsub(" = ", "=", x)
}
hook_output(x, options)
},
source=function(x, options)
{
# Split string into separate lines.
x <- unlist(stringr::str_split(x, "\n"))
# Trim to the number of lines specified.
if (!is.null(n <- options$src.top))
{
if (length(x) > n)
{
# Truncate the output.
if (is.null(m <-options$src.bot)) m <- 0
x <- c(head(x, n+1), "\n....\n", tail(x, m+2))
}
}
# Paste lines back together.
x <- paste(x, collapse="\n")
hook_source(x, options)
})
# Optionally allow R Code chunks to be environments so we can refer to them.
# Before the chunk runs we open a LaTeX `rcode` environment labelled with
# the chunk name; after the chunk we close it.
knit_hooks$set(rcode=function(before, options, envir)
{
  opening <- sprintf('\\begin{rcode}\\label{%s}\\hfill{}', options$label)
  closing <- '\\end{rcode}'
  if (before) opening else closing
})
## ----load_packages, message=FALSE----------------------------------------
library(rattle) # The weather dataset and normVarNames().
library(randomForest) # Impute missing values using na.roughfix().
library(tidyr) # Tidy the dataset.
library(ggplot2) # Visualise data.
library(dplyr) # Data preparation and pipes %>%.
library(lubridate) # Handle dates.
library(FSelector) # Feature selection.
## ----additional_dependent_pacakges, echo=FALSE, message=FALSE------------
# These are dependencies that would otherwise be loaded as required.
library(stringr)
## ----common_intro, child='documentation.Rnw', eval=TRUE------------------
## ----help_library, eval=FALSE, tidy=FALSE--------------------------------
## ?read.csv
## ----help_package, eval=FALSE--------------------------------------------
## library(help=rattle)
## ----child-bib, child='generatebib.Rnw', eval=TRUE-----------------------
## ----record_start_time, echo=FALSE---------------------------------------
start.time <- proc.time()
## ----generate_bib, echo=FALSE, message=FALSE, warning=FALSE--------------
# Write all packages in the current session to a bib file
if (is.null(opts_chunk$get("bib.file"))) opts_chunk$set(bib.file="Course.bib")
write_bib(sub("^.*/", "", grep("^/", searchpaths(), value=TRUE)),
file=opts_chunk$get("bib.file"))
system(paste("cat extra.bib >>", opts_chunk$get("bib.file")))
# Fix up specific issues.
# R-earth
system(paste("perl -pi -e 's|. Derived from .*$|},|'",
opts_chunk$get("bib.file")))
# R-randomForest
system(paste("perl -pi -e 's|Fortran original by Leo Breiman",
"and Adele Cutler and R port by|Leo Breiman and",
"Adele Cutler and|'", opts_chunk$get("bib.file")))
# R-C50
system(paste("perl -pi -e 's|. C code for C5.0 by R. Quinlan|",
" and J. Ross Quinlan|'", opts_chunk$get("bib.file")))
# R-caret
system(paste("perl -pi -e 's|. Contributions from|",
" and|'", opts_chunk$get("bib.file")))
# Me
system(paste("perl -pi -e 's|Graham Williams|",
"Graham J Williams|'", opts_chunk$get("bib.file")))
## ----eval=FALSE----------------------------------------------------------
## dspath <- "http://rattle.togaware.com/weather.csv"
## ------------------------------------------------------------------------
dspath <- system.file("csv", "weather.csv", package="rattle")
## ----dataset_load--------------------------------------------------------
weather <- read.csv(dspath)
## ------------------------------------------------------------------------
library(rattle) # weather, normVarNames().
## ----basic_summary_weather-----------------------------------------------
dim(weather)
names(weather)
str(weather)
## ----prepare_the_dataset, out.lines=7------------------------------------
dsname <- "weather"
ds <- get(dsname)
dim(ds)
names(ds)
## ----alternative_dataset_assignment, eval=FALSE--------------------------
## ds <- weather
## ----using_tbl_df--------------------------------------------------------
class(ds)
ds <- tbl_df(ds)
class(ds)
## ----tbl_df_print, out.lines=NULL----------------------------------------
ds
## ----dataset_head, out.lines=10------------------------------------------
head(ds)
## ----dataset_tail, out.lines=10------------------------------------------
tail(ds)
## ----dataset_sample, out.lines=10----------------------------------------
ds[sample(nrow(ds), 6),]
## ----dataset_structure, out.lines=30-------------------------------------
str(ds)
## ----dataset_summary, out.lines=43---------------------------------------
summary(ds)
## ----message=FALSE-------------------------------------------------------
names(ds)
names(ds) <- normVarNames(names(ds))
names(ds)
## ------------------------------------------------------------------------
sapply(ds, class)
## ----convert_date, message=FALSE-----------------------------------------
library(lubridate) # ymd()
head(ds$date)
ds$date <- ymd(as.character(ds$date))
head(ds$date)
## ------------------------------------------------------------------------
sapply(ds, class)
## ----variable_roles, out.lines=NULL--------------------------------------
(vars <- names(ds))
target <- "rain_tomorrow"
risk <- "risk_mm"
id <- c("date", "location")
## ------------------------------------------------------------------------
ignore <- union(id, if (exists("risk")) risk)
## ------------------------------------------------------------------------
(ids <- which(sapply(ds, function(x) length(unique(x))) == nrow(ds)))
ignore <- union(ignore, names(ids))
## ----ignore_missing_variables--------------------------------------------
mvc <- sapply(ds[vars], function(x) sum(is.na(x)))
mvn <- names(which(mvc == nrow(ds)))
ignore <- union(ignore, mvn)
## ----ignore_mostly_missing_variables-------------------------------------
mvn <- names(which(mvc >= 0.7*nrow(ds)))
ignore <- union(ignore, mvn)
## ----ignore_factors_with_many_levels-------------------------------------
factors <- which(sapply(ds[vars], is.factor))
lvls <- sapply(factors, function(x) length(levels(ds[[x]])))
(many <- names(which(lvls > 20)))
ignore <- union(ignore, many)
## ----ignore_variables_constant_values------------------------------------
(constants <- names(which(sapply(ds[vars], function(x) all(x == x[1L])))))
ignore <- union(ignore, constants)
## ------------------------------------------------------------------------
# Pairwise correlations between the numeric variables, complete cases only.
mc <- cor(ds[which(sapply(ds, is.numeric))], use="complete.obs")
# Blank the upper triangle (and the diagonal) so each variable pair
# appears only once in the long table built below.
mc[upper.tri(mc, diag=TRUE)] <- NA
# Reshape the matrix into a long data frame of (var1, var2, cor) rows.
# Note: row.names(mc) inside the pipeline still refers to the correlation
# MATRIX -- the rebinding of `mc` only takes effect once the whole
# pipeline has been evaluated.
mc <-
  mc %>%
  abs() %>%
  data.frame() %>%
  mutate(var1=row.names(mc)) %>%
  gather(var2, cor, -var1) %>%
  na.omit()
# Strongest (absolute) correlations first.
mc <- mc[order(-abs(mc$cor)),]
mc
## ------------------------------------------------------------------------
ignore <- union(ignore, c("temp_3pm", "pressure_9am", "temp_9am"))
## ------------------------------------------------------------------------
length(vars)
vars <- setdiff(vars, ignore)
length(vars)
## ----out.lines=NULL------------------------------------------------------
library(FSelector) # information.gain()
form <- formula(paste(target, "~ ."))
cfs(form, ds[vars])
information.gain(form, ds[vars])
## ----remove_missing_target-----------------------------------------------
dim(ds)
sum(is.na(ds[target]))
ds <- ds[!is.na(ds[target]),]
sum(is.na(ds[target]))
dim(ds)
## ------------------------------------------------------------------------
ods <- ds
## ----impute_missing_values-----------------------------------------------
dim(ds[vars])
sum(is.na(ds[vars]))
ds[vars] <- na.roughfix(ds[vars])
sum(is.na(ds[vars]))
dim(ds[vars])
## ------------------------------------------------------------------------
ds <- ods
## ------------------------------------------------------------------------
ods <- ds
omit <- NULL
## ----remove_missing_values-----------------------------------------------
dim(ds[vars])
sum(is.na(ds[vars]))
mo <- attr(na.omit(ds[vars]), "na.action")
omit <- union(omit, mo)
if (length(omit)) ds <- ds[-omit,]
sum(is.na(ds[vars]))
dim(ds[vars])
## ------------------------------------------------------------------------
ds <- ods
## ----normalise_factors---------------------------------------------------
# Normalise the level labels of every factor input variable.
# Bug fix: which() returns positions WITHIN ds[vars]; after the earlier
# `vars <- setdiff(vars, ignore)` those positions no longer line up with
# column positions in the full ds, so `ds[[f]]` with a positional index
# could rename the levels of the wrong column.  Iterate over the NAMES of
# the matched columns instead; `factors` itself is unchanged.
factors <- which(sapply(ds[vars], is.factor))
for (f in names(factors)) levels(ds[[f]]) <- normVarNames(levels(ds[[f]]))
## ----ensure_target_is_categoric------------------------------------------
ds[target] <- as.factor(ds[[target]])
table(ds[target])
## ----fig.height=4--------------------------------------------------------
p <- ggplot(ds, aes_string(x=target))
p <- p + geom_bar(width=0.2)
print(p)
## ----identify_variables_inputc-------------------------------------------
inputc <- setdiff(vars, target)
inputc
## ----identify_variables_inputi-------------------------------------------
inputi <- sapply(inputc, function(x) which(x == names(ds)), USE.NAMES=FALSE)
inputi
## ----number_of_observations----------------------------------------------
nobs <- nrow(ds)
nobs
## ----dimensions----------------------------------------------------------
dim(ds)
dim(ds[vars])
dim(ds[inputc])
dim(ds[inputi])
## ----identify_variable_types---------------------------------------------
numi <- intersect(inputi, which(sapply(ds, is.numeric)))
numi
numc <- names(ds)[numi]
numc
cati <- intersect(inputi, which(sapply(ds, is.factor)))
cati
catc <- names(ds)[cati]
catc
## ----eval=FALSE----------------------------------------------------------
## dsdate <- paste0("_", format(Sys.Date(), "%y%m%d"))
## dsrdata <- paste0(dsname, dsdate, ".RData")
## save(ds, dsname, dspath, dsdate, target, risk, id, ignore, vars,
## nobs, omit, inputi, inputc, numi, numc, cati, catc, file=dsrdata)
## ----echo=FALSE----------------------------------------------------------
# Do this so we know what to load into ModelsO.Rnw
dsdate <- paste0("_", "130704")
dsrdata <- paste0(dsname, dsdate, ".RData")
save(ds, dsname, dspath, dsdate, target, risk, id, ignore, vars,
nobs, omit, inputi, inputc, numi, numc, cati, catc, file=dsrdata)
## ------------------------------------------------------------------------
(load(dsrdata))
dsname
dspath
dim(ds)
id
target
risk
ignore
vars
## ----review_load, eval=FALSE---------------------------------------------
## # Required packages
## library(rattle) # The weather dataset and normVarNames().
## library(randomForest) # Impute missing values using na.roughfix().
##
## # Identify the dataset.
## dsname <- "weather"
## dspath <- system.file("csv", "weather.csv", package="rattle")
## weather <- read.csv(dspath)
## ds <- get(dsname) %>% tbl_df()
## names(ds) <- normVarNames(names(ds)) # Optional lower case variable names.
## vars <- names(ds)
## target <- "rain_tomorrow"
## risk <- "risk_mm"
## id <- c("date", "location")
##
## # Summarise
## ds
## dim(ds)
## names(ds)
## str(ds)
## summary(ds)
## ----review_ignore, eval=FALSE-------------------------------------------
## # Ignore the IDs and the risk variable.
## ignore <- union(id, if (exists("risk")) risk)
##
## # Ignore variables that look like identifiers.
## ids <- which(sapply(ds, function(x) length(unique(x))) == nrow(ds))
## ignore <- union(ignore, names(ids))
##
## # Ignore variables which are completely missing.
## mvc <- sapply(ds[vars], function(x) sum(is.na(x))) # Missing value count.
## mvn <- names(ds)[(which(mvc == nrow(ds)))] # Missing var names.
## ignore <- union(ignore, mvn)
##
## # Ignore variables that are mostly missing - e.g., 70% or more missing
## mvn <- names(ds)[(which(mvc >= 0.7*nrow(ds)))]
## ignore <- union(ignore, mvn)
##
## # Ignore variables with many levels.
## factors <- which(sapply(ds[vars], is.factor))
## lvls <- sapply(factors, function(x) length(levels(ds[[x]])))
## many <- names(which(lvls > 20)) # Factors with too many levels.
## ignore <- union(ignore, many)
##
## # Ignore constants.
## constants <- names(which(sapply(ds[vars], function(x) all(x == x[1L]))))
## ignore <- union(ignore, constants)
##
## # Initialise the variables
## vars <- setdiff(vars, ignore)
##
## ----review_finalise, eval=FALSE-----------------------------------------
## # Variable roles.
## inputc <- setdiff(vars, target)
## inputi <- sapply(inputc, function(x) which(x == names(ds)), USE.NAMES=FALSE)
## numi <- intersect(inputi, which(sapply(ds, is.numeric)))
## numc <- names(numi)
## cati <- intersect(inputi, which(sapply(ds, is.factor)))
## catc <- names(cati)
##
## # Remove all observations with a missing target.
## ds <- ds[!is.na(ds[target]),]
##
## # Impute missing values, but do this wisely - understand why missing.
## if (sum(is.na(ds[vars]))) ds[vars] <- na.roughfix(ds[vars])
##
## # Omit observations with missing values.
## omit <- NULL
## mo <- attr(na.omit(ds[vars]), "na.action")
## omit <- union(omit, mo)
## if (length(omit)) ds <- ds[-omit,] # Remove omitted observations.
##
## # Normalise factors.
## factors <- which(sapply(ds[vars], is.factor))
## for (f in factors) levels(ds[[f]]) <- normVarNames(levels(ds[[f]]))
##
## # Ensure the target is categoric.
## ds[target] <- as.factor(ds[[target]])
##
## # Number of observations.
## nobs <- nrow(ds)
##
## # Save the dataset
## dsdate <- paste0("_", format(Sys.Date(), "%y%m%d"))
## dsrdata <- paste0(dsname, dsdate, ".RData")
## save(ds, dsname, dspath, dsdate, target, risk, id, ignore, vars,
## nobs, omit, inputi, inputc, numi, numc, cati, catc, file=dsrdata)
## ----common_outtro, child="finale.Rnw", eval=TRUE------------------------
## ----syinfo, child="sysinfo.Rnw", eval=TRUE------------------------------
## ----echo=FALSE, message=FALSE-------------------------------------------
require(Hmisc)
pkg <- "knitr"
pkg.version <- installed.packages()[pkg, 'Version']
pkg.date <- installed.packages(fields="Date")[pkg, 'Date']
pkg.info <- paste(pkg, pkg.version, pkg.date)
rev <- system("bzr revno", intern=TRUE)
cpu <- system(paste("cat /proc/cpuinfo | grep 'model name' |",
"head -n 1 | cut -d':' -f2"), intern=TRUE)
ram <- system("cat /proc/meminfo | grep MemTotal: | awk '{print $2}'",
intern=TRUE)
ram <- paste0(round(as.integer(ram)/1e6, 1), "GB")
user <- Sys.getenv("LOGNAME")
node <- Sys.info()[["nodename"]]
user.node <- paste0(user, "@", node)
gcc.version <- system("g++ -v 2>&1 | grep 'gcc version' | cut -d' ' -f1-3",
intern=TRUE)
os <- system("lsb_release -d | cut -d: -f2 | sed 's/^[ \t]*//'", intern=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Briggs_2021}
\alias{Briggs_2021}
\title{Inferring local and crossborder transmission with human and mobility data,
Kanungu District in Southwest Uganda (Briggs et al., 2021)}
\format{
A list of multiple data objects:
\itemize{
\item \code{final_samples_Dec}: contains allele lengths with barcodes.
\item \code{Kanungu_pairwise_comparison_df}: epidemiological data set with
\code{final_samples_Dec} keys.
}
\code{Kanungu_pairwise_comparison_df}: Metadata for the allele sizes
located in \code{final_samples_Dec}.
}
\source{
\href{https://malariajournal.biomedcentral.com/articles/10.1186/s12936-021-03603-7}{Supplementary file 1}
}
\usage{
data(Briggs_2021)
}
\description{
Data from Briggs et al. (2021). Here we give a brief summary of the data -
see the original paper for full details.
\cr
\cr
The study uses 80 randomly sampled households around a single health facility
and includes all children (6 mo - 10 years old) and at least one adult
caretaker from the households. DNA was acquired through dried blood spots, and
after two rounds of PCR amplification, the PCR products were sized using
capillary electrophoresis. Finally, allele length was calculated.
}
\references{
\insertRef{briggs_withinhousehold_2021}{SIMPLEGEN}
}
\keyword{datasets}
| /man/Briggs_2021.Rd | permissive | mrc-ide/SIMPLEGEN | R | false | true | 1,376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Briggs_2021}
\alias{Briggs_2021}
\title{Inferring local and crossborder transmission with human and mobility data,
Kanunga District in Southwest Uganda (Briggs et al., 2019)}
\format{
A list of multiple data objects:
\itemize{
\item \code{final_samples_Dec}: contains allele lengths with barcodes.
\item \code{Kanungu_pairwise_comparison_df}: epidemiological data set with
\code{final_samples_Dec} keys.
}
\code{Kanungu_pairwise_comparison_df}: Metadata for the allele sizes
located in \code{final_samples_Dec}.
}
\source{
\href{https://malariajournal.biomedcentral.com/articles/10.1186/s12936-021-03603-7}{Supplementary file 1}
}
\usage{
data(Briggs_2021)
}
\description{
Data from Briggs et al. (2021). Here we give a brief summary of the data -
see the original paper for full details.
\cr
\cr
The study uses 80 randomly sampled households around a single health facility
and includes all children (6 mo - 10 years old) and at least one adult
caretaker from the households. DNA was acquired though dried blood spots, and
after two rounds of PCR amplification, the PCR products were sized using
capillary electrophoresis. Finally, allele length was calculated.
}
\references{
\insertRef{briggs_withinhousehold_2021}{SIMPLEGEN}
}
\keyword{datasets}
|
# Summarise the gapminder data by continent and year.
library(gapminder)
library(dplyr)
# Find median life expectancy and maximum GDP per capita in each
# year/continent combination.  summarize() collapses to one row per
# (continent, year) group; per dplyr's documented behaviour only the
# innermost grouping (year) is dropped, so the result stays grouped
# by continent.
gapminder %>%
  group_by(continent, year) %>%
  summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
| /DCamp/Intro Tidyverse/Grouping Summarizing/Summarizing by two variables.R | no_license | shinichimatsuda/R_Training | R | false | false | 249 | r | library(gapminder)
library(dplyr)
# Find median life expectancy and maximum GDP per capita in each year/continent combination
gapminder %>%
group_by(continent, year) %>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
|
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## This function creates a special "matrix" object that can cache its inverse.
# NOTE(review): clearing the global environment from inside a script is an
# anti-pattern -- it destroys whatever workspace the caller had.  Kept
# unchanged here for fidelity; consider removing it.
rm(list=ls())
# Confirm the workspace is now empty.
ls()
# Create a special "matrix" object that can cache its inverse.
#
# Returns a list of four accessor functions closing over the matrix `x`
# and the cached inverse:
#   set(y)            -- replace the matrix and invalidate the cache
#   get()             -- return the stored matrix
#   setInverse(inv)   -- store a computed inverse in the cache
#   getInverse()      -- return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replace the stored matrix and drop any stale cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) cached_inverse <<- inverse
  getInverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it retrieves the inverse from the cache
## instead of recomputing it.  Extra arguments in `...` are forwarded to
## solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'.
  j <- x$getInverse()
  if (!is.null(j)) {
    ## Bug fix: the original evaluated the bare symbol `message` (a no-op)
    ## instead of calling it, so cache hits were silent.
    message("getting cached data")
    return(j)
  }
  mat <- x$get()
  j <- solve(mat, ...)
  ## Store the freshly computed inverse so later calls can reuse it.
  x$setInverse(j)
  j
}
########## Checking the solution ##########
my_matrix <- makeCacheMatrix(matrix(rnorm(16), 4, 4))
my_matrix$get()
# getInverse() is NULL here: no inverse has been computed yet.
my_matrix$getInverse()
cacheSolve(my_matrix)
# Second call: the inverse is served from the cache, not recomputed.
cacheSolve(my_matrix)
# getInverse() now returns the inverse that cacheSolve() stored.
my_matrix$getInverse()
####### Another example #######
# set() replaces the matrix and invalidates the cached inverse.
my_matrix$set(matrix(c(5, 7, 9, 11), 2, 2))
my_matrix$get()
# NULL again: the cache was cleared by set().
my_matrix$getInverse()
cacheSolve(my_matrix)
# Cached on the second call, as before.
cacheSolve(my_matrix)
# The cached inverse for the new matrix.
my_matrix$getInverse()
| /MatrixAssignmentweek3.R | no_license | ABANTAKM/ProgrammingAssignment2 | R | false | false | 2,455 | r | ## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## Below are a pair of functions that are used to create a special object that
## stores a matrix and caches its inverse.
## This function creates a special "matrix" object that can cache its inverse.
rm(list=ls())
ls()
makeCacheMatrix <- function(x = matrix()) {
j <- NULL
set <- function(y){
x <<- y
j <<- NULL
}
get <- function()x
setInverse <- function(inverse) j <<- inverse
getInverse <- function() j
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then it should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'#############
j <- x$getInverse()
if(!is.null(j)){
message
return(j)
}
mat <- x$get()
j <- solve(mat,...)
x$setInverse(j)
j
}
##########Checking the Solution#########################
my_matrix <- makeCacheMatrix(matrix(rnorm(16), 4, 4))
my_matrix$get()
#########my_matrix$getInverse() produces NULL value as no computations have been done##############
my_matrix$getInverse()
cacheSolve(my_matrix)
#######inverse stored in cache########
cacheSolve(my_matrix)
######### Even though no calculations are performed but because of using cache,###########
#########my_matrix$getInverse()provides inverse stored in cache######
my_matrix$getInverse()
####### Another Example #############
my_matrix$set(matrix(c(5, 7, 9, 11), 2, 2))
my_matrix$get()
#########my_matrix$getInverse() produces NULL value as no computations have been done##############
my_matrix$getInverse()
cacheSolve(my_matrix)
#######inverse stored in cache########
cacheSolve(my_matrix)
######### Even though no calculations are performed but because of using cache,###########
#########my_matrix$getInverse()provides inverse stored in cache######
my_matrix$getInverse()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepareData.R
\name{prepareData}
\alias{prepareData}
\title{Prepare data for estimation}
\usage{
prepareData(Y, W, X, match_on, trimming = NULL, model_options, M_matches,
J_var_matches)
}
\arguments{
\item{Y}{A response vector (1 x n)}
\item{W}{A treatment vector (1 x n) with numerical values indicating
treatment groups}
\item{X}{A covariate matrix (p x n) with no intercept. When
match_on="existing", then X must be a vector (1 x n) of user-specified
propensity scores.}
\item{match_on}{User specifies "covariates" to match on raw covariates, or
"existing" to match on user-supplied propensity score values, or "polr" or
"multinom" to fit a propensity score model.}
\item{trimming}{An indicator of whether to trim the sample to ensure overlap}
\item{model_options}{A list of the options to pass to propensity model.
Currently under development. Can only pass reference level to multinomial
logistic regression.}
\item{M_matches}{Number of matches per unit for imputing potential outcomes,
as in Abadie and Imbens (2006).}
\item{J_var_matches}{Number of matches when estimating \eqn{\sigma^2(X,W)} as
in Abadie and Imbens (2006).}
}
\value{
A list of information, including the \code{X, W, Y} arguments after
sorting observations, and information on \code{unit_ids}, etc.
}
\description{
A series of checks, tests, re-ordering, and other operations to prepare the
data for matching. This function can be run standalone, before running
\code{\link{multiMatch}}.
}
\examples{
sim_data <- multilevelMatching::simulated_data
Y <- sim_data$outcome
W <- sim_data$treatment
X <- as.matrix(sim_data[ ,-(1:2)])
names(Y) <- paste0("ID", 1:length(Y))
trimming <- FALSE
method <- c("covariates", "polr", "multinom")[2]
prepared_data <- prepareData(
Y = Y,
W = W,
X = X,
match_on = "polr",
trimming = FALSE,
model_options = list(reference_level = sort(W)[1]),
M_matches = 3,
J_var_matches = 2
)
}
| /man/prepareData.Rd | no_license | Crazyoumashu/multilevelMatching | R | false | true | 2,013 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepareData.R
\name{prepareData}
\alias{prepareData}
\title{Prepare data for estimation}
\usage{
prepareData(Y, W, X, match_on, trimming = NULL, model_options, M_matches,
J_var_matches)
}
\arguments{
\item{Y}{A response vector (1 x n)}
\item{W}{A treatment vector (1 x n) with numerical values indicating
treatment groups}
\item{X}{A covariate matrix (p x n) with no intercept. When
match_on="existing", then X must be a vector (1 x n) of user-specified
propensity scores.}
\item{match_on}{User specifies "covariates" to match on raw covariates, or
"existing" to match on user-supplied propensity score values, or "polr" or
"multinom" to fit a propensity score model.}
\item{trimming}{an indicator of whether trimming the sample to ensure overlap}
\item{model_options}{A list of the options to pass to propensity model.
Currently under development. Can only pass reference level to multinomial
logistic regression.}
\item{M_matches}{Number of matches per unit for imputing potential outcomes,
as in Abadie and Imbens (2006).}
\item{J_var_matches}{Number of matches when estimating \eqn{\sigma^2(X,W)} as
in Abadie and Imbens (2006).}
}
\value{
A list of information, including the \code{X, W, Y} arguments after
sorting observations, and information on \code{unit_ids}, etc.
}
\description{
A series of checks, tests, re-ordering, and other operations to prepare the
data for matching. This function can be run standalone, before running
\code{\link{multiMatch}}.
}
\examples{
sim_data <- multilevelMatching::simulated_data
Y <- sim_data$outcome
W <- sim_data$treatment
X <- as.matrix(sim_data[ ,-(1:2)])
names(Y) <- paste0("ID", 1:length(Y))
trimming <- FALSE
method <- c("covariates", "polr", "multinom")[2]
prepared_data <- prepareData(
Y = Y,
W = W,
X = X,
match_on = "polr",
trimming = FALSE,
model_options = list(reference_level = sort(W)[1]),
M_matches = 3,
J_var_matches = 2
)
}
|
#' 1-D surface rainfall-runoff finite difference simulator.
#'
#' Thin R wrapper around the Fortran routine \code{f_runoff_1d}
#' (see Fortran procedure in `../src'). Scalar arguments are recycled
#' to the lengths the Fortran code expects, rainfall is converted from
#' mm/hr to m/sec, and only the space/time slices requested in
#' \code{return.config} are extracted and returned.
#'
#' @param dx spatial step size (meters) of the numerical grid.
#' @param nx number of cells.
#' @param dt time step size (seconds).
#' @param nt number of time steps to simulate.
#' @param h0
#'      initial flowdepth. Scalar, or vector of length \code{nx + 1}.
#' @param q0
#'      initial discharge. Scalar, or vector of length \code{nx + 1}.
#' @param qt.ub
#'      upstream discharge (that is, inflow) at all times.
#'      Scalar, or vector of length \code{nt}.
#' @param rain rainfall input (mm/hr).
#'      Scalar, or \code{nx} by \code{nt} matrix,
#'      one column for each time step.
#'      A vector of length \code{nt} is also accepted and is taken as
#'      spatially uniform rain, one value per time step.
#' @param rough roughness. Scalar, or vector of length \code{nx}.
#' @param s0 scalar, or vector of length \code{nx}.
#'      Presumably the channel bed slope -- confirm against the
#'      Fortran source.
#' @param return.config
#'      List containing members \code{discharge} and \code{flowdepth};
#'      if either is missing, that output is not returned. Each member
#'      is a list with elements \code{space.idx} and \code{time.idx},
#'      the cell and time-step indices at which to extract the solution.
#'
#' @return
#' A list with members \code{discharge} and \code{flowdepth},
#' each being a matrix.
#' Each row is the requested times at one requested location;
#' each col is a temporal snapshot (at one time step)
#' for the requested locations.
#' Flow velocity can be obtained as \code{discharge / flowdepth}.
#' (Note: \code{dim} is assigned only when both the space and the time
#' index vectors have length > 1; otherwise that member is returned as
#' a plain vector.)
#'
#' @export
#' @useDynLib AnchoredInversionClient f_runoff_1d
runoff <- function(
    dx, nx, dt, nt,
    h0 = 0, q0 = 0, qt.ub = 0,
    rain, rough, s0,
    return.config
        # List containing members 'discharge' and 'flowdepth'.
        # If either is missing, don't return that data.
        # Each member contains elements 'space.idx' and 'time.idx'.
    )
{
    # Recycle scalars to the vector lengths expected by the Fortran
    # routine: nx + 1 grid nodes for the state variables, nt time steps
    # for the upstream boundary, nx cells for the physical parameters.
    if (length(h0) == 1) h0 <- rep(h0, nx + 1)
    if (length(q0) == 1) q0 <- rep(q0, nx + 1)
    if (length(qt.ub) == 1) qt.ub <- rep(qt.ub, nt)
    if (length(rough) == 1) rough <- rep(rough, nx)
    if (length(s0) == 1) s0 <- rep(s0, nx)

    rain <- rain / 1000 / 3600
        # mm/hr --> m/sec
    # Expand rain to a full nx-by-nt matrix: a scalar is uniform in
    # space and time; a length-nt vector is uniform in space only.
    if (length(rain) == 1)
        rain <- matrix(rain, nx, nt)
    else if (length(rain) == nt)
        rain <- matrix(rain, nrow = nx, ncol = nt, byrow = TRUE)
    else
        stopifnot(is.matrix(rain), dim(rain) == c(nx, nt))

    # Requested output indices; any of these may be NULL when that
    # output (or dimension) was not requested.
    ret.discharge.space <- return.config$discharge$space.idx
        # May be NULL.
    ret.discharge.time <- return.config$discharge$time.idx
        # May be NULL.
    ret.flowdepth.space <- return.config$flowdepth$space.idx
        # May be NULL.
    ret.flowdepth.time <- return.config$flowdepth$time.idx
        # May be NULL.

    # Output buffers 'discharge'/'flowdepth' are sized to exactly the
    # requested (space x time) slices; 'flag' reports numerical failure.
    z <- .Fortran(f_runoff_1d,
        dx = as.double(dx),
        nx = as.integer(nx),
        dt = as.double(dt),
        nt = as.integer(nt),
        h0 = as.double(h0),
        q0 = as.double(q0),
        qt_ub = as.double(qt.ub),
        rain = as.double(rain),
        rough = as.double(rough),
        s0 = as.double(s0),
        discharge_space_idx = as.integer(ret.discharge.space),
        discharge_space_n = as.integer(length(ret.discharge.space)),
        discharge_time_idx = as.integer(ret.discharge.time),
        discharge_time_n = as.integer(length(ret.discharge.time)),
        flowdepth_space_idx = as.integer(ret.flowdepth.space),
        flowdepth_space_n = as.integer(length(ret.flowdepth.space)),
        flowdepth_time_idx = as.integer(ret.flowdepth.time),
        flowdepth_time_n = as.integer(length(ret.flowdepth.time)),
        discharge = double(length(ret.discharge.space) *
            length(ret.discharge.time)),
        flowdepth = double(length(ret.flowdepth.space) *
            length(ret.flowdepth.time)),
        flag = integer(1)
        )

    # A nonzero flag means the Fortran solver hit its stability check.
    if (z$flag != 0)
        stop('Numerical stability criterion failed')
    else
    {
        z <- z[c('discharge', 'flowdepth')]
        # Reshape each requested output to (space x time). dim is set
        # only when both index vectors have length > 1; with a single
        # index the member stays a plain vector. Outputs that were not
        # requested (NULL indices) are dropped from the result.
        if (length(ret.discharge.space) && length(ret.discharge.time))
        {
            if (length(ret.discharge.space) > 1L &&
                length(ret.discharge.time) > 1L)
                dim(z$discharge) <- c(length(ret.discharge.space),
                    length(ret.discharge.time))
        } else
            z$discharge <- NULL
        if (length(ret.flowdepth.space) && length(ret.flowdepth.time))
        {
            if (length(ret.flowdepth.space) > 1L &&
                length(ret.flowdepth.time) > 1L)
                dim(z$flowdepth) <- c(length(ret.flowdepth.space),
                    length(ret.flowdepth.time))
        } else
            z$flowdepth <- NULL
        z
    }
}
# Construct a synthetic 1-D rainfall-runoff example problem.
#
# A Manning's-roughness field is derived from a random window of the
# 'Denali' data set, two synthetic rain events are defined, and the
# forward simulator 'runoff' is run on the field to produce synthetic
# forward data for an anchored-inversion test case.
#
# Args:
#   n.x:  length (number of cells) of the window sampled from Denali.
#   seed: RNG seed; when NULL a random seed in 1..1000 is drawn.
#
# Returns a list with the grid, the (transformed) field, the field and
# forward transforms, the forward function, the forward data, and the
# (empty) linear data.
runoff.1d <- function(
    n.x = 100,
    seed = NULL
    )
{
    if (is.null(seed)) {
        seed <- sample(1000, 1)
    }
    set.seed(seed)

    #----------------------
    # read in data
    #----------------------
    # data(volcano)
    # myfield <- volcano[, sample(ncol(volcano), 1)]
    #     # Vector of length 87.
    # myfield <- average.line(myfield, 2) $y
    data(Denali)
    i <- sample(length(Denali$data) - n.x, 1)
    myfield <- Denali$data[i : (i + n.x)]
    myfield <- average.line(myfield, 2)$y

    mygrid <- list(
        from = 2,
        by = 4,
        len = length(myfield)
        )
        # For the runoff model to run stably,
        # space step should be large compared to time step.

    myfield <- myfield - (5 * min(myfield) - max(myfield)) / (5 - 1)
        # Scale the data to make the value range span 5 times.
    myfield <- myfield / mean(myfield) * .02
        # Make the average close to .02,
        # so that this data are reasonable as synthetic
        # Manning's roughness coef.

    # Field values live on a log scale during inversion.
    f.field.transform <- function(x, reverse = FALSE)
        log.transform(x, lower = 1e-10, reverse = reverse)

    myfield <- f.field.transform(myfield)

    #---------------------
    # linear data
    #---------------------
    n.linear <- 0
    linear.data <- NULL

    #---------------------------------
    # forward function and data
    #---------------------------------
    rain.config <- list(
        # Can have multiple elements of the same structure.
        # Each element represents one experiment, i.e. one rain event.
        list(
            dt = 1,
                # Time step in seconds.
                # An integer.
                # Make 60 divisible by 'dt'.
            t.total = 180,
                # Total experiment time in minutes.
                # An integer.
            rain.stop = 60,
                # Rain stops at the end of this minute.
                # (Rain starts at the beginning of the whole period.
                # It is useless to consider otherwise.)
                # An integer.
            rain.int = runif(1, 2, 10)
                # Rain intensity in mm/hr, constant during this period.
            ),
        list(
            dt = 1,
            t.total = 240,
            rain.stop = 30,
            rain.int = runif(1, 10, 30)
            )
        )

    # Expand each rain event into the numerical inputs of 'runoff'.
    runoff.config <- lapply(rain.config,
        function(x) {
            list(
                dt = x$dt,
                nt = ceiling(x$t.total * 60 / x$dt),
                    # Total number of numerical time steps.
                rain = rep(c(x$rain.int, 0),
                    c(x$rain.stop, x$t.total - x$rain.stop) * 60 / x$dt)
                    # Rain intensity vector corresponding to each
                    # numerical time step.
                )
        } )

    # What model output to take as forward data?
    forward.config <- list(
        # Have as many elements as 'runoff.config' does.
        list(
            discharge = list(
                space.idx = prod(mygrid$len),
                time.idx = seq(from = 1,
                    by = round(600 / rain.config[[1]]$dt),
                        # One measurement per 10 minutes.
                    to = runoff.config[[1]]$nt)
                    # Time index at which observations are extracted.
                )
            ),
        list(
            flowdepth = list(
                space.idx = prod(mygrid$len),
                time.idx = seq(from = 30,
                    by = round(300 / rain.config[[2]]$dt),
                        # One measurement per 5 minutes.
                    to = runoff.config[[2]]$nt)
                )
            )
        )

    # Run the simulator once per rain event; numerical failures become
    # NA placeholders of the correct length instead of errors.
    f.runoff <- function(
        x,  # roughness field in its natural unit.
        grid = mygrid,
        runoff.config,
        return.config
        )
    {
        z <- vector('list', length(runoff.config))
        for (i.config in seq_along(z))
        {
            # FIX: previously called AnchoredInversionClient::runoff.1d
            # (this driver itself, whose signature is (n.x, seed)); the
            # argument list below matches the simulator 'runoff', and the
            # tryCatch was silently masking the resulting error as NA.
            zz <- tryCatch(
                do.call(AnchoredInversionClient::runoff,
                    c(list(
                        dx = grid$by,
                        nx = grid$len,
                        h0 = 0, q0 = 0, qt.ub = 0,
                        rough = x,
                        s0 = .01,
                        return.config = return.config[[i.config]]),
                      runoff.config[[i.config]]
                    )
                ),
                error = function(...) NULL
                )
            if (is.null(zz))
                z[[i.config]] <- lapply(return.config[[i.config]],
                    function(x) rep(NA, prod(sapply(x, length))))
            else
                z[[i.config]] <- zz
        }
        z
    }

    f.forward.transform <- function(x, reverse = FALSE)
    {
        if (reverse)
            log.transform(x, reverse = TRUE) + 1e-100
        else
            log.transform(x + 1e-100)
            # Guard against '0' values in 'x'.
    }

    # Map one field (or a list of fields) in the transformed unit to
    # forward data in the transformed unit.
    forward.fun <- function(
        x,  # 'x' is a list of fields in the _transformed_ unit.
        grid,
        forward.config,
        runoff.config
        )
    {
        x.is.list <- is.list(x)
        if (!x.is.list)
            x <- list(x)
        z.runoff <- lapply(
            lapply(x, f.field.transform, reverse = TRUE),
                # FIX: spell out 'reverse'; 'rev = TRUE' relied on
                # partial argument matching.
            f.runoff,
            grid = grid,
            runoff.config = runoff.config,
            return.config = forward.config
            )
        result <- lapply(z.runoff, function(x)
            f.forward.transform(unname(unlist(x))))
        if (x.is.list)
            result
        else
            result[[1]]
    }

    forward.args <- list(
        grid = mygrid,
        forward.config = forward.config,
        runoff.config = runoff.config)
    f.forward <- function(x) { do.call(forward.fun, c(list(x), forward.args)) }
    forward.data <- f.forward(myfield)

    list(
        mygrid = mygrid,
        myfield = myfield,
        f.field.transform = f.field.transform,
        f.forward = f.forward,
        f.forward.transform = f.forward.transform,
        forward.data = forward.data,
        linear.data = linear.data
        )
}
| /archive/runoff.R | no_license | anchored-inversion/client.R | R | false | false | 10,549 | r | #' 1-D surface rainfall-runoff finite difference simulator.
#'
#' See Fortran procedure in `../src'.
#'
#' @param dx spatial step size (meters) of the numerical grid.
#' @param nx number of cells.
#' @param dt time step size (seconds).
#' @param nt number of time steps to simulate.
#' @param h0
#' initial flowdepth. Scalar, or vector of length \code{nx + 1}.
#' @param q0
#' initial discharge. Scalar, or vector of length \code{nx + 1}.
#' @param qt.ub
#' upstream discharge (that is, inflow) at all times.
#' Scalar, or vector of length \code{nt}.
#' @param rain rainfall input (mm/hr).
#' Scalar, or \code{nx} by \code{nt} matrix,
#' one column for each time step.
#' @param rough roughness. Scalar, or vector of length \code{nx}.
#' @param s0 scalar, or vector of length \code{nx}.
#' @param return.config
#'
#' @return
#' A list with members \code{discharge} and \code{flowdepth},
#' each being a matrix.
#' Each row is the requested times at one requested location;
#' each col is a temporal snapshot (at one time step)
#' for the requested locations.
#' Flow velocity can be obtained as \code{discharge / flowdepth}.
#'
#' @export
#' @useDynLib AnchoredInversionClient f_runoff_1d
runoff <- function(
    dx, nx, dt, nt,
    h0 = 0, q0 = 0, qt.ub = 0,
    rain, rough, s0,
    return.config
        # List containing members 'discharge' and 'flowdepth'.
        # If either is missing, don't return that data.
        # Each member contains elements 'space.idx' and 'time.idx'.
    )
{
    # Recycle scalars to the vector lengths expected by the Fortran
    # routine: nx + 1 grid nodes for the state variables, nt time steps
    # for the upstream boundary, nx cells for the physical parameters.
    if (length(h0) == 1) h0 <- rep(h0, nx + 1)
    if (length(q0) == 1) q0 <- rep(q0, nx + 1)
    if (length(qt.ub) == 1) qt.ub <- rep(qt.ub, nt)
    if (length(rough) == 1) rough <- rep(rough, nx)
    if (length(s0) == 1) s0 <- rep(s0, nx)

    rain <- rain / 1000 / 3600
        # mm/hr --> m/sec
    # Expand rain to a full nx-by-nt matrix: a scalar is uniform in
    # space and time; a length-nt vector is uniform in space only.
    if (length(rain) == 1)
        rain <- matrix(rain, nx, nt)
    else if (length(rain) == nt)
        rain <- matrix(rain, nrow = nx, ncol = nt, byrow = TRUE)
    else
        stopifnot(is.matrix(rain), dim(rain) == c(nx, nt))

    # Requested output indices; any of these may be NULL when that
    # output (or dimension) was not requested.
    ret.discharge.space <- return.config$discharge$space.idx
        # May be NULL.
    ret.discharge.time <- return.config$discharge$time.idx
        # May be NULL.
    ret.flowdepth.space <- return.config$flowdepth$space.idx
        # May be NULL.
    ret.flowdepth.time <- return.config$flowdepth$time.idx
        # May be NULL.

    # Output buffers 'discharge'/'flowdepth' are sized to exactly the
    # requested (space x time) slices; 'flag' reports numerical failure.
    z <- .Fortran(f_runoff_1d,
        dx = as.double(dx),
        nx = as.integer(nx),
        dt = as.double(dt),
        nt = as.integer(nt),
        h0 = as.double(h0),
        q0 = as.double(q0),
        qt_ub = as.double(qt.ub),
        rain = as.double(rain),
        rough = as.double(rough),
        s0 = as.double(s0),
        discharge_space_idx = as.integer(ret.discharge.space),
        discharge_space_n = as.integer(length(ret.discharge.space)),
        discharge_time_idx = as.integer(ret.discharge.time),
        discharge_time_n = as.integer(length(ret.discharge.time)),
        flowdepth_space_idx = as.integer(ret.flowdepth.space),
        flowdepth_space_n = as.integer(length(ret.flowdepth.space)),
        flowdepth_time_idx = as.integer(ret.flowdepth.time),
        flowdepth_time_n = as.integer(length(ret.flowdepth.time)),
        discharge = double(length(ret.discharge.space) *
            length(ret.discharge.time)),
        flowdepth = double(length(ret.flowdepth.space) *
            length(ret.flowdepth.time)),
        flag = integer(1)
        )

    # A nonzero flag means the Fortran solver hit its stability check.
    if (z$flag != 0)
        stop('Numerical stability criterion failed')
    else
    {
        z <- z[c('discharge', 'flowdepth')]
        # Reshape each requested output to (space x time). dim is set
        # only when both index vectors have length > 1; with a single
        # index the member stays a plain vector. Outputs that were not
        # requested (NULL indices) are dropped from the result.
        if (length(ret.discharge.space) && length(ret.discharge.time))
        {
            if (length(ret.discharge.space) > 1L &&
                length(ret.discharge.time) > 1L)
                dim(z$discharge) <- c(length(ret.discharge.space),
                    length(ret.discharge.time))
        } else
            z$discharge <- NULL
        if (length(ret.flowdepth.space) && length(ret.flowdepth.time))
        {
            if (length(ret.flowdepth.space) > 1L &&
                length(ret.flowdepth.time) > 1L)
                dim(z$flowdepth) <- c(length(ret.flowdepth.space),
                    length(ret.flowdepth.time))
        } else
            z$flowdepth <- NULL
        z
    }
}
# Construct a synthetic 1-D rainfall-runoff example problem.
#
# A Manning's-roughness field is derived from a random window of the
# 'Denali' data set, two synthetic rain events are defined, and the
# forward simulator 'runoff' is run on the field to produce synthetic
# forward data for an anchored-inversion test case.
#
# Args:
#   n.x:  length (number of cells) of the window sampled from Denali.
#   seed: RNG seed; when NULL a random seed in 1..1000 is drawn.
#
# Returns a list with the grid, the (transformed) field, the field and
# forward transforms, the forward function, the forward data, and the
# (empty) linear data.
runoff.1d <- function(
    n.x = 100,
    seed = NULL
    )
{
    if (is.null(seed)) {
        seed <- sample(1000, 1)
    }
    set.seed(seed)

    #----------------------
    # read in data
    #----------------------
    # data(volcano)
    # myfield <- volcano[, sample(ncol(volcano), 1)]
    #     # Vector of length 87.
    # myfield <- average.line(myfield, 2) $y
    data(Denali)
    i <- sample(length(Denali$data) - n.x, 1)
    myfield <- Denali$data[i : (i + n.x)]
    myfield <- average.line(myfield, 2)$y

    mygrid <- list(
        from = 2,
        by = 4,
        len = length(myfield)
        )
        # For the runoff model to run stably,
        # space step should be large compared to time step.

    myfield <- myfield - (5 * min(myfield) - max(myfield)) / (5 - 1)
        # Scale the data to make the value range span 5 times.
    myfield <- myfield / mean(myfield) * .02
        # Make the average close to .02,
        # so that this data are reasonable as synthetic
        # Manning's roughness coef.

    # Field values live on a log scale during inversion.
    f.field.transform <- function(x, reverse = FALSE)
        log.transform(x, lower = 1e-10, reverse = reverse)

    myfield <- f.field.transform(myfield)

    #---------------------
    # linear data
    #---------------------
    n.linear <- 0
    linear.data <- NULL

    #---------------------------------
    # forward function and data
    #---------------------------------
    rain.config <- list(
        # Can have multiple elements of the same structure.
        # Each element represents one experiment, i.e. one rain event.
        list(
            dt = 1,
                # Time step in seconds.
                # An integer.
                # Make 60 divisible by 'dt'.
            t.total = 180,
                # Total experiment time in minutes.
                # An integer.
            rain.stop = 60,
                # Rain stops at the end of this minute.
                # (Rain starts at the beginning of the whole period.
                # It is useless to consider otherwise.)
                # An integer.
            rain.int = runif(1, 2, 10)
                # Rain intensity in mm/hr, constant during this period.
            ),
        list(
            dt = 1,
            t.total = 240,
            rain.stop = 30,
            rain.int = runif(1, 10, 30)
            )
        )

    # Expand each rain event into the numerical inputs of 'runoff'.
    runoff.config <- lapply(rain.config,
        function(x) {
            list(
                dt = x$dt,
                nt = ceiling(x$t.total * 60 / x$dt),
                    # Total number of numerical time steps.
                rain = rep(c(x$rain.int, 0),
                    c(x$rain.stop, x$t.total - x$rain.stop) * 60 / x$dt)
                    # Rain intensity vector corresponding to each
                    # numerical time step.
                )
        } )

    # What model output to take as forward data?
    forward.config <- list(
        # Have as many elements as 'runoff.config' does.
        list(
            discharge = list(
                space.idx = prod(mygrid$len),
                time.idx = seq(from = 1,
                    by = round(600 / rain.config[[1]]$dt),
                        # One measurement per 10 minutes.
                    to = runoff.config[[1]]$nt)
                    # Time index at which observations are extracted.
                )
            ),
        list(
            flowdepth = list(
                space.idx = prod(mygrid$len),
                time.idx = seq(from = 30,
                    by = round(300 / rain.config[[2]]$dt),
                        # One measurement per 5 minutes.
                    to = runoff.config[[2]]$nt)
                )
            )
        )

    # Run the simulator once per rain event; numerical failures become
    # NA placeholders of the correct length instead of errors.
    f.runoff <- function(
        x,  # roughness field in its natural unit.
        grid = mygrid,
        runoff.config,
        return.config
        )
    {
        z <- vector('list', length(runoff.config))
        for (i.config in seq_along(z))
        {
            # FIX: previously called AnchoredInversionClient::runoff.1d
            # (this driver itself, whose signature is (n.x, seed)); the
            # argument list below matches the simulator 'runoff', and the
            # tryCatch was silently masking the resulting error as NA.
            zz <- tryCatch(
                do.call(AnchoredInversionClient::runoff,
                    c(list(
                        dx = grid$by,
                        nx = grid$len,
                        h0 = 0, q0 = 0, qt.ub = 0,
                        rough = x,
                        s0 = .01,
                        return.config = return.config[[i.config]]),
                      runoff.config[[i.config]]
                    )
                ),
                error = function(...) NULL
                )
            if (is.null(zz))
                z[[i.config]] <- lapply(return.config[[i.config]],
                    function(x) rep(NA, prod(sapply(x, length))))
            else
                z[[i.config]] <- zz
        }
        z
    }

    f.forward.transform <- function(x, reverse = FALSE)
    {
        if (reverse)
            log.transform(x, reverse = TRUE) + 1e-100
        else
            log.transform(x + 1e-100)
            # Guard against '0' values in 'x'.
    }

    # Map one field (or a list of fields) in the transformed unit to
    # forward data in the transformed unit.
    forward.fun <- function(
        x,  # 'x' is a list of fields in the _transformed_ unit.
        grid,
        forward.config,
        runoff.config
        )
    {
        x.is.list <- is.list(x)
        if (!x.is.list)
            x <- list(x)
        z.runoff <- lapply(
            lapply(x, f.field.transform, reverse = TRUE),
                # FIX: spell out 'reverse'; 'rev = TRUE' relied on
                # partial argument matching.
            f.runoff,
            grid = grid,
            runoff.config = runoff.config,
            return.config = forward.config
            )
        result <- lapply(z.runoff, function(x)
            f.forward.transform(unname(unlist(x))))
        if (x.is.list)
            result
        else
            result[[1]]
    }

    forward.args <- list(
        grid = mygrid,
        forward.config = forward.config,
        runoff.config = runoff.config)
    f.forward <- function(x) { do.call(forward.fun, c(list(x), forward.args)) }
    forward.data <- f.forward(myfield)

    list(
        mygrid = mygrid,
        myfield = myfield,
        f.field.transform = f.field.transform,
        f.forward = f.forward,
        f.forward.transform = f.forward.transform,
        forward.data = forward.data,
        linear.data = linear.data
        )
}
|
#' @title Class to complete the csv with the preprocessed instance and synsets
#' @description Complete the csv with the preprocessed instance and synsets.
#' @docType class
#' @usage TeeCSVFromSynsetFeatureVectorPipe$new(propertyName = "",
#' alwaysBeforeDeps = list(),
#' notAfterDeps = list())
#' @param propertyName (character) Name of the property associated with the pipe.
#' @param alwaysBeforeDeps (list) The dependences alwaysBefore (pipes that must
#' be executed before this one).
#' @param notAfterDeps (list) The dependences notAfter (pipes that cannot be
#' executed after this one).
#' @details It is necessary to identify the properties associated with the
#' synsets that the instance will have, so as not to include them in the
#' data.frame.
#'
#' @section Inherit:
#' This class inherit from \code{\link{PipeGeneric}} and implements the
#' \code{pipe} abstract function.
#' @section Methods:
#' \itemize{
#' \item{\bold{pipe}}{
#' Function that completes the csv with the preprocessed instance and synsets.
#' \itemize{
#' \item{\emph{Usage}}{
#'
#' \code{pipe(instance, withData = TRUE, withSource = TRUE,
#' listPropertySynsets = c("synsetVector", "synsetFeatureVector"),
#' outPutPath = "dataFrameAllSynsets.csv")}
#' }
#' \item{\emph{Value}}{
#'
#' The instance with the modifications that have occurred in the pipe.
#' }
#' \item{\emph{Arguments}}{
#' \itemize{
#' \item{\strong{instance}}{
#' (Instance) Instance to preprocess.
#' }
#' \item{\strong{withData}}{
#' (logical) Indicate if the data is added to csv.
#' }
#' \item{\strong{withSource}}{
#' (logical) Indicate if the source is added to csv.
#' }
#' \item{\strong{listPropertySynsets}}{
#' (character) vector indicating properties related to synsets.
#' }
#' \item{\strong{outPutPath}}{
#' (character) name of the csv to store synsets and properties of the instance.
#' }
#' }
#' }
#' }
#' }
#' }
#'
#' @seealso \code{\link{PipeGeneric}}, \code{\link{Instance}}
#'
#' @import R6
#' @export TeeCSVFromSynsetFeatureVectorPipe
TeeCSVFromSynsetFeatureVectorPipe <- R6Class(
    "TeeCSVFromSynsetFeatureVectorPipe",
    inherit = PipeGeneric,
    public = list(
        # Constructor: type-check every argument, then delegate to
        # PipeGeneric$initialize.
        initialize = function(propertyName = "",
                              alwaysBeforeDeps = list(),
                              notAfterDeps = list()) {
            if (!"character" %in% class(propertyName)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][initialize][Error]
                Checking the type of the variable: propertyName ",
                     class(propertyName))
            }
            if (!"list" %in% class(alwaysBeforeDeps)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][initialize][Error]
                Checking the type of the variable: alwaysBeforeDeps ",
                     class(alwaysBeforeDeps))
            }
            if (!"list" %in% class(notAfterDeps)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][initialize][Error]
                Checking the type of the variable: notAfterDeps ",
                     class(notAfterDeps))
            }
            super$initialize(propertyName, alwaysBeforeDeps, notAfterDeps)
        },
        # Append the instance's path, data, source, date, non-synset
        # properties and synset feature values as one new row of the CSV
        # at 'outPutPath' (created if absent, read back and extended
        # otherwise). Returns the (unchanged) instance.
        pipe = function(instance, withData = TRUE, withSource = TRUE,
                        listPropertySynsets = c("synsetVector", "synsetFeatureVector"),
                        outPutPath = "dataFrameAllSynsets.csv") {
            if (!"Instance" %in% class(instance)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
                Checking the type of the variable: instance ",
                     class(instance))
            }
            if (!"logical" %in% class(withSource)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
                Checking the type of the variable: withSource ",
                     class(withSource))
            }
            if (!"logical" %in% class(withData)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
                Checking the type of the variable: withData ",
                     class(withData))
            }
            if (!"character" %in% class(listPropertySynsets)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
                Checking the type of the variable: listPropertySynsets ",
                     class(listPropertySynsets))
            }
            if (!"character" %in% class(outPutPath)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
                Checking the type of the variable: outPutPath ",
                     class(outPutPath))
            }
            if (!"csv" %in% file_ext(outPutPath)) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
                Checking the extension of the file: outPutPath ",
                     file_ext(outPutPath))
            }

            instance$addFlowPipes("TeeCSVFromSynsetFeatureVectorPipe")
            if (!instance$checkCompatibility("TeeCSVFromSynsetFeatureVectorPipe", self$getAlwaysBeforeDeps())) {
                stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error] Bad compatibility between Pipes.")
            }
            # Consistency: use self$ here as for getAlwaysBeforeDeps above
            # (both methods are inherited from PipeGeneric).
            instance$addBanPipes(unlist(self$getNotAfterDeps()))

            # Invalid instances are passed through without being recorded.
            if (!instance$isInstanceValid()) {
                return(instance)
            }

            # Read the accumulated data frame back, or start a fresh one.
            if (file.exists(outPutPath)) {
                dataFrameAllSynsets <- read.csv(file = outPutPath, header = TRUE,
                    sep = ";", dec = ".", fill = FALSE, stringsAsFactors = FALSE)
            } else {
                dataFrameAllSynsets <- data.frame()
            }

            pos <- dim(dataFrameAllSynsets)[1] + 1
            dataFrameAllSynsets[pos, "path"] <- instance$getPath()
            if (withData) {
                dataFrameAllSynsets[pos, "data"] <- instance$getData()
            }
            if (withSource) {
                # NOTE(review): paste0 here has no 'collapse'; if the
                # source unlists to length > 1 this yields a vector and
                # the single-cell assignment will fail. Confirm whether a
                # collapse (as in the properties loop below) was intended.
                dataFrameAllSynsets[pos, "source"] <- as.character(paste0(unlist(instance$getSource())))
            }
            dataFrameAllSynsets[pos, "date"] <- instance$getDate()

            # Copy every instance property except the synset-related ones,
            # collapsing multi-valued properties with '|'.
            namesPropertiesList <- as.list(instance$getNamesOfProperties())
            names(namesPropertiesList) <- instance$getNamesOfProperties()
            for (name in list.remove(namesPropertiesList, listPropertySynsets)) {
                dataFrameAllSynsets[pos, name] <-
                    paste0(unlist(instance$getSpecificProperty(name)), collapse = "|")
            }

            # One column per synset, holding its feature value.
            synsets <- instance$getSpecificProperty("synsetFeatureVector")
            synsetFeature <- synsets$getSynsetsFeature()
            for (synset in names(synsetFeature)) {
                dataFrameAllSynsets[pos, synset] <- synsetFeature[[synset]]
            }

            write.table(x = dataFrameAllSynsets,
                        file = outPutPath,
                        sep = ";",
                        dec = ".",
                        quote = TRUE,
                            # FIX: was 'T'; never use T/F for logicals.
                        col.names = TRUE,
                        row.names = FALSE,
                        qmethod = "double",
                        fileEncoding = "UTF-8")

            return(instance)
        }
    )
) | /content-preprocessorinr/pipes/TeeCSVFromSynsetFeatureVectorPipe.R | no_license | miferreiro/content-preprocessorinr | R | false | false | 6,956 | r | #' @title Class to complete the csv with the preprocessed instance and synsets
#' @description Complete the csv with the preprocessed instance and synsets.
#' @docType class
#' @usage TeeCSVFromSynsetFeatureVectorPipe$new(propertyName = "",
#' alwaysBeforeDeps = list(),
#' notAfterDeps = list())
#' @param propertyName (character) Name of the property associated with the pipe.
#' @param alwaysBeforeDeps (list) The dependences alwaysBefore (pipes that must
#' be executed before this one).
#' @param notAfterDeps (list) The dependences notAfter (pipes that cannot be
#' executed after this one).
#' @details It is necessary to identify the properties associated with the
#' synsets that the instance will have, so as not to include them in the
#' data.frame.
#'
#' @section Inherit:
#' This class inherit from \code{\link{PipeGeneric}} and implements the
#' \code{pipe} abstract function.
#' @section Methods:
#' \itemize{
#' \item{\bold{pipe}}{
#' Function that completes the csv with the preprocessed instance and synsets.
#' \itemize{
#' \item{\emph{Usage}}{
#'
#' \code{pipe(instance, withData = TRUE, withSource = TRUE,
#' listPropertySynsets = c("synsetVector", "synsetFeatureVector"),
#' outPutPath = "dataFrameAllSynsets.csv")}
#' }
#' \item{\emph{Value}}{
#'
#' The instance with the modifications that have occurred in the pipe.
#' }
#' \item{\emph{Arguments}}{
#' \itemize{
#' \item{\strong{instance}}{
#' (Instance) Instance to preprocess.
#' }
#' \item{\strong{withData}}{
#' (logical) Indicate if the data is added to csv.
#' }
#' \item{\strong{withSource}}{
#' (logical) Indicate if the source is added to csv.
#' }
#' \item{\strong{listPropertySynsets}}{
#' (character) vector indicating properties related to synsets.
#' }
#' \item{\strong{outPutPath}}{
#' (character) name of the csv to store synsets and properties of the instance.
#' }
#' }
#' }
#' }
#' }
#' }
#'
#' @seealso \code{\link{PipeGeneric}}, \code{\link{Instance}}
#'
#' @import R6
#' @export TeeCSVFromSynsetFeatureVectorPipe
TeeCSVFromSynsetFeatureVectorPipe <- R6Class(
"TeeCSVFromSynsetFeatureVectorPipe",
inherit = PipeGeneric,
public = list(
# Constructor: validates the argument types and delegates to the
# PipeGeneric constructor.
#
# propertyName: (character) name of the property associated with the pipe.
# alwaysBeforeDeps: (list) pipes that must be executed before this one.
# notAfterDeps: (list) pipes that cannot be executed after this one.
initialize = function(propertyName = "",
alwaysBeforeDeps = list(),
notAfterDeps = list()) {
if (!"character" %in% class(propertyName)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][initialize][Error]
Checking the type of the variable: propertyName ",
class(propertyName))
}
if (!"list" %in% class(alwaysBeforeDeps)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][initialize][Error]
Checking the type of the variable: alwaysBeforeDeps ",
class(alwaysBeforeDeps))
}
if (!"list" %in% class(notAfterDeps)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][initialize][Error]
Checking the type of the variable: notAfterDeps ",
class(notAfterDeps))
}
super$initialize(propertyName, alwaysBeforeDeps, notAfterDeps)
},
# Appends one row describing `instance` (its path, optionally its data and
# source, its date, its non-synset properties and its synset feature
# values) to the semicolon-separated CSV file at `outPutPath`, creating
# the file when it does not exist yet.
#
# instance: (Instance) instance to dump to the CSV.
# withData: (logical) include the instance data in the row.
# withSource: (logical) include the instance source in the row.
# listPropertySynsets: (character) property names related to synsets,
#   excluded from the generic property columns.
# outPutPath: (character) path of the CSV file; must end in ".csv".
#
# Returns the instance (unchanged) so the pipeline can continue.
pipe = function(instance, withData = TRUE, withSource = TRUE,
listPropertySynsets = c("synsetVector", "synsetFeatureVector"),
outPutPath = "dataFrameAllSynsets.csv") {
if (!"Instance" %in% class(instance)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
Checking the type of the variable: instance ",
class(instance))
}
if (!"logical" %in% class(withSource)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
Checking the type of the variable: withSource ",
class(withSource))
}
if (!"logical" %in% class(withData)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
Checking the type of the variable: withData ",
class(withData))
}
if (!"character" %in% class(listPropertySynsets)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
Checking the type of the variable: listPropertySynsets ",
class(listPropertySynsets))
}
if (!"character" %in% class(outPutPath)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
Checking the type of the variable: outPutPath ",
class(outPutPath))
}
# the output file must have a .csv extension.
if (!"csv" %in% file_ext(outPutPath)) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error]
Checking the extension of the file: outPutPath ",
file_ext(outPutPath))
}
# register this pipe in the instance's flow and check dependency order.
# NOTE(review): self$getAlwaysBeforeDeps() is used here but
# super$getNotAfterDeps() two lines below -- confirm both accessors are
# meant to be called on different objects.
instance$addFlowPipes("TeeCSVFromSynsetFeatureVectorPipe")
if (!instance$checkCompatibility("TeeCSVFromSynsetFeatureVectorPipe", self$getAlwaysBeforeDeps())) {
stop("[TeeCSVFromSynsetFeatureVectorPipe][pipe][Error] Bad compatibility between Pipes.")
}
instance$addBanPipes(unlist(super$getNotAfterDeps()))
# an invalid instance is passed through without touching the CSV.
if (!instance$isInstanceValid()) {
return(instance)
}
# load the existing CSV (if any) so the new row is appended to it.
if (file.exists(outPutPath)) {
dataFrameAllSynsets <- read.csv(file = outPutPath, header = TRUE,
sep = ";", dec = ".", fill = FALSE, stringsAsFactors = FALSE)
} else {
dataFrameAllSynsets <- data.frame()
}
# index of the new row: one past the current last row.
pos <- dim(dataFrameAllSynsets)[1] + 1
dataFrameAllSynsets[pos, "path"] <- instance$getPath()
if (withData) {
dataFrameAllSynsets[pos, "data"] <- instance$getData()
}
if (withSource) {
dataFrameAllSynsets[pos, "source"] <- as.character(paste0(unlist(instance$getSource())))
}
dataFrameAllSynsets[pos, "date"] <- instance$getDate()
# dump every property except the synset-related ones, collapsing
# multi-valued properties into one "|"-separated string.
namesPropertiesList <- as.list(instance$getNamesOfProperties())
names(namesPropertiesList) <- instance$getNamesOfProperties()
for (name in list.remove(namesPropertiesList, listPropertySynsets)) {
dataFrameAllSynsets[pos, name] <-
paste0(unlist(instance$getSpecificProperty(name)), collapse = "|")
}
# add one column per synset feature of the instance.
synsets <- instance$getSpecificProperty("synsetFeatureVector")
synsetFeature <- synsets$getSynsetsFeature()
for (synset in names(synsetFeature)) {
dataFrameAllSynsets[pos, synset] <- synsetFeature[[synset]]
}
# rewrite the whole table back to disk.
# NOTE(review): quote = T relies on T not being reassigned; TRUE would be
# safer (left unchanged here).
write.table(x = dataFrameAllSynsets,
file = outPutPath,
sep = ";",
dec = ".",
quote = T,
col.names = TRUE,
row.names = FALSE,
qmethod = c("double"),
fileEncoding = "UTF-8")
return(instance)
}
)
) |
##' QGIS Algorithm provided by SAGA Raster values to points (randomly) (saga:rastervaluestopointsrandomly)
##'
##' @title QGIS algorithm Raster values to points (randomly)
##'
##' @param GRID `raster` - Grid. Path to a raster layer.
##' @param FREQ `number` - Frequency. A numeric value.
##' @param POINTS `vectorDestination` - Points. Path for new vector layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying if the complete output of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or the first output (most likely the main one) should be read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * POINTS - outputVector - Points
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
saga_rastervaluestopointsrandomly <- function(GRID = qgisprocess::qgis_default_value(), FREQ = qgisprocess::qgis_default_value(), POINTS = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Fail fast when QGIS or the SAGA provider cannot run this algorithm.
  check_algorithm_necessities("saga:rastervaluestopointsrandomly")
  # Run the algorithm, forwarding any extra qgis_run_algorithm() arguments.
  result <- qgisprocess::qgis_run_algorithm(
    "saga:rastervaluestopointsrandomly",
    `GRID` = GRID, `FREQ` = FREQ, `POINTS` = POINTS, ...
  )
  # Hand back either only the POINTS output or the complete result object.
  if (!.complete_output) {
    return(qgisprocess::qgis_output(result, "POINTS"))
  }
  result
} | /R/saga_rastervaluestopointsrandomly.R | permissive | VB6Hobbyst7/r_package_qgis | R | false | false | 1,349 | r | ##' QGIS Algorithm provided by SAGA Raster values to points (randomly) (saga:rastervaluestopointsrandomly)
##'
##' @title QGIS algorithm Raster values to points (randomly)
##'
##' @param GRID `raster` - Grid. Path to a raster layer.
##' @param FREQ `number` - Frequency. A numeric value.
##' @param POINTS `vectorDestination` - Points. Path for new vector layer.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying if the complete output of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or the first output (most likely the main one) should be read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * POINTS - outputVector - Points
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
saga_rastervaluestopointsrandomly <- function(GRID = qgisprocess::qgis_default_value(), FREQ = qgisprocess::qgis_default_value(), POINTS = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
# make sure QGIS and the SAGA provider can run this algorithm.
check_algorithm_necessities("saga:rastervaluestopointsrandomly")
# run the algorithm, forwarding extra qgis_run_algorithm() arguments via ...
output <- qgisprocess::qgis_run_algorithm("saga:rastervaluestopointsrandomly", `GRID` = GRID, `FREQ` = FREQ, `POINTS` = POINTS,...)
# return either the complete result object or only the POINTS output.
if (.complete_output) {
return(output)
}
else{
qgisprocess::qgis_output(output, "POINTS")
}
} |
# Skeleton phase of the PC-stable structure learning algorithm: start from
# the complete undirected graph over the variables of `x` and, for
# d-separating sets of growing size, run conditional independence tests
# (one node pair at a time, via pc.heuristic) and keep only the arcs whose
# test p-value is below `alpha`.
#
# x: data set; its column names are the nodes of the network.
# cluster: forwarded to smartSapply (presumably a parallel backend -- confirm).
# whitelist, blacklist: arcs forced in / forbidden, forwarded to pc.heuristic.
# test: label of the conditional independence test to run.
# alpha: significance threshold an arc must beat to stay in the skeleton.
# B: forwarded to the tests (presumably a permutation count -- confirm).
# debug: if TRUE, print progress information.
#
# Returns the skeleton produced by cache.structure() with the per-pair test
# results attached as the "dsep.set" attribute.
pc.stable.backend = function(x, cluster = NULL, whitelist, blacklist, test,
alpha, B, debug = FALSE) {
nodes = names(x)
nnodes = length(nodes)
# NOTE(review): `mb` is assigned here but never used below.
mb = structure(vector(length(nodes), mode = "list"), names = nodes)
# initial skeleton: every unordered pair of nodes.
skeleton = subsets(nodes, 2)
node.pairs =
apply(skeleton, 1, function(x) list(arc = x, max.adjacent = nnodes - 1))
# upper bound on the neighbourhood size relevant to each pair.
nbr.size = rep(nnodes - 1, nnodes)
# find out which nodes are adjacent.
for (dsep.size in seq(from = 0, to = length(nodes) - 2)) {
# perform the conditional independence tests.
node.pairs[dsep.size <= nbr.size] =
smartSapply(cluster, node.pairs[dsep.size <= nbr.size], pc.heuristic, data = x, alpha = alpha,
B = B, whitelist = whitelist, blacklist = blacklist, test = test,
skeleton = skeleton, dsep.size = dsep.size, debug = debug)
# find out which undirected arcs are still present.
arcs.still.present = lapply(node.pairs, function(x) {
if (x$p.value < alpha)
return(x$arc)
else
return(NULL)
})
# update the skeleton.
skeleton = do.call(rbind, arcs.still.present)
# count how many nodes (at most) are adjacent to each of the endpoints.
nbr.size = sapply(node.pairs, `[[`, "max.adjacent")
# if that number is smaller than the current size of the d-separation set,
# there are no valid conditioning sets to test.
if (all(nbr.size <= dsep.size))
break
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* remaining arcs:\n")
print(arcs.rbind(skeleton, skeleton, reverse2 = TRUE))
}#THEN
}#FOR
# start encoding the bn object.
skeleton = cache.structure(nodes, arcs.rbind(skeleton, skeleton, reverse2 = TRUE))
# attach the d-separating sets.
attr(skeleton, "dsep.set") = node.pairs
return(skeleton)
}#PC.STABLE.OPTIMIZED
# Per-pair worker for pc.stable.backend: decide whether the undirected arc
# in `pair` survives conditional independence testing with d-separating
# sets of exactly `dsep.size` nodes, drawn from the neighbourhoods of its
# two endpoints in the current `skeleton`.
#
# Returns a list with: the arc, the p-value of the decisive test, the
# d-separating set found (NULL when none) and `max.adjacent`, the larger of
# the two endpoint neighbourhood sizes (0 when the arc's fate is settled
# and no further testing is needed).
pc.heuristic = function(pair, data, alpha, B, whitelist, blacklist,
test, skeleton, dsep.size, debug = FALSE) {
arc = pair$arc
# check whether the arc is blacklisted in both directions (so that we do not
# include it) or whitelisted in at least one direction (so that we include it).
if (is.whitelisted(whitelist, arc, either = TRUE))
return(list(arc = arc, p.value = 0, dsep.set = NULL, max.adjacent = 0))
else if (is.blacklisted(blacklist, arc, both = TRUE))
return(list(arc = arc, p.value = 1, dsep.set = NULL, max.adjacent = 0))
# check whether the nodes are already d-separated.
if (!is.null(pair$dsep.set))
return(pair)
# only nodes that are adjacent to the arc endpoints are investigated in the
# search for a d-separating set.
nbr1 = union(skeleton[skeleton[, 2] == arc[1], 1], skeleton[skeleton[, 1] == arc[1], 2])
nbr1 = setdiff(nbr1, arc[2])
nbr2 = union(skeleton[skeleton[, 2] == arc[2], 1], skeleton[skeleton[, 1] == arc[2], 2])
nbr2 = setdiff(nbr2, arc[1])
# not enough nodes to form a d-separating set of the given size.
if ((length(nbr1) < dsep.size) && (length(nbr2) < dsep.size))
return(list(arc = arc, p.value = pair$p.value, dsep.set = pair$dsep.set,
max.adjacent = 0))
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* investigating", arc[1], "-", arc[2] ,
", d-separating sets of size", dsep.size, ".\n")
cat("  > neighbours of", arc[1], ":", nbr1, "\n")
}#THEN
# test against all candidate sets of size dsep.size taken from the
# neighbourhood of the first endpoint.
if (length(nbr1) >= dsep.size) {
a1 = allsubs.test(x = arc[1], y = arc[2], sx = nbr1, min = dsep.size,
max = dsep.size, data = data, test = test, alpha = alpha, B = B,
debug = debug, epi = 0L)
if (a1["p.value"] > alpha)
return(list(arc = arc, p.value = a1["p.value"],
dsep.set = attr(a1, "dsep.set"), max.adjacent = 0))
}#THEN
if (debug)
cat("  > neighbours of", arc[2], ":", nbr2, "\n")
# there are cases in which checking the neighbours of the second endpoint
# is redundant:
# * if d-separating set is the empty set, because marginal tests are
#   symmetric;
# * if the d-separating sets are of size 1 (just single nodes), then it is
#   trivial not test them again when the same node is adjacent to both
#   endpoints;
# * if nbr1 and nbr2 are identical, and thus produce the same set of tests.
if (dsep.size == 1)
nbr2 = setdiff(nbr2, nbr1)
if ((length(nbr2) >= dsep.size) && (dsep.size > 0) && !setequal(nbr1, nbr2)) {
a2 = allsubs.test(x = arc[2], y = arc[1], sx = nbr2, min = dsep.size,
max = dsep.size, data = data, test = test, alpha = alpha, B = B,
debug = debug, epi = 0L)
if (a2["p.value"] > alpha)
return(list(arc = arc, p.value = a2["p.value"],
dsep.set = attr(a2, "dsep.set"), max.adjacent = 0))
}#THEN
# no d-separating set of this size was found: the arc survives this round.
return(list(arc = arc, p.value = 0, dsep.set = NULL,
max.adjacent = max(length(nbr1), length(nbr2))))
}#PC.HEURISTIC
| /ombFast/R/pcalgo.R | no_license | kanyulongkkk/KtreeBN | R | false | false | 4,880 | r |
# Skeleton phase of the PC-stable structure learning algorithm: start from
# the complete undirected graph over the variables of `x` and, for
# d-separating sets of growing size, run conditional independence tests
# (one node pair at a time, via pc.heuristic) and keep only the arcs whose
# test p-value is below `alpha`.
#
# x: data set; its column names are the nodes of the network.
# cluster: forwarded to smartSapply (presumably a parallel backend -- confirm).
# whitelist, blacklist: arcs forced in / forbidden, forwarded to pc.heuristic.
# test: label of the conditional independence test to run.
# alpha: significance threshold an arc must beat to stay in the skeleton.
# B: forwarded to the tests (presumably a permutation count -- confirm).
# debug: if TRUE, print progress information.
#
# Returns the skeleton produced by cache.structure() with the per-pair test
# results attached as the "dsep.set" attribute.
pc.stable.backend = function(x, cluster = NULL, whitelist, blacklist, test,
alpha, B, debug = FALSE) {
nodes = names(x)
nnodes = length(nodes)
# NOTE(review): `mb` is assigned here but never used below.
mb = structure(vector(length(nodes), mode = "list"), names = nodes)
# initial skeleton: every unordered pair of nodes.
skeleton = subsets(nodes, 2)
node.pairs =
apply(skeleton, 1, function(x) list(arc = x, max.adjacent = nnodes - 1))
# upper bound on the neighbourhood size relevant to each pair.
nbr.size = rep(nnodes - 1, nnodes)
# find out which nodes are adjacent.
for (dsep.size in seq(from = 0, to = length(nodes) - 2)) {
# perform the conditional independence tests.
node.pairs[dsep.size <= nbr.size] =
smartSapply(cluster, node.pairs[dsep.size <= nbr.size], pc.heuristic, data = x, alpha = alpha,
B = B, whitelist = whitelist, blacklist = blacklist, test = test,
skeleton = skeleton, dsep.size = dsep.size, debug = debug)
# find out which undirected arcs are still present.
arcs.still.present = lapply(node.pairs, function(x) {
if (x$p.value < alpha)
return(x$arc)
else
return(NULL)
})
# update the skeleton.
skeleton = do.call(rbind, arcs.still.present)
# count how many nodes (at most) are adjacent to each of the endpoints.
nbr.size = sapply(node.pairs, `[[`, "max.adjacent")
# if that number is smaller than the current size of the d-separation set,
# there are no valid conditioning sets to test.
if (all(nbr.size <= dsep.size))
break
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* remaining arcs:\n")
print(arcs.rbind(skeleton, skeleton, reverse2 = TRUE))
}#THEN
}#FOR
# start encoding the bn object.
skeleton = cache.structure(nodes, arcs.rbind(skeleton, skeleton, reverse2 = TRUE))
# attach the d-separating sets.
attr(skeleton, "dsep.set") = node.pairs
return(skeleton)
}#PC.STABLE.OPTIMIZED
# Per-pair worker for pc.stable.backend: decide whether the undirected arc
# in `pair` survives conditional independence testing with d-separating
# sets of exactly `dsep.size` nodes, drawn from the neighbourhoods of its
# two endpoints in the current `skeleton`.
#
# Returns a list with: the arc, the p-value of the decisive test, the
# d-separating set found (NULL when none) and `max.adjacent`, the larger of
# the two endpoint neighbourhood sizes (0 when the arc's fate is settled
# and no further testing is needed).
pc.heuristic = function(pair, data, alpha, B, whitelist, blacklist,
test, skeleton, dsep.size, debug = FALSE) {
arc = pair$arc
# check whether the arc is blacklisted in both directions (so that we do not
# include it) or whitelisted in at least one direction (so that we include it).
if (is.whitelisted(whitelist, arc, either = TRUE))
return(list(arc = arc, p.value = 0, dsep.set = NULL, max.adjacent = 0))
else if (is.blacklisted(blacklist, arc, both = TRUE))
return(list(arc = arc, p.value = 1, dsep.set = NULL, max.adjacent = 0))
# check whether the nodes are already d-separated.
if (!is.null(pair$dsep.set))
return(pair)
# only nodes that are adjacent to the arc endpoints are investigated in the
# search for a d-separating set.
nbr1 = union(skeleton[skeleton[, 2] == arc[1], 1], skeleton[skeleton[, 1] == arc[1], 2])
nbr1 = setdiff(nbr1, arc[2])
nbr2 = union(skeleton[skeleton[, 2] == arc[2], 1], skeleton[skeleton[, 1] == arc[2], 2])
nbr2 = setdiff(nbr2, arc[1])
# not enough nodes to form a d-separating set of the given size.
if ((length(nbr1) < dsep.size) && (length(nbr2) < dsep.size))
return(list(arc = arc, p.value = pair$p.value, dsep.set = pair$dsep.set,
max.adjacent = 0))
if (debug) {
cat("----------------------------------------------------------------\n")
cat("* investigating", arc[1], "-", arc[2] ,
", d-separating sets of size", dsep.size, ".\n")
cat("  > neighbours of", arc[1], ":", nbr1, "\n")
}#THEN
# test against all candidate sets of size dsep.size taken from the
# neighbourhood of the first endpoint.
if (length(nbr1) >= dsep.size) {
a1 = allsubs.test(x = arc[1], y = arc[2], sx = nbr1, min = dsep.size,
max = dsep.size, data = data, test = test, alpha = alpha, B = B,
debug = debug, epi = 0L)
if (a1["p.value"] > alpha)
return(list(arc = arc, p.value = a1["p.value"],
dsep.set = attr(a1, "dsep.set"), max.adjacent = 0))
}#THEN
if (debug)
cat("  > neighbours of", arc[2], ":", nbr2, "\n")
# there are cases in which checking the neighbours of the second endpoint
# is redundant:
# * if d-separating set is the empty set, because marginal tests are
#   symmetric;
# * if the d-separating sets are of size 1 (just single nodes), then it is
#   trivial not test them again when the same node is adjacent to both
#   endpoints;
# * if nbr1 and nbr2 are identical, and thus produce the same set of tests.
if (dsep.size == 1)
nbr2 = setdiff(nbr2, nbr1)
if ((length(nbr2) >= dsep.size) && (dsep.size > 0) && !setequal(nbr1, nbr2)) {
a2 = allsubs.test(x = arc[2], y = arc[1], sx = nbr2, min = dsep.size,
max = dsep.size, data = data, test = test, alpha = alpha, B = B,
debug = debug, epi = 0L)
if (a2["p.value"] > alpha)
return(list(arc = arc, p.value = a2["p.value"],
dsep.set = attr(a2, "dsep.set"), max.adjacent = 0))
}#THEN
# no d-separating set of this size was found: the arc survives this round.
return(list(arc = arc, p.value = 0, dsep.set = NULL,
max.adjacent = max(length(nbr1), length(nbr2))))
}#PC.HEURISTIC
|
# Random variables are numeric outcomes resulting from random processes.
# An urn with 2 red beads and 3 blue beads
beads <- rep(c("red", "blue"), times = c(2, 3))
# define random variable x to be 1 if blue, 0 otherwise
x <- ifelse(sample(beads, 1) == "blue", 1, 0)
# demonstrate that the random variable is different every time
ifelse(sample(beads, 1) == "blue", 1, 0)
ifelse(sample(beads, 1) == "blue", 1, 0)
ifelse(sample(beads, 1) == "blue", 1, 0)  # fixed: `o` was a typo for the literal 0
# A sampling model models the random behavior of a process as the sampling of draws from an urn.
# The probability distribution of a random variable is the probability of the observed value falling in any given interval.
# a CDF: F(a) = Pr(S <= a) to answer questions related to the probability of S being in any interval
# The average of many draws of a random variable is called its expected value.
# The standard deviation of many draws of a random variable is called its standard error.
# Monte Carlo simulation: Chance of casino losing money on roulette
# We build a sampling model for the random variable S
# that represents the casino's total winnings.
# sampling model 1: define urn, then sample
# 18 black, 18 Red and 2 green
color <- rep(c("Black", "Red", "Green"), c(18, 18, 2))
# No of iterations
n <- 1000
# 1 means win, -1 means loss (from the casino's point of view)
X <- sample(ifelse(color == "Red", -1, 1), n, replace = TRUE)
# First ten results
X[1:10]
# sampling model 2: define urn inside sample function by noting probabilities
x <- sample(c(-1, 1), n, replace = TRUE, prob = c(18/38, 20/38)) # 1000 independent draws
S <- sum(x) # total winnings = sum of draws
S
# run a Monte Carlo simulation and use the results to estimate the probability of the casino losing money.
n <- 1000 # number of roulette players
B <- 10000 # number of Monte Carlo experiments
S <- replicate(B, {
  X <- sample(c(-1, 1), n, replace = TRUE, prob = c(9/19, 10/19)) # simulate 1000 spins (9/19 = 18/38)
  sum(X) # determine total profit
})
mean(S < 0) # probability of the casino losing money
# We can plot a histogram of the observed values of S as well as the normal density curve based on the mean and standard deviation of S.
library(dplyr)
library(ggplot2)
s <- seq(min(S), max(S), length = 100) # sequence of 100 values across range of S
normal_density <- data.frame(s = s, f = dnorm(s, mean(S), sd(S))) # generate normal density for S
data.frame(S = S) %>% # make data frame of S for histogram
  ggplot(aes(S, ..density..)) + # ..density.. is a computed variable (after_stat(density) in ggplot2 >= 3.4)
  geom_histogram(color = "black", binwidth = 10) +
  ylab("Probability") +
  geom_line(data = normal_density, mapping = aes(s, f), color = "blue")
| /Probability/Sample models.R | no_license | yogeshwar1996/BasicsOfDataScience | R | false | false | 2,667 | r | # Random variables are numeric outcomes resulting from random processes.
# An urn with 2 red beads and 3 blue beads
beads <- rep(c("red", "blue"), times = c(2, 3))
# define random variable x to be 1 if blue, 0 otherwise
x <- ifelse(sample(beads, 1) == "blue", 1, 0)
# demonstrate that the random variable is different every time
ifelse(sample(beads, 1) == "blue", 1, 0)
ifelse(sample(beads, 1) == "blue", 1, 0)
ifelse(sample(beads, 1) == "blue", 1, 0)  # fixed: `o` was a typo for the literal 0
# A sampling model models the random behavior of a process as the sampling of draws from an urn.
# The probability distribution of a random variable is the probability of the observed value falling in any given interval.
# a CDF: F(a) = Pr(S <= a) to answer questions related to the probability of S being in any interval
# The average of many draws of a random variable is called its expected value.
# The standard deviation of many draws of a random variable is called its standard error.
# Monte Carlo simulation: Chance of casino losing money on roulette
# We build a sampling model for the random variable S
# that represents the casino's total winnings.
# sampling model 1: define urn, then sample
# 18 black, 18 Red and 2 green
color <- rep(c("Black", "Red", "Green"), c(18, 18, 2))
# No of iterations
n <- 1000
# 1 means win, -1 means loss (from the casino's point of view)
X <- sample(ifelse(color == "Red", -1, 1), n, replace = TRUE)
# First ten results
X[1:10]
# sampling model 2: define urn inside sample function by noting probabilities
x <- sample(c(-1, 1), n, replace = TRUE, prob = c(18/38, 20/38)) # 1000 independent draws
S <- sum(x) # total winnings = sum of draws
S
# run a Monte Carlo simulation and use the results to estimate the probability of the casino losing money.
n <- 1000 # number of roulette players
B <- 10000 # number of Monte Carlo experiments
S <- replicate(B, {
  X <- sample(c(-1, 1), n, replace = TRUE, prob = c(9/19, 10/19)) # simulate 1000 spins (9/19 = 18/38)
  sum(X) # determine total profit
})
mean(S < 0) # probability of the casino losing money
# We can plot a histogram of the observed values of S as well as the normal density curve based on the mean and standard deviation of S.
library(dplyr)
library(ggplot2)
s <- seq(min(S), max(S), length = 100) # sequence of 100 values across range of S
normal_density <- data.frame(s = s, f = dnorm(s, mean(S), sd(S))) # generate normal density for S
data.frame(S = S) %>% # make data frame of S for histogram
  ggplot(aes(S, ..density..)) + # ..density.. is a computed variable (after_stat(density) in ggplot2 >= 3.4)
  geom_histogram(color = "black", binwidth = 10) +
  ylab("Probability") +
  geom_line(data = normal_density, mapping = aes(s, f), color = "blue")
|
# Copyright (c) 2016 Al Warren. All rights reserved.
# Author: Al Warren
#
# This program is a solution to plot 1 of course project 1
# from the Coursera course Exploratory Data Analysis.
#
# Input data is from the “Individual household
# electric power consumption Data Set” of the UC Irvine Machine
# Learning Repository. The data can be downloaded at
# https://d396qusza40orc.cloudfront.net/
# exdata%2Fdata%2Fhousehold_power_consumption.zip
#
# Previous commit 11674880daea81e6c5ffa6f239f9ffc0c7194fa7
#
# Output is a PNG file named plot1.png.
#
# Packages required: data.table, lubridate
#
# See README.md for additional details
suppressMessages(library(data.table))
# NOTE(review): lubridate is listed in the header but not used in the code
# below -- confirm it is actually needed.
suppressMessages(library(lubridate))
# Read the file; "?" marks missing values in the raw data.
# NOTE(review): the original comment claimed columns were read as character,
# but no colClasses argument is passed -- fread infers types here.
DT <- fread("household_power_consumption.txt", sep = ";", na.strings="?")
# Keep only the two target dates (d/m/yyyy) and the Global_active_power
# column, dropping rows with missing values.
DT <- na.omit(DT[Date %in% c("1/2/2007", "2/2/2007"), .(Global_active_power)])
# Draw the histogram into plot1.png (480x480 pixels).
png(filename = "plot1.png", width = 480, height = 480, type = "cairo-png")
hist(DT$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red")
dev.off()
| /plot1.R | no_license | alwarren/ExData_Plotting1 | R | false | false | 1,210 | r | # Copyright (c) 2016 Al Warren. All rights reserved.
# Author: Al Warren
#
# This program is a solution to plot 1 of course project 1
# from the Coursera course Exploratory Data Analysis.
#
# Input data is from the “Individual household
# electric power consumption Data Set” of the UC Irvine Machine
# Learning Repository. The data can be downloaded at
# https://d396qusza40orc.cloudfront.net/
# exdata%2Fdata%2Fhousehold_power_consumption.zip
#
# Previous commit 11674880daea81e6c5ffa6f239f9ffc0c7194fa7
#
# Output is a PNG file named plot1.png.
#
# Packages required: data.table, lubridate
#
# See README.md for additional details
suppressMessages(library(data.table))
# NOTE(review): lubridate is listed in the header but not used in the code
# below -- confirm it is actually needed.
suppressMessages(library(lubridate))
# Read the file; "?" marks missing values in the raw data.
# NOTE(review): the original comment claimed columns were read as character,
# but no colClasses argument is passed -- fread infers types here.
DT <- fread("household_power_consumption.txt", sep = ";", na.strings="?")
# Keep only the two target dates (d/m/yyyy) and the Global_active_power
# column, dropping rows with missing values.
DT <- na.omit(DT[Date %in% c("1/2/2007", "2/2/2007"), .(Global_active_power)])
# Draw the histogram into plot1.png (480x480 pixels).
png(filename = "plot1.png", width = 480, height = 480, type = "cairo-png")
hist(DT$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blockseg-class.R
\docType{methods}
\name{predict,blockSeg-method}
\alias{predict,blockSeg-method}
\title{Predict method for a \code{blockSeg} object}
\usage{
\S4method{predict}{blockSeg}(object, Y, lambda = NULL)
}
\arguments{
\item{object}{an object of class \code{blockSeg}.}
\item{Y}{matrix of observations.}
\item{lambda}{a numeric vector giving the list of \eqn{\lambda}{lambda} for which to predict.
By default, \code{NULL}. If \code{NULL}, it is set to the \code{lambdalist} slot
of \code{object}. If this slot is empty, \code{lambda} is set to the fusion times detected in
the \code{blockSeg} function.}
}
\description{
Produce a prediction for a vector of \code{lambda} parameter and an array of \code{class}.
}
\examples{
require(blockseg)
n <- 100
K <- 5
mu <- suppressWarnings(matrix(rep(c(1,0),ceiling(K**2/2)), K,K))
Y <- rblockdata(n,mu,sigma=.5)$Y
res <- blockSeg(Y, 100)
predict(res, Y, lambda=slot(res, "Lambda")[1:3])
}
\seealso{
\code{\linkS4class{blockSeg}}.
}
| /man/predict.Rd | no_license | jchiquet/blockseg | R | false | true | 1,063 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blockseg-class.R
\docType{methods}
\name{predict,blockSeg-method}
\alias{predict,blockSeg-method}
\title{Predict method for a \code{blockSeg} object}
\usage{
\S4method{predict}{blockSeg}(object, Y, lambda = NULL)
}
\arguments{
\item{object}{an object of class \code{blockSeg}.}
\item{Y}{matrix of observations.}
\item{lambda}{a numeric vector giving the list of \eqn{\lambda}{lambda} for which to predict.
By default, \code{NULL}. If \code{NULL}, it is set to the \code{lambdalist} slot
of \code{object}. If this slot is empty, \code{lambda} is set to the fusion times detected in
the \code{blockSeg} function.}
}
\description{
Produce a prediction for a vector of \code{lambda} parameter and an array of \code{class}.
}
\examples{
require(blockseg)
n <- 100
K <- 5
mu <- suppressWarnings(matrix(rep(c(1,0),ceiling(K**2/2)), K,K))
Y <- rblockdata(n,mu,sigma=.5)$Y
res <- blockSeg(Y, 100)
predict(res, Y, lambda=slot(res, "Lambda")[1:3])
}
\seealso{
\code{\linkS4class{blockSeg}}.
}
|
# Randomly assign entries to the empty (0) cells of a checks matrix,
# stacking blocks column by column from the bottom of each column upwards.
#
# The 0 cells are first labeled "B1".."Bb" (b = number of blocks), walking
# each column bottom-to-top; then the entries of block k (as partitioned by
# split_vectors()) are shuffled into the cells labeled "Bk".
#
# stacked, n_rows, n_cols, Fillers: unused here; kept for interface
#   compatibility with callers.
# matrix_checks: matrix whose 0 cells are to be filled with entries.
# checks: entries excluded from the randomization.
# data: data frame whose first column holds the entries.
# data_dim_each_block: number of entries in each block.
#
# Returns a list with the randomized matrix (rand), the per-block entry
# vectors (Entries), the block sizes (Lines) and the "B*" label map
# (w_map_letters).
get_random_stacked <- function(stacked = "By Column",
                               n_rows,
                               n_cols,
                               matrix_checks = NULL,
                               Fillers = FALSE,
                               checks = NULL,
                               data = NULL,
                               data_dim_each_block = NULL) {
  data_entries <- as.vector(data[, 1])
  data_entries_no_checks <- data_entries[!(data_entries %in% checks)]
  b <- length(data_dim_each_block)
  # one "Bk" label per entry slot, in block order
  target <- rep(paste0("B", seq_len(b)), times = data_dim_each_block)
  if (sum(matrix_checks == 0) != sum(data_dim_each_block)) {
    stop("Block dimensions do not fit to the matrix")
  }
  # label the empty (0) cells "B1".."Bb", filling each column bottom-up
  v <- 1
  for (j in seq_len(ncol(matrix_checks))) {
    for (i in nrow(matrix_checks):1) {
      if (matrix_checks[i, j] == 0) {
        matrix_checks[i, j] <- target[v]
        v <- v + 1
      }
    }
  }
  w_map_letters <- matrix_checks
  split_entries <- split_vectors(data_entries_no_checks, data_dim_each_block)
  # replace the "Bk" labels of block k with a random permutation of that
  # block's entries. (Fixed: the old code iterated levels(as.factor(target)),
  # which sorts labels alphabetically -- "B10" before "B2" -- and paired the
  # wrong entry chunks with blocks whenever there were 10 or more blocks.)
  for (k in seq_len(b)) {
    matrix_checks[matrix_checks == paste0("B", k)] <- sample(split_entries[[k]])
  }
  # sanity check: every entry must now appear in the randomized matrix
  treatments_random <- sum(data_entries_no_checks %in% matrix_checks)
  len_entries_to_random <- length(data_entries_no_checks)
  if (treatments_random != len_entries_to_random) {
    stop("Some entries are missing in the randomization!!")
  }
  return(list(rand = matrix_checks,
              Entries = split_entries,
              Lines = data_dim_each_block,
              w_map_letters = w_map_letters))
}
| /R/utils_get_random_stacked.R | permissive | DidierMurilloF/FielDHub | R | false | false | 1,963 | r | get_random_stacked <- function(stacked = "By Column",
n_rows,
n_cols,
matrix_checks = NULL,
Fillers = FALSE,
checks = NULL,
data = NULL,
data_dim_each_block = NULL) {
# Labels the 0 cells of matrix_checks "B1".."Bb" (column by column, bottom
# up), then shuffles the entries of each block into its labeled cells.
# stacked, n_rows, n_cols and Fillers are not used in this body.
data_entries <- as.vector(data[,1])
data_entries_no_checks <- data_entries[!(data_entries %in% checks)]
b <- length(data_dim_each_block)
target <- rep(paste0("B", 1:b), times = data_dim_each_block)
if (sum(matrix_checks == 0) != sum(data_dim_each_block)) {
stop("Block dimensions do not fit to the matrix")
}
# NOTE(review): this line duplicates the `target` assignment above.
target <- rep(paste0("B", 1:b), times = data_dim_each_block)
# walk each column bottom-to-top and label empty (0) cells with "Bk".
v <- 1
for(j in 1:ncol(matrix_checks)) {
for (i in nrow(matrix_checks):1) {
if (matrix_checks[i,j] == 0){
matrix_checks[i,j] <- target[v]
v <- v + 1
}else{
# NOTE(review): both statements in this branch are no-ops.
matrix_checks[i,j] <- matrix_checks[i,j]
v <- v
}
}
}
w_map_letters <- matrix_checks
# NOTE(review): levels() sorts alphabetically ("B10" before "B2"), so for
# 10 or more blocks split_entries[[k]] is paired with the wrong label.
levels_target <- levels(as.factor(target))
split_entries <- split_vectors(data_entries_no_checks, data_dim_each_block)
# replace each block label with a random permutation of its entries.
z <- 1
for(k in 1:b){
matrix_checks[matrix_checks == levels_target[z]] <- sample(split_entries[[k]])
z <- z + 1
}
# sanity check: every entry must appear in the randomized matrix.
treatments_random <- sum(data_entries_no_checks %in% matrix_checks)
len_entries_to_random <- length(data_entries_no_checks)
if (treatments_random == len_entries_to_random) {
matrix_checks_random_entries <- matrix_checks
# print("Randomization was successful. It passed all tests! Great!!")
# print(c(treatments_random, len_entries_to_random))
} else stop("Some entries are missing in the randomization!!")
return(list(rand = matrix_checks_random_entries,
Entries = split_entries,
Lines = data_dim_each_block,
w_map_letters = w_map_letters))
}
|
# new plotting functions
library(dplyr)
library(reshape2)
library(ggplot2)
library(grid)
library(gridExtra)
# Plot 10th/50th/90th percentile traces of end-of-year elevation for one
# variable, with one line color per simulation start month.
#
# zz: data frame with columns StartMonth, Year, Variable and Value.
# yrs: years to keep in the plot.
# var: the value of `Variable` to plot.
# myTitle: plot title.
#
# Returns the ggplot object (previously the function relied on the
# invisible value of a trailing `gg <- ...` assignment).
plotEOCYElev <- function(zz, yrs, var, myTitle)
{
  zz <- dplyr::filter(zz, Year %in% yrs, Variable == var)
  # compute the 10/50/90 percentiles, aggregated by start month and year
  zz <- zz %>%
    dplyr::group_by(StartMonth, Year, Variable) %>%
    dplyr::summarise('50th' = median(Value), '10th' = quantile(Value,.1),
                     '90th' = quantile(Value,.9))
  # reshape to long format so the percentile becomes a plotting aesthetic
  zz <- reshape2::melt(zz, value.name = 'Value', measure.vars = c('10th','50th','90th'),
                       id.vars = c('StartMonth','Year'), variable.name = 'Percentile')
  # line types keyed by percentile
  qLt <- c(3,1,2)
  names(qLt) <- c('10th','50th','90th')
  # build the plot
  gg <- ggplot(zz, aes(Year,Value, color = StartMonth, linetype = Percentile))
  gg <- gg + geom_line(size = 1) +
    scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,5)) +
    theme(panel.grid.minor = element_line(color = 'white', size = .4),
          panel.grid.major = element_line(color = 'white', size = .6)) +
    labs(y = '[feet]', title = myTitle) +
    theme(legend.key.height = unit(2,'line'), legend.key.width = grid::unit(2, 'line')) +
    scale_color_discrete(guide = guide_legend(title = 'Start Month')) +
    scale_linetype_manual(values = qLt)
  # return the plot object explicitly and visibly
  gg
}
# annText is text that's added to annotation
# legendTitle
# legLoc is the location of the legend
# nC is number of columns in legend
# annSize is the size of the annotation
# Plot the percent of traces meeting each critical-elevation condition over
# time (one colored line per condition). Returns a ggplot object.
plotCritStats <- function(zz, yrs, annText, legendTitle = '', legLoc = 'bottom', nC = 4,
annSize = 3)
{
zz <- dplyr::filter(zz, Year %in% yrs)
# rename the variables to human-readable strings for the legend
zz$vName <- 'LB Shortage'
zz$vName[zz$Variable == 'meadLt1000'] <- 'Mead < 1,000\'\nin Any Month'
zz$vName[zz$Variable == 'meadLt1020'] <- 'Mead < 1,020\'\nin Any Month'
zz$vName[zz$Variable == 'meadLt1025'] <- 'Mead < 1,025\'\nin Any Month'
zz$vName[zz$Variable == 'powellLt3490'] <- 'Powell < 3,490\'\nin Any Month'
# compute the percent of traces by averaging values
# NOTE(review): unlike plotShortageSurplus(), the mean is not scaled by 100
# here, yet the y-axis spans 0-100 -- presumably Value is already in
# percent; confirm against the caller.
zz <- zz %>% dplyr::group_by(Year,Variable,vName) %>%
dplyr::summarise(Value = mean(Value))
yL <- c(0,100)
gg <- ggplot(zz, aes(Year, Value, color = vName))
gg <- gg + geom_line(size = 1) +
coord_cartesian(ylim = yL) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,1)) +
scale_y_continuous(minor_breaks = seq(yL[1],yL[2],5), breaks = seq(yL[1],yL[2],10)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
scale_color_discrete(guide = guide_legend(title = legendTitle,ncol = nC)) +
theme(legend.position = legLoc, axis.text.x = element_text(angle = 90,vjust=.5)) +
annotate('text', x = min(yrs), y = 95, label = annText, vjust=0, hjust=0,size = annSize) +
labs(y = 'Percent of Traces [%]')
gg
}
# monthRun will be added to the title
# legLoc is the location of the legend
# nC is number of columns in legend
# Plot the percent of traces in Lower Basin shortage or surplus over time.
# Value is assumed to be a 0/1 trace indicator (mean * 100 = % of traces).
# Returns a ggplot object.
plotShortageSurplus <- function(zz, yrs, monthRun, legendTitle = '', nC = 2, legLoc = 'bottom')
{
zz <- dplyr::filter(zz, Year %in% yrs)
# compute the chances of shortage/surplus;
# averaging across the traces gives the total % of traces
zz <- zz %>%
dplyr::group_by(Year, Variable) %>%
dplyr::summarise(prob = mean(Value)*100)
# human-readable legend labels
zz$vName <- 'Shortage of Any Amount'
zz$vName[zz$Variable == 'lbSurplus'] <- 'Surplus of Any Amount'
# plot:
gg <- ggplot(zz, aes(Year, prob, color = vName))
yL <- c(0,100)
myTitle <- paste('Percent of Traces with Lower Basin Surplus or Shortage\nResults from the',
monthRun, 'CRSS Run*')
gg <- gg + geom_line(size = 1) +
coord_cartesian(ylim = yL) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,1)) +
scale_y_continuous(minor_breaks = seq(yL[1],yL[2],5), breaks = seq(yL[1],yL[2],10)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
scale_color_discrete(guide = guide_legend(title = legendTitle,ncol = nC)) +
theme(legend.position = legLoc) +
labs(x = 'Year', y = '[%]', title = myTitle)
gg
}
# Stacked bar chart of the percent of traces in each Lower Basin shortage
# tier (steps 1-3) by year, with an annotation in the top-left corner.
# Returns a ggplot object.
plotShortStackedBar <- function(zz, yrs, annText, annSize = 4)
{
zz <- dplyr::filter(zz, Year %in% yrs)
# average the 0/1 trace indicators to get % of traces per tier
zz <- zz %>%
dplyr::group_by(Year,Variable) %>%
dplyr::summarize(prob = mean(Value)*100)
# rename variables for plotting
zz$VName<- 'Step 1 Shortage'
zz$VName[zz$Variable == 'lbShortageStep2'] <- 'Step 2 Shortage'
zz$VName[zz$Variable == 'lbShortageStep3'] <- 'Step 3 Shortage'
yL <- c(0,100)
gg <- ggplot(zz,aes(Year,prob,fill = VName))
gg <- gg + geom_bar(stat = 'identity') +
coord_cartesian(ylim = yL) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,1)) +
scale_y_continuous(minor_breaks = seq(yL[1],yL[2],5), breaks = seq(yL[1],yL[2],10)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
scale_fill_discrete(guide = guide_legend(title = '')) +
theme(legend.position = 'bottom') +
labs(x = 'Year', y = '[%]', title = 'Lower Basin Shortages by Tier') +
annotate('text', x = min(zz$Year), y = 95, label = annText, vjust=0, hjust=0,size = annSize)
gg
}
# assumes zz is data already read in and will return one variable for the given yrs
# rownames of zz should be years, and colnames should be variable names
# Extract one variable's values for the requested years. `zz` has years as
# row names and variable names as column names; the result follows the
# order of `yrs`.
getSingleVarData <- function(zz, yrs, var)
{
  yearRows <- match(yrs, rownames(zz))
  varCol <- which(colnames(zz) == var)
  zz[yearRows, varCol]
}
# Format a numeric percentage table for display.
#
# Rounds to whole percents, appends '%', and replaces entries that are
# non-zero but round to zero with '< 1%' / '< -1%' so small differences are
# not hidden. Row names become the scenario names plus 'Difference'; column
# names are the years.
#
# Args:
#   zz:        numeric matrix with length(scenNames) + 1 rows (last row is
#              the difference row) and one column per year.
#   scenNames: scenario names used for the leading row labels.
#   yrs:       column labels (years).
#
# Returns: a data.frame of formatted character strings.
formatSimpleTable <- function(zz, scenNames, yrs)
{
  zzRound <- round(zz, 0)
  zzRound <- matrix(paste0(zzRound, '%'), nrow = nrow(zz), byrow = FALSE)
  # vectorized replacement of the old element-by-element double loop:
  # flag entries that are non-zero but rounded down to '0%'
  zzRound[zz > 0 & zzRound == '0%'] <- '< 1%'
  zzRound[zz < 0 & zzRound == '0%'] <- '< -1%'
  rownames(zzRound) <- c(scenNames, 'Difference')
  colnames(zzRound) <- yrs
  as.data.frame(zzRound)
}
# scenNames: names to use for row names
# iFiles: character vector with paths to the two files to use get the data from multiple scenarios
# scenNames and iFiles should be the same length
# yrs to show
# Assumes that there are only two scenarios to process
# Build a one-page graphic with two 5-year summary tables: % of traces with
# a Lower Basin shortage, and % of traces with Powell below 3,490' (power
# pool), comparing exactly two scenarios plus their difference.
#
# Args:
#   scenNames: row labels for the two scenarios (length 2).
#   iFiles:    paths to the two csv files holding scenario data (same
#              length/order as scenNames).
#   yrs:       years (columns) to show.
#
# Returns: a ggplot object with both tables rendered as annotations.
creat5YrSimpleTable <- function(scenNames, iFiles, yrs)
{
  # scalar conditions, so use the short-circuit operator
  if (length(scenNames) != length(iFiles) || length(scenNames) != 2) {
    stop(paste0('Invalid number of scenarios passed to create5YrSimpleTable.\n',
                'Please ensure scenNames and iFiles have two scenarios each.'))
  }
  i1 <- read.csv(iFiles[1], row.names = 1)
  i2 <- read.csv(iFiles[2], row.names = 1)
  # re-read the header row directly so column names keep their original
  # punctuation (read.csv mangles names like "Powell < 3,490' in Any Month")
  cc <- scan(iFiles[1], sep = ',', nlines = 1, what = 'character')
  cc2 <- scan(iFiles[2], sep = ',', nlines = 1, what = 'character')
  colnames(i1) <- cc[2:length(cc)]
  colnames(i2) <- cc2[2:length(cc2)]
  # two scenario rows plus a difference row (scenario 2 minus scenario 1)
  shortTable <- rbind(getSingleVarData(i1, yrs, 'LB Shortage'),
                      getSingleVarData(i2, yrs, 'LB Shortage'))
  shortTable <- rbind(shortTable, shortTable[2,] - shortTable[1,])
  pTable <- rbind(getSingleVarData(i1, yrs, 'Powell < 3,490\' in Any Month'),
                  getSingleVarData(i2, yrs, 'Powell < 3,490\' in Any Month'))
  pTable <- rbind(pTable, pTable[2,] - pTable[1,])
  shortTable <- formatSimpleTable(shortTable, scenNames, yrs)
  pTable <- formatSimpleTable(pTable, scenNames, paste('WY', yrs))
  # NOTE(review): gpar.coltext/gpar.rowtext/show.hlines/core.just are
  # arguments of the pre-2.0 gridExtra tableGrob(); confirm the installed
  # gridExtra version before modernizing these calls.
  shortGrob <- gridExtra::tableGrob(shortTable, gpar.coltext = gpar(cex = 1),
                                    gpar.rowtext = gpar(cex = 1), show.hlines = TRUE,
                                    core.just = 'right')
  pGrob <- gridExtra::tableGrob(pTable, gpar.coltext = gpar(cex = 1),
                                gpar.rowtext = gpar(cex = 1), show.hlines = TRUE,
                                core.just = 'right')
  shortLabel <- '% Traces with Lower Basin Shortage'
  pLabel <- '% Traces below 3,490\' (power pool) at Powell'
  # blank canvas; the tables and their labels are placed as annotations
  gg <- qplot(1:7, 1:7, geom = 'blank') + theme_bw() +
    theme(line = element_blank(), text = element_blank()) +
    annotation_custom(grob = pGrob, xmin = 0, ymin = 2, xmax = 7, ymax = 6) +
    annotation_custom(grob = shortGrob, xmin = 0, ymin = 4, xmax = 6, ymax = 7.2) +
    # BUG FIX: 'face' is not a valid parameter of annotate()/geom_text();
    # the correct name is 'fontface', so the labels were never bolded.
    annotate('text', x = 1.5, y = 4.65, label = pLabel, hjust = 0, size = 6, fontface = 'bold') +
    annotate('text', x = 1.5, y = 6.25, label = shortLabel, hjust = 0, size = 6, fontface = 'bold')
  gg
}
| /code/plottingFunctions.R | no_license | jshirey/Process-CRSS-Res | R | false | false | 8,840 | r | # new plotting functions
library(dplyr)
library(reshape2)
library(ggplot2)
library(grid)
library(gridExtra)
# Plot end-of-calendar-year elevation percentiles (10th/50th/90th) over time,
# one colored line per start month and one linetype per percentile.
#
# Args:
#   zz:      data.frame with StartMonth, Year, Variable, and Value columns.
#   yrs:     years to include in the plot.
#   var:     value of `Variable` to filter on.
#   myTitle: plot title.
#
# Returns: a ggplot object (visibly, so calling at the top level prints it).
plotEOCYElev <- function(zz, yrs, var, myTitle)
{
zz <- dplyr::filter(zz, Year %in% yrs, Variable == var)
# compute the 10/50/90 percentiles, aggregated by start month
zz <- zz %>%
dplyr::group_by(StartMonth, Year, Variable) %>%
dplyr::summarise('50th' = median(Value), '10th' = quantile(Value,.1),
'90th' = quantile(Value,.9))
# reshape into long format so each percentile can get its own linetype
zz <- reshape2::melt(zz, value.name = 'Value', measure.vars = c('10th','50th','90th'),
id.vars = c('StartMonth','Year'), variable.name = 'Percentile')
# linetype codes for the three percentiles
qLt <- c(3,1,2)
names(qLt) <- c('10th','50th','90th')
# plot
gg <- ggplot(zz, aes(Year,Value, color = StartMonth, linetype = Percentile))
gg <- gg + geom_line(size = 1) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,5)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
labs(y = '[feet]', title = myTitle) +
theme(legend.key.height = unit(2,'line'), legend.key.width = grid::unit(2, 'line')) +
scale_color_discrete(guide = guide_legend(title = 'Start Month')) +
scale_linetype_manual(values = qLt)
# BUG FIX: the function previously ended on the assignment above, so the
# ggplot object was returned invisibly and nothing printed when called at
# the top level. Return `gg` explicitly, matching the sibling plot functions.
gg
}
# annText is text that's added to annotation
# legendTitle
# legLoc is the location of the legend
# nC is number of columns in legend
# annSize is the size of the annotation
# Plot the percent of traces meeting each critical-elevation condition over
# time (one colored line per condition). Returns a ggplot object.
plotCritStats <- function(zz, yrs, annText, legendTitle = '', legLoc = 'bottom', nC = 4,
annSize = 3)
{
zz <- dplyr::filter(zz, Year %in% yrs)
# rename the variables to human-readable strings for the legend
zz$vName <- 'LB Shortage'
zz$vName[zz$Variable == 'meadLt1000'] <- 'Mead < 1,000\'\nin Any Month'
zz$vName[zz$Variable == 'meadLt1020'] <- 'Mead < 1,020\'\nin Any Month'
zz$vName[zz$Variable == 'meadLt1025'] <- 'Mead < 1,025\'\nin Any Month'
zz$vName[zz$Variable == 'powellLt3490'] <- 'Powell < 3,490\'\nin Any Month'
# compute the percent of traces by averaging values
# NOTE(review): unlike plotShortageSurplus(), the mean is not scaled by 100
# here, yet the y-axis spans 0-100 -- presumably Value is already in
# percent; confirm against the caller.
zz <- zz %>% dplyr::group_by(Year,Variable,vName) %>%
dplyr::summarise(Value = mean(Value))
yL <- c(0,100)
gg <- ggplot(zz, aes(Year, Value, color = vName))
gg <- gg + geom_line(size = 1) +
coord_cartesian(ylim = yL) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,1)) +
scale_y_continuous(minor_breaks = seq(yL[1],yL[2],5), breaks = seq(yL[1],yL[2],10)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
scale_color_discrete(guide = guide_legend(title = legendTitle,ncol = nC)) +
theme(legend.position = legLoc, axis.text.x = element_text(angle = 90,vjust=.5)) +
annotate('text', x = min(yrs), y = 95, label = annText, vjust=0, hjust=0,size = annSize) +
labs(y = 'Percent of Traces [%]')
gg
}
# monthRun will be added to the title
# legLoc is the location of the legend
# nC is number of columns in legend
# Plot the percent of traces in Lower Basin shortage or surplus over time.
# Value is assumed to be a 0/1 trace indicator (mean * 100 = % of traces).
# Returns a ggplot object.
plotShortageSurplus <- function(zz, yrs, monthRun, legendTitle = '', nC = 2, legLoc = 'bottom')
{
zz <- dplyr::filter(zz, Year %in% yrs)
# compute the chances of shortage/surplus;
# averaging across the traces gives the total % of traces
zz <- zz %>%
dplyr::group_by(Year, Variable) %>%
dplyr::summarise(prob = mean(Value)*100)
# human-readable legend labels
zz$vName <- 'Shortage of Any Amount'
zz$vName[zz$Variable == 'lbSurplus'] <- 'Surplus of Any Amount'
# plot:
gg <- ggplot(zz, aes(Year, prob, color = vName))
yL <- c(0,100)
myTitle <- paste('Percent of Traces with Lower Basin Surplus or Shortage\nResults from the',
monthRun, 'CRSS Run*')
gg <- gg + geom_line(size = 1) +
coord_cartesian(ylim = yL) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,1)) +
scale_y_continuous(minor_breaks = seq(yL[1],yL[2],5), breaks = seq(yL[1],yL[2],10)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
scale_color_discrete(guide = guide_legend(title = legendTitle,ncol = nC)) +
theme(legend.position = legLoc) +
labs(x = 'Year', y = '[%]', title = myTitle)
gg
}
# Stacked bar chart of the percent of traces in each Lower Basin shortage
# tier (steps 1-3) by year, with an annotation in the top-left corner.
# Returns a ggplot object.
plotShortStackedBar <- function(zz, yrs, annText, annSize = 4)
{
zz <- dplyr::filter(zz, Year %in% yrs)
# average the 0/1 trace indicators to get % of traces per tier
zz <- zz %>%
dplyr::group_by(Year,Variable) %>%
dplyr::summarize(prob = mean(Value)*100)
# rename variables for plotting
zz$VName<- 'Step 1 Shortage'
zz$VName[zz$Variable == 'lbShortageStep2'] <- 'Step 2 Shortage'
zz$VName[zz$Variable == 'lbShortageStep3'] <- 'Step 3 Shortage'
yL <- c(0,100)
gg <- ggplot(zz,aes(Year,prob,fill = VName))
gg <- gg + geom_bar(stat = 'identity') +
coord_cartesian(ylim = yL) +
scale_x_continuous(minor_breaks = 1990:3000, breaks = seq(1990,3000,1)) +
scale_y_continuous(minor_breaks = seq(yL[1],yL[2],5), breaks = seq(yL[1],yL[2],10)) +
theme(panel.grid.minor = element_line(color = 'white', size = .4),
panel.grid.major = element_line(color = 'white', size = .6)) +
scale_fill_discrete(guide = guide_legend(title = '')) +
theme(legend.position = 'bottom') +
labs(x = 'Year', y = '[%]', title = 'Lower Basin Shortages by Tier') +
annotate('text', x = min(zz$Year), y = 95, label = annText, vjust=0, hjust=0,size = annSize)
gg
}
# assumes zz is data already read in and will return one variable for the given yrs
# rownames of zz should be years, and colnames should be variable names
# Extract one variable's values for the requested years. `zz` has years as
# row names and variable names as column names; the result follows the
# order of `yrs`.
getSingleVarData <- function(zz, yrs, var)
{
  yearRows <- match(yrs, rownames(zz))
  varCol <- which(colnames(zz) == var)
  zz[yearRows, varCol]
}
# Format a numeric percentage table for display.
#
# Rounds to whole percents, appends '%', and replaces entries that are
# non-zero but round to zero with '< 1%' / '< -1%' so small differences are
# not hidden. Row names become the scenario names plus 'Difference'; column
# names are the years.
#
# Args:
#   zz:        numeric matrix with length(scenNames) + 1 rows (last row is
#              the difference row) and one column per year.
#   scenNames: scenario names used for the leading row labels.
#   yrs:       column labels (years).
#
# Returns: a data.frame of formatted character strings.
formatSimpleTable <- function(zz, scenNames, yrs)
{
  zzRound <- round(zz, 0)
  zzRound <- matrix(paste0(zzRound, '%'), nrow = nrow(zz), byrow = FALSE)
  # vectorized replacement of the old element-by-element double loop:
  # flag entries that are non-zero but rounded down to '0%'
  zzRound[zz > 0 & zzRound == '0%'] <- '< 1%'
  zzRound[zz < 0 & zzRound == '0%'] <- '< -1%'
  rownames(zzRound) <- c(scenNames, 'Difference')
  colnames(zzRound) <- yrs
  as.data.frame(zzRound)
}
# scenNames: names to use for row names
# iFiles: character vector with paths to the two files to use get the data from multiple scenarios
# scenNames and iFiles should be the same length
# yrs to show
# Assumes that there are only two scenarios to process
# Build a one-page graphic with two 5-year summary tables: % of traces with
# a Lower Basin shortage, and % of traces with Powell below 3,490' (power
# pool), comparing exactly two scenarios plus their difference.
#
# Args:
#   scenNames: row labels for the two scenarios (length 2).
#   iFiles:    paths to the two csv files holding scenario data (same
#              length/order as scenNames).
#   yrs:       years (columns) to show.
#
# Returns: a ggplot object with both tables rendered as annotations.
creat5YrSimpleTable <- function(scenNames, iFiles, yrs)
{
  # scalar conditions, so use the short-circuit operator
  if (length(scenNames) != length(iFiles) || length(scenNames) != 2) {
    stop(paste0('Invalid number of scenarios passed to create5YrSimpleTable.\n',
                'Please ensure scenNames and iFiles have two scenarios each.'))
  }
  i1 <- read.csv(iFiles[1], row.names = 1)
  i2 <- read.csv(iFiles[2], row.names = 1)
  # re-read the header row directly so column names keep their original
  # punctuation (read.csv mangles names like "Powell < 3,490' in Any Month")
  cc <- scan(iFiles[1], sep = ',', nlines = 1, what = 'character')
  cc2 <- scan(iFiles[2], sep = ',', nlines = 1, what = 'character')
  colnames(i1) <- cc[2:length(cc)]
  colnames(i2) <- cc2[2:length(cc2)]
  # two scenario rows plus a difference row (scenario 2 minus scenario 1)
  shortTable <- rbind(getSingleVarData(i1, yrs, 'LB Shortage'),
                      getSingleVarData(i2, yrs, 'LB Shortage'))
  shortTable <- rbind(shortTable, shortTable[2,] - shortTable[1,])
  pTable <- rbind(getSingleVarData(i1, yrs, 'Powell < 3,490\' in Any Month'),
                  getSingleVarData(i2, yrs, 'Powell < 3,490\' in Any Month'))
  pTable <- rbind(pTable, pTable[2,] - pTable[1,])
  shortTable <- formatSimpleTable(shortTable, scenNames, yrs)
  pTable <- formatSimpleTable(pTable, scenNames, paste('WY', yrs))
  # NOTE(review): gpar.coltext/gpar.rowtext/show.hlines/core.just are
  # arguments of the pre-2.0 gridExtra tableGrob(); confirm the installed
  # gridExtra version before modernizing these calls.
  shortGrob <- gridExtra::tableGrob(shortTable, gpar.coltext = gpar(cex = 1),
                                    gpar.rowtext = gpar(cex = 1), show.hlines = TRUE,
                                    core.just = 'right')
  pGrob <- gridExtra::tableGrob(pTable, gpar.coltext = gpar(cex = 1),
                                gpar.rowtext = gpar(cex = 1), show.hlines = TRUE,
                                core.just = 'right')
  shortLabel <- '% Traces with Lower Basin Shortage'
  pLabel <- '% Traces below 3,490\' (power pool) at Powell'
  # blank canvas; the tables and their labels are placed as annotations
  gg <- qplot(1:7, 1:7, geom = 'blank') + theme_bw() +
    theme(line = element_blank(), text = element_blank()) +
    annotation_custom(grob = pGrob, xmin = 0, ymin = 2, xmax = 7, ymax = 6) +
    annotation_custom(grob = shortGrob, xmin = 0, ymin = 4, xmax = 6, ymax = 7.2) +
    # BUG FIX: 'face' is not a valid parameter of annotate()/geom_text();
    # the correct name is 'fontface', so the labels were never bolded.
    annotate('text', x = 1.5, y = 4.65, label = pLabel, hjust = 0, size = 6, fontface = 'bold') +
    annotate('text', x = 1.5, y = 6.25, label = shortLabel, hjust = 0, size = 6, fontface = 'bold')
  gg
}
|
dim(pts)
## extract plot time series
## For each of the three exploratories (n = 1..3): read the plot shapefile,
## normalize plot IDs, keep plots inside the raster extent, extract an
## area-weighted, deseasoned time series per plot, and compute a Mann-Kendall
## trend (significantTau). Results are row-bound into one data.frame.
## NOTE(review): relies on objects defined earlier in the script (shp, mdss,
## mats, simpson, types) and on raster/rgeos/foreach being attached.
dat <- do.call("rbind", lapply(1:3, function(n) {
plots <- suppressWarnings(shapefile(shp[n]))
plots <- spTransform(plots, CRS = CRS("+init=epsg:4326"))
nms <- sapply(strsplit(plots@data$Name, " "), "[[", 1)
## zero-pad 4-character plot IDs (e.g. "AEG1" -> "AEG01")
for (i in 1:nrow(plots@data)) {
if (nchar(nms[i]) == 4)
plots@data$Name[i] <- paste0(substr(nms[i], 1, 3), 0, substr(nms[i], 4, 5))
else
plots@data$Name[i] <- nms[i]
}
## keep only plots intersecting the raster extent
spy <- as(extent(mdss[[n]]), "SpatialPolygons")
proj4string(spy) <- "+init=epsg:4326"
vld <- sapply(1:nrow(plots@data), function(i) rgeos::gIntersects(plots[i, ], spy))
plots <- plots[vld, ]
pts <- foreach(i = 1:nrow(plots@data), .combine = "cbind") %do% {
ids <- cellFromPolygon(mdss[[n]], plots[i, ], weights = TRUE)[[1]]
pct <- sapply(ids[, 2], function(j) j / sum(ids[, 2]))
## calculate weighted sum
## NOTE(review): `ids` is a two-column matrix (cell, weight); using it
## whole as a row index looks suspect -- ids[, 1] may be intended. Confirm.
val <- mats[[n]][ids, ]
if (nrow(ids) > 1)
val <- sapply(1:ncol(val), function(j) sum(val[, j] * pct))
return(deseason(val[13:length(val)], cycle.window = 24L))
# return(val[13:length(val)])
}
trd <- apply(pts, 2, FUN = function(x) significantTau(x, p = 1, prewhitening = FALSE))
data.frame(PlotID = plots$Name, Trend = trd,
Simpson = simpson$Simpson[match(plots$Name, simpson$PlotID)],
types[match(plots$Name, types$EP), c("Forest_type", "Management")])
}))
## scatterplots of Simpson diversity vs. trend, panelled by forest type and
## management, with a regression line equation per panel
xyplot(Simpson ~ Trend | Forest_type + Management, data = dat,
panel = function(x, y, ...) {
panel.xyplot(x, y, ...)
panel.ablineq(lm(y ~ x), r.squared = TRUE, rotate = TRUE)
})
## merge in soil moisture index (SMI) per plot
smi <- read.table("data/17746.txt", header = TRUE)
dat <- merge(dat, smi, by.x = "PlotID", by.y = "EP", all = TRUE)
## extract corresponding simpson index (all layers)
simpson_all <- read.csv("data/Simpson_15_alllayers.csv",
stringsAsFactors = FALSE, row.names = 1)
names(simpson_all) <- c("PlotID", "Simpson_All")
dat <- merge(dat, simpson_all, by = "PlotID", all = TRUE)
dat <- dat[complete.cases(dat), ]
# mod <- lm(Simpson ~ SMI + I(SMI^2), data = dat, na.action = na.omit)
mod <- lm(Simpson ~ SMI, data = dat, na.action = na.omit)
plot(dat$SMI, dat$Simpson, col = "grey75")
lines(dat$SMI, fitted(mod))
## correlate the SMI-adjusted Simpson residuals with the trend
rsd <- residuals(mod)
cor(rsd, dat$Trend)
| /dfg_spp_exploratories/species_diversity/R/trials.R | no_license | environmentalinformatics-marburg/magic | R | false | false | 2,296 | r | dim(pts)
## extract plot time series
dat <- do.call("rbind", lapply(1:3, function(n) {
plots <- suppressWarnings(shapefile(shp[n]))
plots <- spTransform(plots, CRS = CRS("+init=epsg:4326"))
nms <- sapply(strsplit(plots@data$Name, " "), "[[", 1)
for (i in 1:nrow(plots@data)) {
if (nchar(nms[i]) == 4)
plots@data$Name[i] <- paste0(substr(nms[i], 1, 3), 0, substr(nms[i], 4, 5))
else
plots@data$Name[i] <- nms[i]
}
spy <- as(extent(mdss[[n]]), "SpatialPolygons")
proj4string(spy) <- "+init=epsg:4326"
vld <- sapply(1:nrow(plots@data), function(i) rgeos::gIntersects(plots[i, ], spy))
plots <- plots[vld, ]
pts <- foreach(i = 1:nrow(plots@data), .combine = "cbind") %do% {
ids <- cellFromPolygon(mdss[[n]], plots[i, ], weights = TRUE)[[1]]
pct <- sapply(ids[, 2], function(j) j / sum(ids[, 2]))
## calculate weighted sum
val <- mats[[n]][ids, ]
if (nrow(ids) > 1)
val <- sapply(1:ncol(val), function(j) sum(val[, j] * pct))
return(deseason(val[13:length(val)], cycle.window = 24L))
# return(val[13:length(val)])
}
trd <- apply(pts, 2, FUN = function(x) significantTau(x, p = 1, prewhitening = FALSE))
data.frame(PlotID = plots$Name, Trend = trd,
Simpson = simpson$Simpson[match(plots$Name, simpson$PlotID)],
types[match(plots$Name, types$EP), c("Forest_type", "Management")])
}))
xyplot(Simpson ~ Trend | Forest_type + Management, data = dat,
panel = function(x, y, ...) {
panel.xyplot(x, y, ...)
panel.ablineq(lm(y ~ x), r.squared = TRUE, rotate = TRUE)
})
smi <- read.table("data/17746.txt", header = TRUE)
dat <- merge(dat, smi, by.x = "PlotID", by.y = "EP", all = TRUE)
## extract corresponding simpson index
simpson_all <- read.csv("data/Simpson_15_alllayers.csv",
stringsAsFactors = FALSE, row.names = 1)
names(simpson_all) <- c("PlotID", "Simpson_All")
dat <- merge(dat, simpson_all, by = "PlotID", all = TRUE)
dat <- dat[complete.cases(dat), ]
# mod <- lm(Simpson ~ SMI + I(SMI^2), data = dat, na.action = na.omit)
mod <- lm(Simpson ~ SMI, data = dat, na.action = na.omit)
plot(dat$SMI, dat$Simpson, col = "grey75")
lines(dat$SMI, fitted(mod))
rsd <- residuals(mod)
cor(rsd, dat$Trend)
|
## Shiny server for the treatment-listing page: pulls the full Treatment
## table from MySQL and renders it as a data table.
source("../global.R")
## dbInfo.txt holds (dbname, user, password) on a single row
dbInfo = read.table('../../dbInfo.txt')
#### Define server logic required to summarize and view the selected dataset
shinyServer(function(input, output) {
## reactively fetch all Treatment rows; returns NA when the table is empty
values <- reactive({
con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
query <- paste("SELECT * FROM Treatment")
obstbl <- dbGetQuery(con,query)
if (dim(obstbl)[1]>0)
{
ret <- obstbl
} else {
ret <- NA
}
## close the connection before returning
dbDisconnect(con)
ret
})
## render the fetched table in the UI
output$linktable <- renderDataTable(
{
df <- values()
df
})
})
| /listtreat/server.R | no_license | stranda/unpak-shiny | R | false | false | 745 | r | source("../global.R")
dbInfo = read.table('../../dbInfo.txt')
#### Define server logic required to summarize and view the selected dataset
shinyServer(function(input, output) {
values <- reactive({
con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
query <- paste("SELECT * FROM Treatment")
obstbl <- dbGetQuery(con,query)
if (dim(obstbl)[1]>0)
{
ret <- obstbl
} else {
ret <- NA
}
dbDisconnect(con)
ret
})
output$linktable <- renderDataTable(
{
df <- values()
df
})
})
|
## Train and cross-validate a PAM (nearest shrunken centroid) classifier on
## an expression dataset read from an Excel-style text file, then list the
## genes surviving the shrinkage threshold.
source('pamr.from.excel.R')
source('pamr.train.R')
source('pamr.cv.R')
source('pamr.listgenes.R')
source('pamr.plotcv.R')
source('pamr.plotcen.R')
#Read dataset
## BUG FIX: R vectors are 1-indexed, so argv[0] always returns character(0);
## the first command-line argument is argv[1].
## NOTE(review): `argv` and `sample.number` must be supplied by the calling
## environment (e.g. the littler front end) -- confirm.
khan.data <- pamr.from.excel(argv[1], sample.number, sample.labels=TRUE)
## BUG FIX: the lapply() result was previously discarded, so NaN values were
## never actually replaced; assign the cleaned data back.
khan.data <- lapply(khan.data, function(x) replace(x, is.nan(x), 0))
##Train the classifier
khan.train <- pamr.train(khan.data)
##10-Cross-validate the classifier
khan.results<- pamr.cv(khan.train, khan.data)
pamr.plotcv(khan.results)
pamr.plotcen(khan.train, khan.data, threshold=1.8)
##List the significant genes
x <- pamr.listgenes(khan.train, khan.data, threshold=1.8)
## Show the top 10 genes (0:10 only worked because R drops zero indices).
x[1:10, ]
| /PAM/PAM_genes.R | no_license | Pierangela00/Classification-and-survival-prediction-in-Diffuse-Large-B-cell-Lymphoma | R | false | false | 609 | r | source('pamr.from.excel.R')
source('pamr.train.R')
source('pamr.cv.R')
source('pamr.listgenes.R')
source('pamr.plotcv.R')
source('pamr.plotcen.R')
#Read dataset
khan.data <- pamr.from.excel(argv[0], sample.number, sample.labels=TRUE)
lapply(khan.data, function(x) replace(x, is.nan(x), 0))
##Train the classifier
khan.train <- pamr.train(khan.data)
##10-Cross-validate the classifier
khan.results<- pamr.cv(khan.train, khan.data)
pamr.plotcv(khan.results)
pamr.plotcen(khan.train, khan.data, threshold=1.8)
##List the significant genes
x <- pamr.listgenes(khan.train, khan.data, threshold=1.8)
x[0:10,]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Get_and_Filter_Regions.R
\name{plotSDstats}
\alias{plotSDstats}
\title{Plot Heatmaps of Region Standard Deviation vs Features}
\usage{
plotSDstats(
regions,
maxQuantile = 1,
bins = 30,
nBreaks = 4,
legend.position = c(1.09, 0.9),
save = TRUE,
file = "SD_Plots.pdf",
width = 8.5,
height = 8.5,
verbose = TRUE
)
}
\arguments{
\item{regions}{A \code{data.frame} output from \code{\link[=getRegions]{getRegions()}} giving the set
of regions and statistics for each region.}
\item{maxQuantile}{A \code{numeric(1)} giving the maximum quantile of each
feature to plot.}
\item{bins}{A \code{numeric(1)} specifying the number of bins for both axes
in each heatmap.}
\item{nBreaks}{A \code{numeric(1)} specifying the number of breaks for both
axes.}
\item{legend.position}{A \code{numeric(2)} specifying the position of the
legend, as x-axis, y-axis. May also be a \code{character(1)}
indicating "none", "left", "right", "bottom", or "top".}
\item{save}{A \code{logical(1)} indicating whether to save the plot.}
\item{file}{A \code{character(1)} giving the file name (.pdf) for the plot.}
\item{width}{A \code{numeric(1)} specifying the width in inches of the saved
plot.}
\item{height}{A \code{numeric(1)} specifying the height in inches of the saved
plot.}
\item{verbose}{A \code{logical(1)} indicating whether messages should be
printed.}
}
\value{
A \code{ggplot} object.
}
\description{
\code{plotSDstats()} takes a set of regions from \code{\link[=getRegions]{getRegions()}}, generates
heatmaps of methylation standard deviation against region features, and saves
it as a pdf. Compared features include number of CpGs, minimum coverage, mean
coverage, and mean methylation.
}
\details{
It's recommended to examine these plots before and after filtering to ensure
removal of regions with high variability due to insufficient data. Plots are
heatmaps of 2D bin counts, with the color indicating the number of regions in
that bin on the log10 scale. A \code{ggplot} object is produced and can be
edited outside of this function if desired.
}
\examples{
\dontrun{
# Call Regions
regions <- getRegions(bs, file = "Unfiltered_Regions.txt")
plotRegionStats(regions, maxQuantile = 0.99,
file = "Unfiltered_Region_Plots.pdf")
plotSDstats(regions, maxQuantile = 0.99,
file = "Unfiltered_SD_Plots.pdf")
# Examine Region Totals at Different Cutoffs
regionTotals <- getRegionTotals(regions, file = "Region_Totals.txt")
plotRegionTotals(regionTotals, file = "Region_Totals.pdf")
# Filter Regions
regions <- filterRegions(regions, covMin = 10, methSD = 0.05,
file = "Filtered_Regions.txt")
plotRegionStats(regions, maxQuantile = 0.99,
file = "Filtered_Region_Plots.pdf")
plotSDstats(regions, maxQuantile = 0.99,
file = "Filtered_SD_Plots.pdf")
}
}
\seealso{
\itemize{
\item \code{\link[=getRegions]{getRegions()}} to generate the set of regions.
\item \code{\link[=plotRegionStats]{plotRegionStats()}}, \code{\link[=getRegionTotals]{getRegionTotals()}}, and
\code{\link[=plotRegionTotals]{plotRegionTotals()}} for more help visualizing region
characteristics and setting cutoffs for filtering.
\item \code{\link[=filterRegions]{filterRegions()}} for filtering regions by minimum coverage and
methylation standard deviation.
}
}
| /man/plotSDstats.Rd | permissive | cemordaunt/comethyl | R | false | true | 3,398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Get_and_Filter_Regions.R
\name{plotSDstats}
\alias{plotSDstats}
\title{Plot Heatmaps of Region Standard Deviation vs Features}
\usage{
plotSDstats(
regions,
maxQuantile = 1,
bins = 30,
nBreaks = 4,
legend.position = c(1.09, 0.9),
save = TRUE,
file = "SD_Plots.pdf",
width = 8.5,
height = 8.5,
verbose = TRUE
)
}
\arguments{
\item{regions}{A \code{data.frame} output from \code{\link[=getRegions]{getRegions()}} giving the set
of regions and statistics for each region.}
\item{maxQuantile}{A \code{numeric(1)} giving the maximum quantile of each
feature to plot.}
\item{bins}{A \code{numeric(1)} specifying the number of bins for both axes
in each heatmap.}
\item{nBreaks}{A \code{numeric(1)} specifying the number of breaks for both
axes.}
\item{legend.position}{A \code{numeric(2)} specifying the position of the
legend, as x-axis, y-axis. May also be a \code{character(1)}
indicating "none", "left", "right", "bottom", or "top".}
\item{save}{A \code{logical(1)} indicating whether to save the plot.}
\item{file}{A \code{character(1)} giving the file name (.pdf) for the plot.}
\item{width}{A \code{numeric(1)} specifying the width in inches of the saved
plot.}
\item{height}{A \code{numeric(1)} specifying the height in inches of the saved
plot.}
\item{verbose}{A \code{logical(1)} indicating whether messages should be
printed.}
}
\value{
A \code{ggplot} object.
}
\description{
\code{plotSDstats()} takes a set of regions from \code{\link[=getRegions]{getRegions()}}, generates
heatmaps of methylation standard deviation against region features, and saves
it as a pdf. Compared features include number of CpGs, minimum coverage, mean
coverage, and mean methylation.
}
\details{
It's recommended to examine these plots before and after filtering to ensure
removal of regions with high variability due to insufficient data. Plots are
heatmaps of 2D bin counts, with the color indicating the number of regions in
that bin on the log10 scale. A \code{ggplot} object is produced and can be
edited outside of this function if desired.
}
\examples{
\dontrun{
# Call Regions
regions <- getRegions(bs, file = "Unfiltered_Regions.txt")
plotRegionStats(regions, maxQuantile = 0.99,
file = "Unfiltered_Region_Plots.pdf")
plotSDstats(regions, maxQuantile = 0.99,
file = "Unfiltered_SD_Plots.pdf")
# Examine Region Totals at Different Cutoffs
regionTotals <- getRegionTotals(regions, file = "Region_Totals.txt")
plotRegionTotals(regionTotals, file = "Region_Totals.pdf")
# Filter Regions
regions <- filterRegions(regions, covMin = 10, methSD = 0.05,
file = "Filtered_Regions.txt")
plotRegionStats(regions, maxQuantile = 0.99,
file = "Filtered_Region_Plots.pdf")
plotSDstats(regions, maxQuantile = 0.99,
file = "Filtered_SD_Plots.pdf")
}
}
\seealso{
\itemize{
\item \code{\link[=getRegions]{getRegions()}} to generate the set of regions.
\item \code{\link[=plotRegionStats]{plotRegionStats()}}, \code{\link[=getRegionTotals]{getRegionTotals()}}, and
\code{\link[=plotRegionTotals]{plotRegionTotals()}} for more help visualizing region
characteristics and setting cutoffs for filtering.
\item \code{\link[=filterRegions]{filterRegions()}} for filtering regions by minimum coverage and
methylation standard deviation.
}
}
|
## Read the Newick gene tree and unroot it (presumably for downstream codeml
## analysis, which expects unrooted trees -- confirm).
library(ape)
testtree <- read.tree("3966_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3966_0_unrooted.txt") | /codeml_files/newick_trees_processed/3966_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
## Read the Newick gene tree, unroot it, and write the unrooted tree back
## out (presumably for downstream codeml analysis -- confirm).
testtree <- read.tree("3966_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3966_0_unrooted.txt")
## The dataset file wasn't uploaded to the course by the time I started this
## so I made my own from Census Data. The zip_income dataset was from
## table S1903 from the Census Factfinder at:
## https://factfinder.census.gov/faces/nav/jsf/pages/index.xhtml
## NOTE(review): this script expects the zipcode, dplyr, data.table,
## ggplot2, maps, and ggmap (for geocode) packages to be attached, and
## readStates() to be defined from the earlier homework -- confirm.
zip_income <- read.csv("homework/zip_income.csv", stringsAsFactors = FALSE)
## Clean up and renaming columns, standards stuff
## (drop the duplicated header row, keep the zip and income columns)
zip_income <- zip_income[-1, c(2,4)]
colnames(zip_income) <- c("zip", "median_income")
row.names(zip_income) <- 1:nrow(zip_income)
## Bringing in zipcode data
data("zipcode")
## Left joining zip_income and zipcode to get the regions, latitude, longitude, etc.
zip_income <- left_join(zip_income, zipcode, by=c("zip" = "zip"))
## Removing Puerto Rico, Alaska, and Hawaii
zip_income <- zip_income[which((zip_income$state != "PR") & (zip_income$state != "AK") & (zip_income$state != "HI")),]
## Get state names from abbreviations and make those lower case
zip_income$state <- state.name[match(zip_income$state, state.abb)]
zip_income$state <- tolower(zip_income$state)
## readStates is a function from previous homework
## that creates a data frame from population information
dfStates <- readStates()
dfStates$state <- tolower(dfStates$stateName)
zip_income <- left_join(zip_income, dfStates, by=c("state" = "state"))
## We're getting rid of everything except the July 2011 population data
zip_income <- zip_income[, c(1, 2, 4:6, 11)]
## And renaming some columns
colnames(zip_income) <- c("zip", "median_income", "state", "latitude", "longitude", "population")
simple_dt <- data.table(zip_income)
## This just silences some type coercion errors
options(datatable.optimize = 1)
## The next few steps setup simple_dt to have numeric income data
## zip codes, and state information
## which are used in the maps that follow
simple_dt$median_income <- as.numeric(simple_dt$median_income)
simple_dt <- simple_dt[, lapply(.SD, mean), by=list(state, zip)]
simple_dt <- simple_dt[complete.cases(simple_dt), ]
simple_dt$state_abbr <- state.abb[match(simple_dt$state, tolower(state.name))]
## And then we build out maps the same way we've done in previous
## homework assignments
us <- map_data("state")
## NOTE(review): attach() puts simple_dt's columns on the search path; some
## plots below rely on it. Prefer explicit data= arguments in a refactor.
attach(simple_dt)
## Map with fill color by Median Income
map.incColor <- ggplot(simple_dt, aes(map_id = state)) +
geom_map(map = us, aes(fill = median_income)) +
expand_limits(x = us$long, y = us$lat) +
coord_map() + ggtitle("State Median Income")
map.incColor
## Map with fill color by Population
map.popColor <- ggplot(simple_dt, aes(map_id = state)) +
geom_map(map = us, aes(fill = population)) +
expand_limits(x = us$long, y = us$lat) +
coord_map() + ggtitle("State Population")
map.popColor
## A map with zip code dots colored by median income
income <- simple_dt$median_income
map.zipIncome <- ggplot(simple_dt, aes(map_id = state)) +
geom_map(map = us) +
geom_point(aes(longitude, latitude, colour = income), size = .01) +
expand_limits(x = us$long, y = us$lat) +
coord_map() + ggtitle("Income by Zip")
map.zipIncome
## The following "zoom" map around New York
## was essentially exactly like the map made in the
## asynch material, but with coloring based on income
## NOTE(review): geocode() needs ggmap and, in current versions, an API key.
ny <- geocode("New York, ny")
nyZoom <- 3
centerx <- ny$lon
centery <- ny$lat
## Setting up the map limits
xlimit <- c(centerx - nyZoom, centerx + nyZoom)
ylimit <- c(centery - nyZoom, centery + nyZoom)
## Making a new smaller data set with only information
## of income from around the New York area
ny_zip_inc <- zip_income
ny_zip_inc <- ny_zip_inc[ny_zip_inc$longitude > xlimit[1], ]
ny_zip_inc <- ny_zip_inc[ny_zip_inc$longitude < xlimit[2], ]
ny_zip_inc <- ny_zip_inc[ny_zip_inc$latitude > ylimit[1], ]
ny_zip_inc <- ny_zip_inc[ny_zip_inc$latitude < ylimit[2], ]
longitude <- ny_zip_inc$longitude
latitude <- ny_zip_inc$latitude
income <- as.numeric(ny_zip_inc$median_income)
## And then the map by New York state income levels
map.zipIncomeNY <- ggplot(ny_zip_inc, aes(map_id = state)) +
geom_map(map = us) +
geom_point(aes(longitude, latitude, colour = income), size = .05) +
scale_color_gradient(low="beige", high="blue") +
expand_limits(x = xlimit, y = ylimit) +
coord_map() + ggtitle("Income by Zip, NY")
map.zipIncomeNY
detach(simple_dt)
## so I made my own from Census Data. The zip_income dataset was from
## table S1903 from the Census Factfinder at:
## https://factfinder.census.gov/faces/nav/jsf/pages/index.xhtml
zip_income <- read.csv("homework/zip_income.csv", stringsAsFactors = FALSE)
## Clean up and renaming columns, standards stuff
zip_income <- zip_income[-1, c(2,4)]
colnames(zip_income) <- c("zip", "median_income")
row.names(zip_income) <- 1:nrow(zip_income)
## Bringing in zipcode data
data("zipcode")
## Left joining zip_income and zipcode to get the regions, latitude, longitude, etc.
zip_income <- left_join(zip_income, zipcode, by=c("zip" = "zip"))
## Removing Puerto Rico, Alaska, and Hawaii
zip_income <- zip_income[which((zip_income$state != "PR") & (zip_income$state != "AK") & (zip_income$state != "HI")),]
## Get state names from abbreviaions and making those lower case
zip_income$state <- state.name[match(zip_income$state, state.abb)]
zip_income$state <- tolower(zip_income$state)
## readStates is a function from previous homework
## that creates a data frame from population information
dfStates <- readStates()
dfStates$state <- tolower(dfStates$stateName)
zip_income <- left_join(zip_income, dfStates, by=c("state" = "state"))
## We're getting rid of everything except the July 2011 population data
zip_income <- zip_income[, c(1, 2, 4:6, 11)]
## And renaming some columns
colnames(zip_income) <- c("zip", "median_income", "state", "latitude", "longitude", "population")
simple_dt <- data.table(zip_income)
## This just silences some type coercion errors
options(datatable.optimize = 1)
## The next few steps setup simple_dt to have numeric income data
## zip codes, and state information
## which are used in the maps that follow
simple_dt$median_income <- as.numeric(simple_dt$median_income)
simple_dt <- simple_dt[, lapply(.SD, mean), by=list(state, zip)]
simple_dt <- simple_dt[complete.cases(simple_dt), ]
simple_dt$state_abbr <- state.abb[match(simple_dt$state, tolower(state.name))]
## And then we build out maps the same way we've done in previous
## homework assignments
us <- map_data("state")
attach(simple_dt)
## Map with fill color by Median Income
map.incColor <- ggplot(simple_dt, aes(map_id = state)) +
geom_map(map = us, aes(fill = median_income)) +
expand_limits(x = us$long, y = us$lat) +
coord_map() + ggtitle("State Median Income")
map.incColor
## Map with fil color by Population
map.popColor <- ggplot(simple_dt, aes(map_id = state)) +
geom_map(map = us, aes(fill = population)) +
expand_limits(x = us$long, y = us$lat) +
coord_map() + ggtitle("State Population")
map.popColor
## A map with zip code dots colored by median income
income <- simple_dt$median_income
map.zipIncome <- ggplot(simple_dt, aes(map_id = state)) +
geom_map(map = us) +
geom_point(aes(longitude, latitude, colour = income), size = .01) +
expand_limits(x = us$long, y = us$lat) +
coord_map() + ggtitle("Income by Zip")
map.zipIncome
## The following "zoom" map around New York
## was essentially exactly like the map made in the
## asynch material, but with coloring based on income
ny <- geocode("New York, ny")
nyZoom <- 3
centerx <- ny$lon
centery <- ny$lat
## Setting up the map limits
xlimit <- c(centerx - nyZoom, centerx + nyZoom)
ylimit <- c(centery - nyZoom, centery + nyZoom)
## Making a new smaller data set with only information
## of income from around the New York area
ny_zip_inc <- zip_income
ny_zip_inc <- ny_zip_inc[ny_zip_inc$longitude > xlimit[1], ]
ny_zip_inc <- ny_zip_inc[ny_zip_inc$longitude < xlimit[2], ]
ny_zip_inc <- ny_zip_inc[ny_zip_inc$latitude > ylimit[1], ]
ny_zip_inc <- ny_zip_inc[ny_zip_inc$latitude < ylimit[2], ]
longitude <- ny_zip_inc$longitude
latitude <- ny_zip_inc$latitude
income <- as.numeric(ny_zip_inc$median_income)
## And then the map by New York state income levels
map.zipIncomeNY <- ggplot(ny_zip_inc, aes(map_id = state)) +
geom_map(map = us) +
geom_point(aes(longitude, latitude, colour = income), size = .05) +
scale_color_gradient(low="beige", high="blue") +
expand_limits(x = xlimit, y = ylimit) +
coord_map() + ggtitle("Income by Zip, NY")
map.zipIncomeNY
detach(simple_dt)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/billboarder-data.R
\docType{data}
\name{equilibre_mensuel}
\alias{equilibre_mensuel}
\title{Monthly supply / demand balance (january 2007 to june 2017)}
\format{
A data frame with 126 rows and 5 variables:
\describe{
\item{date}{Date}
\item{solde}{Supply/demand balance (in GWh)}
\item{production}{Generation (in GWh)}
\item{pompage}{Pumping for hydraulic generation (in GWh)}
\item{consommation}{Consumption (in GWh)}
}
}
\source{
RTE (\url{https://odre.opendatasoft.com/explore/dataset/equilibre-national-mensuel-prod-conso-brute/})
}
\usage{
equilibre_mensuel
}
\description{
Monthly history of supply/demand balance (GWh) based on gross consumption,
the balance of physical exchanges with foreign countries and offtakes due to pumping.
Last update : 2017-07-27.
}
\keyword{datasets}
| /man/equilibre_mensuel.Rd | permissive | dreamRs/billboarder | R | false | true | 876 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/billboarder-data.R
\docType{data}
\name{equilibre_mensuel}
\alias{equilibre_mensuel}
\title{Monthly supply / demand balance (january 2007 to june 2017)}
\format{
A data frame with 126 rows and 5 variables:
\describe{
\item{date}{Date}
\item{solde}{Supply/demand balance (in GWh)}
\item{production}{Generation (in GWh)}
\item{pompage}{Pumping for hydraulic generation (in GWh)}
\item{consommation}{Consumption (in GWh)}
}
}
\source{
RTE (\url{https://odre.opendatasoft.com/explore/dataset/equilibre-national-mensuel-prod-conso-brute/})
}
\usage{
equilibre_mensuel
}
\description{
Monthly history of supply/demand balance (GWh) based on gross consumption,
the balance of physical exchanges with foreign countries and offtakes due to pumping.
Last update : 2017-07-27.
}
\keyword{datasets}
|
#' @title Calculate number of coital acts per couple
#'
#' @param dat master data object
#' @param at timestep
#' @return 'dat' object with number of coital acts per couple added to 'social_discord_edgelist_df' table
#' @details
#' Takes the data.frame, discord_edgelist_df, returned from social_discord_edgelist_df and calculates the number of acts per partnership for timestep then expands data.frame so each row represents single sex act if couples have more than one act per timestep; eliminates couples with zero acts for timestep.
#' @examples
#' dat <- social_coital_acts(dat,at)
#' @export
social_coital_acts <-function(dat,at)
{
#######################################################
#-- takes the data.frame,discord_edgelist_df, returned from social_discord_edgelist_df
#-- and calculates the number of acts per partnership for timestep
#-- then expands data.frame so each row represents a single sex act if the
#-- couples have more than one act for timestep
#-- eliminates couples with zero acts for timestep
#-- main output: discord_coital_df
# inputs: dat$discord_edgelist_df, dat$pop$disclosure_status, dat$param$act_redux_discl,
# dat$param$mean_sex_acts_day, dat$param$days_per_timestep
# outputs: dat$discord_coital_df
#######################################################
dat$discord_coital_df <- NULL #need this in case sum of acts=0
#(small pop size, low # inf)
#this will cause role_fxn to be skipped
if(is.null(dat$discord_edgelist_df)){return(dat)}
discord_edgelist_df <- dat$discord_edgelist_df
disc_inf_ix <- which(dat$discord_edgelist_df$infected==1)
recipient_id <- discord_edgelist_df$sus_id[ disc_inf_ix]
infector_id <- discord_edgelist_df$inf_id[ disc_inf_ix]
#calculate no acts per day, stop if none
reduction_vec <- rep(1, length(infector_id))
temp_index <- which(dat$pop$diag_status[infector_id]==1 & dat$pop$disclosure_status[infector_id]==1)
#reduce no. of sex acts due to infected partner disclosing status
if(length(temp_index)>0)
reduction_vec[temp_index] <- 1.0 -dat$param$act_redux_discl
#track when sus agent last (knowingly) had sex with hiv+ agent
dat$pop$time_hiv_sex[recipient_id][temp_index] <- at
#track when inf agent last had sex
dat$pop$last_disc_sex[infector_id] <- at
#browser()
if(dat$param$prob_sex_by_age){
recip_age_vec <- dat$pop$age[recipient_id]
inf_age_vec <- dat$pop$age[infector_id]
mean_age_vec <- rowMeans(cbind(recip_age_vec,inf_age_vec))
#these calculations come from john
prob_sex <- pmax ( dat$param$prob_sex_age_19 *
(1 - (mean_age_vec - 19) /
(dat$param$max_age_sex - 19) ), 0 )
mean_no_acts <- prob_sex* reduction_vec
}else{
mean_no_acts <- dat$param$mean_sex_acts_day * reduction_vec
}
no_acts <- rpois(length(infector_id),mean_no_acts)
#need to have 1 act as default to keep nondiscordonant couples in df, otw they'd be removed below
discord_edgelist_df$no_acts <- rep(NA_real_,nrow(discord_edgelist_df))
discord_edgelist_df$no_acts[ dat$discord_edgelist_df$infected==1] <- no_acts
ix_nondisc <- which(dat$discord_edgelist_df$infected==0)
discord_edgelist_df$no_acts[ix_nondisc] <- rpois(length(ix_nondisc),dat$param$mean_sex_acts_day)
if(sum(no_acts)==0){return(dat)}
#for counting number of disc act per agent
acts_by_agent <- table( c( rep( discord_edgelist_df$inf_id[disc_inf_ix],
times = discord_edgelist_df$no_acts[disc_inf_ix]),
rep( discord_edgelist_df$sus_id[ disc_inf_ix],
times = discord_edgelist_df$no_acts[disc_inf_ix]) ))
acts_by_agent_index <- as.numeric(names(acts_by_agent))
#discordonant total acts
dat$pop$total_acts[acts_by_agent_index] <-
( dat$pop$total_acts[acts_by_agent_index]+as.numeric(acts_by_agent) )
#add couple id
discord_edgelist_df$couple_id <- 1:nrow(discord_edgelist_df)
#expand edgelist based on number of acts per pair
#each row of discord_coital_df represents single act
df_index <- rep(1:nrow(discord_edgelist_df) , times= discord_edgelist_df$no_acts)
df_temp <- discord_edgelist_df[df_index,,drop=F]
df_temp <- as.data.frame(df_temp,row.names = NULL)
#list of no acts per couple
aa <- split(df_temp$no_acts,df_temp$couple_id)
#list of "act id" per couple
bb <- lapply(aa,function(x){ bb=length(x);x=1:length(x)})
df_temp$act_id_couple <- unlist(bb)
#final output
dat$discord_coital_df <- df_temp
return(dat)
}
| /R/social_coital_acts.R | no_license | EvoNetHIV/Test_and_Treat | R | false | false | 4,611 | r | #' @title Calculate number of coital acts per couple
#'
#' @param dat master data object
#' @param at timestep
#' @return 'dat' object with number of coital acts per couple added to 'social_discord_edgelist_df' table
#' @details
#' Takes the data.frame, discord_edgelist_df, returned from social_discord_edgelist_df and calculates the number of acts per partnership for timestep then expands data.frame so each row represents single sex act if couples have more than one act per timestep; eliminates couples with zero acts for timestep.
#' @examples
#' dat <- social_coital_acts(dat,at)
#' @export
social_coital_acts <-function(dat,at)
{
#######################################################
#-- takes the data.frame,discord_edgelist_df, returned from social_discord_edgelist_df
#-- and calculates the number of acts per partnership for timestep
#-- then expands data.frame so each row represents a single sex act if the
#-- couples have more than one act for timestep
#-- eliminates couples with zero acts for timestep
#-- main output: discord_coital_df
# inputs: dat$discord_edgelist_df, dat$pop$disclosure_status, dat$param$act_redux_discl,
# dat$param$mean_sex_acts_day, dat$param$days_per_timestep
# outputs: dat$discord_coital_df
#######################################################
dat$discord_coital_df <- NULL #need this in case sum of acts=0
#(small pop size, low # inf)
#this will cause role_fxn to be skipped
if(is.null(dat$discord_edgelist_df)){return(dat)}
discord_edgelist_df <- dat$discord_edgelist_df
disc_inf_ix <- which(dat$discord_edgelist_df$infected==1)
recipient_id <- discord_edgelist_df$sus_id[ disc_inf_ix]
infector_id <- discord_edgelist_df$inf_id[ disc_inf_ix]
#calculate no acts per day, stop if none
reduction_vec <- rep(1, length(infector_id))
temp_index <- which(dat$pop$diag_status[infector_id]==1 & dat$pop$disclosure_status[infector_id]==1)
#reduce no. of sex acts due to infected partner disclosing status
if(length(temp_index)>0)
reduction_vec[temp_index] <- 1.0 -dat$param$act_redux_discl
#track when sus agent last (knowingly) had sex with hiv+ agent
dat$pop$time_hiv_sex[recipient_id][temp_index] <- at
#track when inf agent last had sex
dat$pop$last_disc_sex[infector_id] <- at
#browser()
if(dat$param$prob_sex_by_age){
recip_age_vec <- dat$pop$age[recipient_id]
inf_age_vec <- dat$pop$age[infector_id]
mean_age_vec <- rowMeans(cbind(recip_age_vec,inf_age_vec))
#these calculations come from john
prob_sex <- pmax ( dat$param$prob_sex_age_19 *
(1 - (mean_age_vec - 19) /
(dat$param$max_age_sex - 19) ), 0 )
mean_no_acts <- prob_sex* reduction_vec
}else{
mean_no_acts <- dat$param$mean_sex_acts_day * reduction_vec
}
no_acts <- rpois(length(infector_id),mean_no_acts)
#need to have 1 act as default to keep nondiscordonant couples in df, otw they'd be removed below
discord_edgelist_df$no_acts <- rep(NA_real_,nrow(discord_edgelist_df))
discord_edgelist_df$no_acts[ dat$discord_edgelist_df$infected==1] <- no_acts
ix_nondisc <- which(dat$discord_edgelist_df$infected==0)
discord_edgelist_df$no_acts[ix_nondisc] <- rpois(length(ix_nondisc),dat$param$mean_sex_acts_day)
if(sum(no_acts)==0){return(dat)}
#for counting number of disc act per agent
acts_by_agent <- table( c( rep( discord_edgelist_df$inf_id[disc_inf_ix],
times = discord_edgelist_df$no_acts[disc_inf_ix]),
rep( discord_edgelist_df$sus_id[ disc_inf_ix],
times = discord_edgelist_df$no_acts[disc_inf_ix]) ))
acts_by_agent_index <- as.numeric(names(acts_by_agent))
#discordonant total acts
dat$pop$total_acts[acts_by_agent_index] <-
( dat$pop$total_acts[acts_by_agent_index]+as.numeric(acts_by_agent) )
#add couple id
discord_edgelist_df$couple_id <- 1:nrow(discord_edgelist_df)
#expand edgelist based on number of acts per pair
#each row of discord_coital_df represents single act
df_index <- rep(1:nrow(discord_edgelist_df) , times= discord_edgelist_df$no_acts)
df_temp <- discord_edgelist_df[df_index,,drop=F]
df_temp <- as.data.frame(df_temp,row.names = NULL)
#list of no acts per couple
aa <- split(df_temp$no_acts,df_temp$couple_id)
#list of "act id" per couple
bb <- lapply(aa,function(x){ bb=length(x);x=1:length(x)})
df_temp$act_id_couple <- unlist(bb)
#final output
dat$discord_coital_df <- df_temp
return(dat)
}
|
# chap08_VisualizationAnalysi [고급 시각화]
#####################################
## Chapter08. 고급시각화 분석
#####################################
# - lattice, latticeExtra, ggplot2, ggmap 패키지
##########################################
# 1. lattice 패키지
#########################################
# The Lattice Plotting System
# 격자 형태의 그래픽(Trellis graphic) 생성 패키지
# 다차원 데이터를 사용할 경우, 한 번에 여러개의 plot 생성 가능
# 높은 밀도의 plot를 효과적으로 그려준다.
# lattice 패키지의 주요 함수
# xyplot(), barchart(), dotplot(), cloud(),
# histogram(), densityplot(), coplot()
# (참고) barchart(), dotplot() => 이산형 시각화
###########################################
# available.packages()
# install.packages("lattice")
library(lattice)
install.packages("mlmRev")
library(mlmRev)
data(Chem97)
str(Chem97)
#(참고) 주로 num형 데이터를 시각화함.
###### Chem97 데이터 셋 설명 ##########
# - mlmRev 패키지에서 제공
# - 1997년 영국 2,280개 학교 31,022명을 대상으로
# A레벨(대학시험) 화학점수
# 'data.frame': 31022 obs. of 8 variables:
# score 변수 : A레벨 화학점수(0,2,4,6,8,10)
# gender 변수 : 성별
# gcsescore 변수 : 고등학교 재학중에 치루는 큰 시험
# GCSE : General Certificate of Secondary Education)
# 1.histogram(~x축, dataframe)
histogram(~gcsescore, data=Chem97)
# &&& 1
# gcsescore변수를 대상으로 백분율 적용 히스토그램
# score 변수를 조건으로 지정
table(Chem97$score)
# 0 2 4 6 8 10 >> 숫자형 이지만 일정한 빈도를 가지면 범주형으로 쓸수 있다.
# 3688 3627 4619 5739 6668 6681
histogram(~gcsescore | score, data=Chem97) # score 단위
# &&& 2
histogram(~gcsescore | factor(score), data=Chem97) # score 요인 단위
# &&& 3
# | factor(집단변수) : 집단 수 만큼 격자 생성
# 2.densityplot(~x축 | factor(집단변수), dataframe, groups=변수)
densityplot(~gcsescore | factor(score), data=Chem97,
groups = gender, plot.points=T, auto.key = T)
# &&& 4
# 밀도 점 : plot.points=F
# 범례: auto.key=T
# 성별 단위(그룹화)로 GCSE점수를 밀도로 플로팅
library(dplyr)
# matrix -> data.table 변환
str(VADeaths) # num [1:5, 1:4]
class(VADeaths)
VADeaths %>% head()
dft <- as.data.frame.table(VADeaths)
str(dft) # 'data.frame': 20 obs. of 3 variables:
class(dft) # "data.frame"
dft %>% head()
# 3.barchart(y~x | 집단변수, dataframe, layout)
barchart(Var1 ~ Freq | Var2, data=dft, layout=c(4,1))
# &&& 5
# Var2변수 단위(그룹화)로 x축-Freq, y축-Var1으로 막대차트 플로팅
# layout=c(4,1) : 차트를 4행 1열 격자로 보여줌
barchart(Freq~Var1 | Var2, data=dft, layout=c(2,2))
# &&& 6
# x축 0부터 시작
barchart(Var1 ~ Freq | Var2, data=dft, layout=c(4,1), origin=0)
# &&& 7
# 4.dotplot(y~x | 조건 , dataframe, layout)
dotplot(Var1 ~ Freq | Var2 , dft)
# &&& 8
# Var2변수 단위로 그룹화하여 점을 연결하여 플로팅
dotplot(Var1 ~ Freq, data=dft, groups=Var2, type="o",
auto.key=list(space="right", points=T, lines=T))
# &&& 9
# type="o" : 점 타입 -> 원형에 실선 통과
# auto.key=list(배치위치, 점 추가, 선 추가) : 범례
# 5.xyplot(y축~x축| 조건, dataframe or list)
library(datasets)
str(airquality) # datasets의 airqulity 테이터셋 로드
airquality %>% head() # Ozone Solar.R Wind Temp Month(5~9) Day
# airquality의 Ozone(y),Wind(x) 산점도 플로팅
xyplot(Ozone ~ Wind, data=airquality)
# &&& 10
range(airquality$Ozone,na.rm=T)
# Month(5~9)변수 기준으로 플로팅
xyplot(Ozone ~ Wind | Month, data=airquality) # 2행3컬럼
# &&& 11
# default -> layout=c(3,2)
xyplot(Ozone ~ Wind | Month, data=airquality, layout=c(5,1))
# &&& 12
xyplot(Ozone ~ Wind | factor(Month), data=airquality, layout=c(5,1))
# &&& 13
# 5컬럼으로 플로팅 - 컬럼 제목 : Month
# 해석 : 바람의 세기가 작으면 오존이 높다.
# 해석 : 5월부터 8월까지 오존이 오르다 9월부터 내려감.
############## quakes 데이터셋 설명 #################
# R에서 제공하는 기존 데이터셋
# - 1964년 이후 피지(태평양) 근처에 발생한 지진 사건
#lat:위도,long:경도,depth:깊이(km),mag:리히터규모,stations
####################################################
head(quakes)
str(quakes) # 'data.frame': 1000 obs. of 5 variables:
# 변수(5개) : lat, long, depth, mag, stations
range(quakes$stations) # 10 132
# 지진발생 위치(위도와 경로)
xyplot(lat~long, data=quakes, pch=".")
# &&& 14
# 그래프를 변수에 저장
tplot<-xyplot(lat~long, data=quakes, pch=".")
# 그래프에 제목 추가
tplot2<-update(tplot,
main="1964년 이후 태평양에서 발생한 지진위치")
print(tplot2)
# depth 이산형 변수 리코딩
# 1. depth변수 범위
range(quakes$depth)# depth 범위
# 40 680
# 2. depth변수 리코딩
quakes$depth2[quakes$depth >=40 & quakes$depth <=150] <- 1
quakes$depth2[quakes$depth >=151 & quakes$depth <=250] <- 2
quakes$depth2[quakes$depth >=251 & quakes$depth <=350] <- 3
quakes$depth2[quakes$depth >=351 & quakes$depth <=450] <- 4
quakes$depth2[quakes$depth >=451 & quakes$depth <=550] <- 5
quakes$depth2[quakes$depth >=551 & quakes$depth <=680] <- 6
str(quakes) # depth2 : num
# 리코딩된 수심(depth2)변수을 조건으로 산점도 그래프 그리기
convert <- transform(quakes, depth2=factor(depth2))
str(convert) # depth2 : Factor
# 집단형을 팩터형으로 변환해서 convert라는 데이타셋 생성
xyplot(lat~long | depth2, data=convert)
# &&& 15
# 동일한 패널에 2개의 y축에 값을 표현
# xyplot(y1+y2 ~ x | 조건, data, type, layout)
xyplot(Ozone + Solar.R ~ Wind | factor(Month), data=airquality,
col=c("blue","red"),layout=c(5,1), auto.key=T)
# &&& 16
# 6.coplot()
# a조건 하에서 x에 대한 y 그래프를 그린다.
# 형식) coplot(y ~ x : a, data)
# two variantes of the conditioning plot
# http://dic1224.blog.me/80209537545
# 기본 coplot(y~x | a, data, overlap=0.5, number=6, row=2)
# number : 격자의 수 , 기본=6
# overlap : 겹치는 구간(0.1~0.9:작을 수록 사이 간격이 적게 겹침), 기본=0.5
# row : 패널 행수, 기본=2
coplot(lat~long | depth, data=quakes) # 2행3열, 0.5, 사이간격 6
# &&& 17
coplot(lat~long | depth, data=quakes, overlap=0.1) # 겹치는 구간 : 0.1
# &&& 18
coplot(lat~long | depth, data=quakes, number=5, row=1) # 사이간격 5, 1행 5열
# &&& 19
coplot(lat~long | depth, data=quakes, number=5, row=1, panel=panel.smooth)
# &&& 20
coplot(lat~long | depth, data=quakes, number=5, row=1,
col='blue',bar.bg=c(num='green')) # 패널과 조건 막대 색
# &&& 21
# 7.cloud(z~y*x)
# 3차원(위도, 경도, 깊이) 산점도 그래프 플로팅
cloud(depth ~ lat * long , data=quakes,
zlim=rev(range(quakes$depth)))
# &&& 22
cloud(depth ~ lat * long , data=quakes,
zlim=rev(range(quakes$depth)),
xlab="경도", ylab="위도", zlab="깊이")
# &&& 23
# 테두리 사이즈와 회전 속성을 추가하여 3차원 산점도 그래프 그리기
cloud(depth ~ lat * long , data=quakes,
zlim=rev(range(quakes$depth)),
panel.aspect=0.9,
screen=list(z=45,x=-25),
xlab="경도", ylab="위도", zlab="깊이")
# &&& 24
# depth ~ lat * long : depth(z축), lat(y축) * long(x축)
# zlim=rev(range(quakes$depth)) : z축값 범위 지정
# panel.aspect=0.9 : 테두리 사이즈
# screen=list(z=105,x=-70) : z,x축 회전
# xlab="Longitude", ylab="Latitude", zlab="Depth" : xyz축 이름
###########################################
# 2. ggplot2 패키지
###########################################
# ggplot2 그래픽 패키지
# 기하학적 객체들(점,선,막대 등)에 미적 특성(색상, 모양,크기)를
# 맵핑하여 플로팅한다.
# 그래픽 생성 기능과 통계변환을 포함할 수 있다.
# ggplot2의 기본 함수 qplot()
# geoms(점,선 등) 속성, aes(크기,모양,색상) 속성 사용
# dataframe 데이터셋 이용(변환 필요)
###########################################
#install.packages("ggplot2") # 패키지 설치
library(ggplot2)
data(mpg) # 데이터 셋 가져오기
str(mpg) # map 데이터 셋 구조 보기
head(mpg) # map 데이터 셋 내용 보기
summary(mpg) # 요약 통계량
table(mpg$drv) # 구동방식 빈도수
################ mpg 데이터셋 #################
# ggplot2에서 제공하는 데이터셋
# 'data.frame': 234 obs. of 11 variables:
# 주요 변수 : displ:엔진크기, cyl : 실린더수,
# hwy : 고속도로 주행마일수, cty : 도시 주행마일수,
# drv(구동방식) ->사륜구동(4), 전륜구동(f), 후륜구동(r)
###################################################
# 1. qplot()함수
help(qplot)
# (1) 1개 변수 대상 기본 - x축 기준 도수분포도 (히스토그램)
qplot(hwy, data=mpg)
# &&& 1
# fill 옵션 : hwy 변수를 대상으로 drv변수에 색 채우기
qplot(hwy, data=mpg, fill=drv) # fill 옵션 적용
# &&& 2
# binwidth 옵션 : 도수 폭 지정 속성
qplot(hwy, data=mpg, fill=drv, binwidth=2) # binwidth 옵션 적용
# &&& 3
# facets 옵션 : drv변수 값으로 열단위/행단위 패널 생성
qplot(hwy, data=mpg, fill=drv, facets=.~ drv, binwidth=2) # 열 단위 패널 생성(facets=.~ drv)
# &&& 4
qplot(hwy, data=mpg, fill=drv, facets=drv~., binwidth=2) # 행 단위 패널 생성(facets=drv~.)
# &&& 5
# (2) 2변수 대상 기본 - 속이 꽉찬 점 모양과 점의 크기는 1를 갖는 "산점도" 그래프
qplot(displ, hwy, data=mpg)# mpg 데이터셋의 displ과 hwy변수 이용
# &&& 6
# displ, hwy 변수 대상으로 drv변수값으로 색상 적용 산점도 그래프
qplot(displ, hwy, data=mpg, color=drv)
# &&& 7
# (3) 색상, 크기, 모양 적용
### ggplot2 패키지 제공 데이터 셋
head(mtcars)
str(mtcars) # ggplot2에서 제공하는 데이터 셋
#주요 변수
# mpg(연비), cyl(실린더 수), displ(엔진크기), hp(마력), wt(중량),
# qsec(1/4마일 소요시간), am(변속기:0=오토,1=수동), gear(앞쪽 기어 수), carb(카뷰레터 수)
# num(동일색 농도) vs factor(집단별 색상)
qplot(wt, mpg, data=mtcars, color=carb)
# &&& 8
qplot(wt, mpg, data=mtcars, color=factor(carb)) # 색상 적용
# &&& 9
qplot(wt, mpg, data=mtcars, size=qsec, color=factor(carb)) # 크기 적용
# &&& 10
qplot(wt, mpg, data=mtcars, size=qsec, color=factor(carb), shape=factor(cyl))#모양 적용
# &&& 11
mtcars$qsec
# (4) geom 속성
### ggplot2 패키지 제공 데이터 셋
head(diamonds)
str(diamonds)
# 주요 변수
# price : 다이아몬드 가격($326~$18,823), carat :다이아몬드 무게 (0.2~5.01),
# cut : 컷의 품질(Fair,Good,Very Good, Premium Ideal),
# color : 색상(J:가장나쁨 ~ D:가장좋음),
# clarity : 선명도(I1:가장나쁨, SI1, SI1, VS1, VS2, VVS1, VVS2, IF:가장좋음),
# x: 길이, y : 폭
# geom 속성 : 차트 유형, clarity변수 대상 cut변수로 색 채우기
qplot(clarity, data=diamonds, fill=cut, geom="bar") #geom="bar" : 막대차트
# &&& 12
# clarity 범주형 자료는 막대차트로 시각화 하는게 좋음.
# qplot(wt, mpg, data=mtcars, size=qsec) # geom="point" : 산점도
qplot(wt, mpg, data=mtcars, size=qsec, geom="point")
# &&& 13
# cyl 변수의 요인으로 point 크기 적용, carb 변수의 요인으로 포인트 색 적용
qplot(wt, mpg, data=mtcars, size=factor(cyl), color=factor(carb), geom="point")
# &&& 14
# qsec변수로 포인트 크기 적용, cyl 변수의 요인으로 point 모양 적용
qplot(wt, mpg, data=mtcars, size=qsec, color=factor(carb), shape=factor(cyl), geom="point")
# &&& 15
# geom="line"
qplot(mpg, wt, data=mtcars, color=factor(cyl), geom="line")
# &&& 16
# geom="smooth"
qplot(wt, mpg, data=mtcars, geom=c("point", "smooth"))
# &&& 17
qplot(wt, mpg, data=mtcars, geom=c("point", "line"))
# &&& 18
#######################################################
library(UsingR)
data("galton")
head(galton)
library(ggplot2)
p <- ggplot(data=galton, aes(x=parent, y = child))
p + geom_count() + geom_smooth(method='lm')
##########################################################
# 2. ggplot()함수
# (1) aes(x,y,color) 옵션
# aes(x,y,color) 속성 = aesthetics : 미학
p<-ggplot(diamonds, aes(x=carat, y=price, color=cut))
p+geom_point() # point 추가
# &&& 19
# (2) geom_line() 레이어 추가
p+geom_line() # line 추가
# &&& 20
# (3) geom_point()함수 레이어 추가
p<- ggplot(mtcars, aes(mpg,wt,color=factor(cyl)))
p+geom_point() # point 추가
# &&& 21
# (4) geom_step() 레이어 추가
p+geom_step() # step 추가
# &&& 22
# (5) geom_bar() 레이어 추가
p<- ggplot(diamonds, aes(clarity))
p+geom_bar(aes(fill=cut), position="fill") # bar 추가
# &&& 23
# position="fill" : 밀도 1기준으로한 꽉찬 막대차트
# 3. ggsave()함수
# save image of plot on disk
#geom_point()함수 - 결과 동일
p<-ggplot(diamonds, aes(carat, price, color=cut))
p+geom_point() # point 추가
# &&& 24
ggsave(file="C:/ITWILL/2_Rwork/output/diamond_price.pdf") # 가장 최근 그래프 저장
ggsave(file="C:/ITWILL/2_Rwork/output/diamond_price.jpg", dpi=72)
# &&& 25
# 변수에 저장된 그래프 저장
p<- ggplot(diamonds, aes(clarity))
p<- p+geom_bar(aes(fill=cut), position="fill") # bar 추가
print(p)
# &&& 26
ggsave(file="C:/ITWILL/2_Rwork/output/bar.png", plot=p, width=10, height=5)
# &&& 27
##########################################
# 3. ggmap 패키지
##########################################
#공간시각화
# 공간 시각화는 지도를 기반으로 하기 때문에
# 표현방법 : 레이어 형태로 추가하여 시각화
# 영역 : 데이터에 따른 색상과 크기 표현
##########################################
# 지도 관련 패키지 설치
install.packages("ggmap")
library(ggmap) # get_stamenmap()
library(ggplot2) # geom_point(), geom_text(), ggsave()
ge <- geocode('seoul')
#ge <- geocode('seoul') # 인증 key 필요 >>그래서 인증 키 필요한 함수 제외함.
# 서울 : 위도(left), 경도(bottom) : 126.97797 37.56654 -> google 지도에서 검색
# 서울 중심 좌표 : 위도 중심 좌우(126.8 ~ 127.2), 경도 중심 하상(37.38~37.6)
seoul <- c(left = 126.77, bottom = 37.40,
right = 127.17, top = 37.70)
map <- get_stamenmap(seoul, zoom=12, maptype='terrain')#'toner-2011')
ggmap(map) # maptype : terrain, watercolor
# &&& 28
ggmap(get_stamenmap(seoul, zoom=12, maptype='watercolor'))
# &&& 29
# 대구 중심 남쪽 대륙 지도 좌표 : 35.829355, 128.570088
# 대구 위도와 경도 기준으로 남한 대륙 지도 ( 또는 전주 기준으로 남한지도)
daegu <- c(left = 123.4423013, bottom = 32.8528306,
right = 131.601445, top = 38.8714354)
map2 <- get_stamenmap(daegu, zoom=7, maptype='watercolor')
ggmap(map2)
# &&& 30
#[단계1] dataset 가져오기
pop <- read.csv(file.choose()) # Part-II/population201901.csv
str(pop)
head(pop)
region <- pop$'지역명'
lon <- pop$LON
lat <- pop$LAT
# 문자열 -> 숫자형
library(stringr)
tot_pot <- as.numeric(str_replace_all(pop$'총인구수', ",", ""))
tot_pot
df <- data.frame(region, lon, lat, tot_pot)
df
# [단계2] 지도정보 생성 >> 남한 지도
map <- get_stamenmap(daegu, zoom=7, maptype='watercolor')
# [단계3] 레이어1 : 정적인 지도 시각화
layer1 <- ggmap(map)
# &&& 30
# [단계4] 레이어2 : 각 지역별 포인트 추가
# library(ggplot2) # geom_point(), geom_text(), ggsave()
layer2 <- layer1 + geom_point(data=df, aes(x=lon, y=lat,
color=factor(tot_pot),
size=factor(tot_pot)))
layer2
# &&& 31
#[단계5] 레이어3 : 각 지역별 포인트 옆에 지명 추가
layer3 <- layer2 + geom_text(data = df, aes(x=lon+0.01, y=lat+0.08, label=region),
size=3)
layer3
# &&& 32
#[단계6] 지도 이미지 file 저장
ggsave("pop201901.png", scale = 1, width = 10.24, height = 7.68)
getwd() # "C:/ITWILL/2_Rwork"
# &&& 33
| /itwill/R-script/chap08_VisualizationAnalysi.R | no_license | kimjieun6307/itwill | R | false | false | 16,202 | r | # chap08_VisualizationAnalysi [고급 시각화]
#####################################
## Chapter08. 고급시각화 분석
#####################################
# - lattice, latticeExtra, ggplot2, ggmap 패키지
##########################################
# 1. lattice 패키지
#########################################
# The Lattice Plotting System
# 격자 형태의 그래픽(Trellis graphic) 생성 패키지
# 다차원 데이터를 사용할 경우, 한 번에 여러개의 plot 생성 가능
# 높은 밀도의 plot를 효과적으로 그려준다.
# lattice 패키지의 주요 함수
# xyplot(), barchart(), dotplot(), cloud(),
# histogram(), densityplot(), coplot()
# (참고) barchart(), dotplot() => 이산형 시각화
###########################################
# available.packages()
# install.packages("lattice")
library(lattice)
install.packages("mlmRev")
library(mlmRev)
data(Chem97)
str(Chem97)
#(참고) 주로 num형 데이터를 시각화함.
###### Chem97 데이터 셋 설명 ##########
# - mlmRev 패키지에서 제공
# - 1997년 영국 2,280개 학교 31,022명을 대상으로
# A레벨(대학시험) 화학점수
# 'data.frame': 31022 obs. of 8 variables:
# score 변수 : A레벨 화학점수(0,2,4,6,8,10)
# gender 변수 : 성별
# gcsescore 변수 : 고등학교 재학중에 치루는 큰 시험
# GCSE : General Certificate of Secondary Education)
# 1.histogram(~x축, dataframe)
histogram(~gcsescore, data=Chem97)
# &&& 1
# gcsescore변수를 대상으로 백분율 적용 히스토그램
# score 변수를 조건으로 지정
table(Chem97$score)
# 0 2 4 6 8 10 >> 숫자형 이지만 일정한 빈도를 가지면 범주형으로 쓸수 있다.
# 3688 3627 4619 5739 6668 6681
histogram(~gcsescore | score, data=Chem97) # one panel per raw score value
# &&& 2
histogram(~gcsescore | factor(score), data=Chem97) # one panel per score factor level
# &&& 3
# | factor(grouping variable) : creates as many panels as there are groups
# 2. densityplot(~x | factor(grouping variable), dataframe, groups=variable)
densityplot(~gcsescore | factor(score), data=Chem97,
groups = gender, plot.points=T, auto.key = T)
# &&& 4
# density points : plot.points=F would hide the data points
# legend : auto.key=T
# plots GCSE score densities grouped (overlaid) by gender
library(dplyr)
# matrix -> data.frame conversion
str(VADeaths) # num [1:5, 1:4]
class(VADeaths)
VADeaths %>% head()
dft <- as.data.frame.table(VADeaths)
str(dft) # 'data.frame': 20 obs. of 3 variables:
class(dft) # "data.frame"
dft %>% head()
# 3. barchart(y~x | grouping variable, dataframe, layout)
barchart(Var1 ~ Freq | Var2, data=dft, layout=c(4,1))
# &&& 5
# bar chart of Freq (x) vs Var1 (y), one panel per Var2 level
# layout=c(4,1) : arrange the panels in a 4-column by 1-row grid
barchart(Freq~Var1 | Var2, data=dft, layout=c(2,2))
# &&& 6
# origin=0 : start the value axis at 0
barchart(Var1 ~ Freq | Var2, data=dft, layout=c(4,1), origin=0)
# &&& 7
# 4. dotplot(y~x | condition, dataframe, layout)
dotplot(Var1 ~ Freq | Var2 , dft)
# &&& 8
# dot plot grouped by Var2, with the points of each group connected
dotplot(Var1 ~ Freq, data=dft, groups=Var2, type="o",
auto.key=list(space="right", points=T, lines=T))
# &&& 9
# type="o" : point style -> circles with a line drawn through them
# auto.key=list(placement, show points, show lines) : legend options
# 5. xyplot(y ~ x | condition, dataframe or list)
library(datasets)
str(airquality) # airquality dataset from the datasets package
airquality %>% head() # Ozone Solar.R Wind Temp Month(5~9) Day
# scatter plot of airquality's Ozone (y) vs Wind (x)
xyplot(Ozone ~ Wind, data=airquality)
# &&& 10
range(airquality$Ozone,na.rm=T)
# paneled by the Month variable (5~9)
xyplot(Ozone ~ Wind | Month, data=airquality) # 2 rows x 3 columns
# &&& 11
# default -> layout=c(3,2)
xyplot(Ozone ~ Wind | Month, data=airquality, layout=c(5,1))
# &&& 12
xyplot(Ozone ~ Wind | factor(Month), data=airquality, layout=c(5,1))
# &&& 13
# 5 columns, each panel titled with its Month level
# reading : low wind speed tends to coincide with high ozone
# reading : ozone rises from May to August, then falls in September
############## quakes dataset notes #################
# built-in R dataset
# - earthquake events near Fiji (Pacific) since 1964
# lat: latitude, long: longitude, depth (km), mag: Richter magnitude, stations
####################################################
head(quakes)
str(quakes) # 'data.frame': 1000 obs. of 5 variables:
# variables (5) : lat, long, depth, mag, stations
range(quakes$stations) # 10 132
# earthquake locations (latitude vs longitude)
xyplot(lat~long, data=quakes, pch=".")
# &&& 14
# store the trellis object in a variable
tplot<-xyplot(lat~long, data=quakes, pch=".")
# add a title to the stored plot
tplot2<-update(tplot,
main="1964년 이후 태평양에서 발생한 지진위치")
print(tplot2)
# recode depth into a discrete variable
# 1. range of the depth variable
range(quakes$depth)# depth range
# 40 680
# 2. Recode depth (km) into 6 ordinal classes, 1 = shallowest .. 6 = deepest.
# A single vectorized cut() replaces the original six chained subset
# assignments: it scans the vector once and, unlike the >=151 / <=250
# style bounds, leaves no uncovered gaps (e.g. a depth of 150.5) should
# depth ever be non-integer.
# Intervals: [40,150] -> 1, (150,250] -> 2, ..., (550,680] -> 6; values
# outside [40,680] (none occur in quakes) become NA, as before.
quakes$depth2 <- as.numeric(cut(quakes$depth,
breaks = c(40, 150, 250, 350, 450, 550, 680),
labels = FALSE, include.lowest = TRUE))
str(quakes) # depth2 : num
# scatter plot conditioned on the recoded depth (depth2) variable
convert <- transform(quakes, depth2=factor(depth2))
str(convert) # depth2 : Factor
# grouping column converted to factor -> dataset named convert
xyplot(lat~long | depth2, data=convert)
# &&& 15
# draw two y variables in the same panel
# xyplot(y1+y2 ~ x | condition, data, type, layout)
xyplot(Ozone + Solar.R ~ Wind | factor(Month), data=airquality,
col=c("blue","red"),layout=c(5,1), auto.key=T)
# &&& 16
# 6. coplot()
# draws y against x, conditioned on a
# syntax) coplot(y ~ x | a, data)
# two variants of the conditioning plot
# http://dic1224.blog.me/80209537545
# basic form: coplot(y~x | a, data, overlap=0.5, number=6, row=2)
# number : number of conditioning intervals, default=6
# overlap : fraction shared between intervals (0.1~0.9; smaller -> less overlap), default=0.5
# row : number of panel rows, default=2
coplot(lat~long | depth, data=quakes) # 2x3 panels, overlap 0.5, 6 intervals
# &&& 17
coplot(lat~long | depth, data=quakes, overlap=0.1) # overlap fraction : 0.1
# &&& 18
coplot(lat~long | depth, data=quakes, number=5, row=1) # 5 intervals, 1 row x 5 cols
# &&& 19
coplot(lat~long | depth, data=quakes, number=5, row=1, panel=panel.smooth)
# &&& 20
coplot(lat~long | depth, data=quakes, number=5, row=1,
col='blue',bar.bg=c(num='green')) # panel and conditioning-bar colours
# &&& 21
# 7. cloud(z~y*x)
# 3D scatter plot (latitude, longitude, depth)
cloud(depth ~ lat * long , data=quakes,
zlim=rev(range(quakes$depth)))
# &&& 22
cloud(depth ~ lat * long , data=quakes,
zlim=rev(range(quakes$depth)),
xlab="경도", ylab="위도", zlab="깊이")
# &&& 23
# 3D scatter plot with frame-size and rotation settings added
cloud(depth ~ lat * long , data=quakes,
zlim=rev(range(quakes$depth)),
panel.aspect=0.9,
screen=list(z=45,x=-25),
xlab="경도", ylab="위도", zlab="깊이")
# &&& 24
# depth ~ lat * long : depth (z axis), lat (y axis) * long (x axis)
# zlim=rev(range(quakes$depth)) : set the (reversed) z-axis value range
# panel.aspect=0.9 : frame size
# screen=list(z=105,x=-70) : rotation about the z and x axes
# xlab="Longitude", ylab="Latitude", zlab="Depth" : axis labels
###########################################
# 2. The ggplot2 package
###########################################
# ggplot2 graphics package
# maps aesthetic attributes (colour, shape, size) onto geometric
# objects (points, lines, bars, ...) when plotting.
# can include graphic-building functions and statistical transformations.
# qplot() is ggplot2's basic convenience function
# uses geom (point, line, ...) and aes (size, shape, colour) attributes
# works on data.frame datasets (convert first if needed)
###########################################
#install.packages("ggplot2") # install the package
library(ggplot2)
data(mpg) # load the dataset
str(mpg) # structure of the mpg dataset
head(mpg) # first rows of the mpg dataset
summary(mpg) # summary statistics
table(mpg$drv) # frequency table of drive type
################ mpg dataset #################
# dataset shipped with ggplot2
# 'data.frame': 234 obs. of 11 variables:
# key variables : displ: engine size, cyl : number of cylinders,
# hwy : highway miles per gallon, cty : city miles per gallon,
# drv (drive type) -> four-wheel (4), front-wheel (f), rear-wheel (r)
###################################################
# 1. The qplot() function
help(qplot)
# (1) one variable - frequency plot along x (histogram)
qplot(hwy, data=mpg)
# &&& 1
# fill option : colour the hwy bars by the drv variable
qplot(hwy, data=mpg, fill=drv) # fill option applied
# &&& 2
# binwidth option : width of each histogram bin
qplot(hwy, data=mpg, fill=drv, binwidth=2) # binwidth option applied
# &&& 3
# facets option : column-wise / row-wise panels split by drv
qplot(hwy, data=mpg, fill=drv, facets=.~ drv, binwidth=2) # column-wise panels (facets=.~ drv)
# &&& 4
qplot(hwy, data=mpg, fill=drv, facets=drv~., binwidth=2) # row-wise panels (facets=drv~.)
# &&& 5
# (2) two variables - scatter plot with solid points of size 1
qplot(displ, hwy, data=mpg)# using mpg's displ and hwy variables
# &&& 6
# scatter plot of displ vs hwy, coloured by the drv variable
qplot(displ, hwy, data=mpg, color=drv)
# &&& 7
# (3) applying colour, size and shape
### dataset shipped with base R / used by ggplot2 examples
head(mtcars)
str(mtcars) # dataset available with ggplot2 loaded
# key variables
# mpg (fuel economy), cyl (cylinders), displ (engine size), hp (horsepower), wt (weight),
# qsec (quarter-mile time), am (transmission: 0=auto, 1=manual), gear (forward gears), carb (carburetors)
# num (shades of one colour) vs factor (one colour per group)
qplot(wt, mpg, data=mtcars, color=carb)
# &&& 8
qplot(wt, mpg, data=mtcars, color=factor(carb)) # colour applied
# &&& 9
qplot(wt, mpg, data=mtcars, size=qsec, color=factor(carb)) # size applied
# &&& 10
qplot(wt, mpg, data=mtcars, size=qsec, color=factor(carb), shape=factor(cyl))# shape applied
# &&& 11
mtcars$qsec
# (4) the geom attribute
### dataset shipped with ggplot2
head(diamonds)
str(diamonds)
# key variables
# price : diamond price ($326~$18,823), carat : diamond weight (0.2~5.01),
# cut : cut quality (Fair, Good, Very Good, Premium, Ideal),
# color : colour (J: worst ~ D: best),
# clarity : clarity (I1: worst, SI1, SI2, VS1, VS2, VVS1, VVS2, IF: best),
# x : length, y : width
# geom attribute : chart type; fill the clarity bars by cut
qplot(clarity, data=diamonds, fill=cut, geom="bar") #geom="bar" : bar chart
# &&& 12
# categorical data such as clarity is best shown as a bar chart
# qplot(wt, mpg, data=mtcars, size=qsec) # geom="point" : scatter plot
qplot(wt, mpg, data=mtcars, size=qsec, geom="point")
# &&& 13
# point size from factor(cyl), point colour from factor(carb)
qplot(wt, mpg, data=mtcars, size=factor(cyl), color=factor(carb), geom="point")
# &&& 14
# point size from qsec, point shape from factor(cyl)
qplot(wt, mpg, data=mtcars, size=qsec, color=factor(carb), shape=factor(cyl), geom="point")
# &&& 15
# geom="line"
qplot(mpg, wt, data=mtcars, color=factor(cyl), geom="line")
# &&& 16
# geom="smooth"
qplot(wt, mpg, data=mtcars, geom=c("point", "smooth"))
# &&& 17
qplot(wt, mpg, data=mtcars, geom=c("point", "line"))
# &&& 18
#######################################################
library(UsingR)
data("galton")
head(galton)
library(ggplot2)
# Galton's parent/child height data: point counts plus a linear fit
p <- ggplot(data=galton, aes(x=parent, y = child))
p + geom_count() + geom_smooth(method='lm')
##########################################################
# 2. The ggplot() function
# (1) the aes(x,y,color) option
# aes(x,y,color) attribute = aesthetics
p<-ggplot(diamonds, aes(x=carat, y=price, color=cut))
p+geom_point() # add a point layer
# &&& 19
# (2) add a geom_line() layer
p+geom_line() # add a line layer
# &&& 20
# (3) add a geom_point() layer
p<- ggplot(mtcars, aes(mpg,wt,color=factor(cyl)))
p+geom_point() # add a point layer
# &&& 21
# (4) add a geom_step() layer
p+geom_step() # add a step layer
# &&& 22
# (5) add a geom_bar() layer
p<- ggplot(diamonds, aes(clarity))
p+geom_bar(aes(fill=cut), position="fill") # add a bar layer
# &&& 23
# position="fill" : stacked bars normalised so each bar totals 1
# 3. The ggsave() function
# save image of plot on disk
# geom_point() - same result as before
p<-ggplot(diamonds, aes(carat, price, color=cut))
p+geom_point() # add a point layer
# &&& 24
ggsave(file="C:/ITWILL/2_Rwork/output/diamond_price.pdf") # saves the most recent plot
ggsave(file="C:/ITWILL/2_Rwork/output/diamond_price.jpg", dpi=72)
# &&& 25
# save a plot stored in a variable
p<- ggplot(diamonds, aes(clarity))
p<- p+geom_bar(aes(fill=cut), position="fill") # add a bar layer
print(p)
# &&& 26
ggsave(file="C:/ITWILL/2_Rwork/output/bar.png", plot=p, width=10, height=5)
# &&& 27
##########################################
# 3. The ggmap package
##########################################
# spatial visualisation
# spatial visualisation is map-based, so
# presentation : add data as layers on top of the map
# regions : express data through colour and size
##########################################
# install map-related package (NOTE(review): installing inside a script
# re-installs on every run; better done once interactively)
install.packages("ggmap")
library(ggmap) # get_stamenmap()
library(ggplot2) # geom_point(), geom_text(), ggsave()
ge <- geocode('seoul')
#ge <- geocode('seoul') # needs an API key >> so key-requiring functions are skipped below
# Seoul : longitude (left) / latitude (bottom) : 126.97797 37.56654 -> from a Google map search
# Seoul bounding box : longitude 126.8 ~ 127.2, latitude 37.38 ~ 37.6 around the centre
seoul <- c(left = 126.77, bottom = 37.40,
right = 127.17, top = 37.70)
map <- get_stamenmap(seoul, zoom=12, maptype='terrain')#'toner-2011')
ggmap(map) # maptype : terrain, watercolor
# &&& 28
ggmap(get_stamenmap(seoul, zoom=12, maptype='watercolor'))
# &&& 29
# coordinates south of Daegu for a South Korea map : 35.829355, 128.570088
# South Korea map based on Daegu's lat/long (or based on Jeonju)
daegu <- c(left = 123.4423013, bottom = 32.8528306,
right = 131.601445, top = 38.8714354)
map2 <- get_stamenmap(daegu, zoom=7, maptype='watercolor')
ggmap(map2)
# &&& 30
# [step 1] load the dataset
pop <- read.csv(file.choose()) # Part-II/population201901.csv
str(pop)
head(pop)
region <- pop$'지역명'
lon <- pop$LON
lat <- pop$LAT
# character -> numeric (strip thousands separators first)
library(stringr)
tot_pot <- as.numeric(str_replace_all(pop$'총인구수', ",", ""))
tot_pot
df <- data.frame(region, lon, lat, tot_pot)
df
# [step 2] build the map object >> South Korea map
map <- get_stamenmap(daegu, zoom=7, maptype='watercolor')
# [step 3] layer 1 : static map visualisation
layer1 <- ggmap(map)
# &&& 30
# [step 4] layer 2 : add a point for each region
# library(ggplot2) # geom_point(), geom_text(), ggsave()
layer2 <- layer1 + geom_point(data=df, aes(x=lon, y=lat,
color=factor(tot_pot),
size=factor(tot_pot)))
layer2
# &&& 31
# [step 5] layer 3 : add the region name next to each point
layer3 <- layer2 + geom_text(data = df, aes(x=lon+0.01, y=lat+0.08, label=region),
size=3)
layer3
# &&& 32
# [step 6] save the map image to a file (most recent plot, working directory)
ggsave("pop201901.png", scale = 1, width = 10.24, height = 7.68)
getwd() # "C:/ITWILL/2_Rwork"
# &&& 33
|
# Package-private environment; parent = emptyenv() ensures lookups inside it
# cannot fall through to the search path. Presumably used to hold inlabru's
# internal mutable state across calls -- confirm against the rest of the package.
.inlabru_envir <- new.env(parent = emptyenv())
| /R/0_inlabru_envir.R | no_license | cran/inlabru | R | false | false | 47 | r | .inlabru_envir <- new.env(parent = emptyenv())
|
# HackerRank "Time Conversion": read one 12-hour timestamp (hh:mm:ssAM/PM)
# from STDIN and write its 24-hour equivalent to STDOUT.
raw_line <- readLines("stdin", n = 1)
# %I = 12-hour clock hour, %p = AM/PM marker
parsed <- strptime(raw_line, format = "%I:%M:%S%p")
cat(strftime(parsed, format = "%H:%M:%S"))
| /Time Conversion [easy].R | no_license | fardeen-ahmed/HackerRank | R | false | false | 264 | r | # Enter your code here. Read input from STDIN. Print output to STDOUT
time <- read.table(file = "stdin", header = F)
t <-as.POSIXct(time[1,1], format = "%I:%M:%S%p") #The different ways of representing data stored behind the %
cat(format(t, format = "%H:%M:%S"))
|
# Download and unzip the UCI household power-consumption data file
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="household.zip")
unzip("household.zip")
# Read the data and subset the first two days of February 2007
# (the dates are stored as d/m/Y strings, so string comparison is safe here)
household <- read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
household <- household[household$Date=="1/2/2007"|household$Date=="2/2/2007",]
household$DateTime <- paste(household$Date,household$Time)
household$DateTime <- strptime(household$DateTime,"%d/%m/%Y %H:%M:%S")
# Open graphic device
png(file = "plot4.png")
# Make plot: a 2x2 grid of time-series sub-plots
par(mfrow = c(2, 2),mar=c(4,4,1,1))
#axis(side=1,tck=-0.001)
# Sub-plot 1: global active power over time
plot(household$DateTime,household$Global_active_power,type="l",xlab="",
ylab="Global Active Power")
# Sub-plot 2: voltage over time
plot(household$DateTime,household$Voltage,type="l",xlab="datetime",ylab="Voltage")
# Sub-plot 3: the three sub-metering series overlaid on one frame
plot(household$DateTime, household$Sub_metering_1,type="n",xlab="",
ylab="Energy sub metering")
points(household$DateTime,household$Sub_metering_1,type="l")
points(household$DateTime,household$Sub_metering_2,type="l",col="red")
points(household$DateTime,household$Sub_metering_3,type="l",col="blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#,y.intersp=0.5, cex=0.9,seg.len=1, bty="n")
# Sub-plot 4: global reactive power over time
plot(household$DateTime,household$Global_reactive_power,type="l",xlab="datetime",ylab="Global_Reactive_Power")
# Close graphic device
dev.off()
| /plot4.R | no_license | azmdatasci/Assignment_plotting1 | R | false | false | 1,532 | r | # Download and unzip data file
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="household.zip")
unzip("household.zip")
# Read the data and subset the first two months
household <- read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
household <- household[household$Date=="1/2/2007"|household$Date=="2/2/2007",]
household$DateTime <- paste(household$Date,household$Time)
household$DateTime <- strptime(household$DateTime,"%d/%m/%Y %H:%M:%S")
# Open graphic device
png(file = "plot4.png")
# Make plot
par(mfrow = c(2, 2),mar=c(4,4,1,1))
#axis(side=1,tck=-0.001)
# Sub-plot 1
plot(household$DateTime,household$Global_active_power,type="l",xlab="",
ylab="Global Active Power")
# Sub-plot 2
plot(household$DateTime,household$Voltage,type="l",xlab="datetime",ylab="Voltage")
# Sub-plot 3
plot(household$DateTime, household$Sub_metering_1,type="n",xlab="",
ylab="Energy sub metering")
points(household$DateTime,household$Sub_metering_1,type="l")
points(household$DateTime,household$Sub_metering_2,type="l",col="red")
points(household$DateTime,household$Sub_metering_3,type="l",col="blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#,y.intersp=0.5, cex=0.9,seg.len=1, bty="n")
# Sub-plot 4
plot(household$DateTime,household$Global_reactive_power,type="l",xlab="datetime",ylab="Global_Reactive_Power")
# Close graphic device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-functions.R
\name{f0.rosenbrock2}
\alias{f0.rosenbrock2}
\title{f0.rosenbrock2}
\usage{
f0.rosenbrock2(x1, x2)
}
\arguments{
\item{x1}{Parameter 1}
\item{x2}{Parameter 2}
}
\description{
Two variable Rosenbrock function, where f(1,1) = 0
}
| /man/f0.rosenbrock2.Rd | permissive | antonio-pgarcia/evoper | R | false | true | 324 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-functions.R
\name{f0.rosenbrock2}
\alias{f0.rosenbrock2}
\title{f0.rosenbrock2}
\usage{
f0.rosenbrock2(x1, x2)
}
\arguments{
\item{x1}{Parameter 1}
\item{x2}{Parameter 2}
}
\description{
Two variable Rosenbrock function, where f(1,1) = 0
}
|
# jo f. with thanks to:
# http://is-r.tumblr.com/post/46821313005/to-plot-them-is-my-real-test
# http://stackoverflow.com/questions/12918367/in-r-how-to-plot-with-a-png-as-background
# http://students.washington.edu/mclarkso/documents/figure%20layout%20Ver1.R
# http://georeferenced.wordpress.com/2013/01/15/rwordcloud/
# NOTE(review): rm(list=ls()) wipes the user's entire workspace; avoid in shared scripts.
rm(list=ls())
sapply(c("stringr", "jpeg", "RCurl", "EBImage", "wordcloud", "tm"),library, character.only=TRUE)
# Three meme/logo images to place in the 2x2 figure around the word cloud.
allImageURLs <- c("http://upload.wikimedia.org/wikipedia/commons/c/c1/Rlogo.png",
"http://www.memes.at/faces/cereal_guy_squint.jpg",
"http://img2.wikia.nocookie.net/__cb20120912234733/ragecomic/images/9/91/Cereal_Guy_Spitting.jpeg")
imageList <- list()
# Fetch each image and store it, keyed by its "name.ext" basename
# (extracted by the regex from the end of the URL).
for(imageURL in allImageURLs) {
print(imageURL)
tempName <- str_extract(imageURL,"([[:alnum:]_-]+)([[:punct:]])([[:alnum:]]+)$")
print(tempName)
tempImage <- readImage(imageURL)
imageList[[tempName]] <- tempImage
}
# 2x2 layout: an image in three of the cells, the word cloud in the third.
par(mfrow=c(2,2))
plot(0:10, 0:10, type="n", axes=F, ann=FALSE)
rasterImage(imageList[[1]],1,1,10,10)
box("figure", col="black", lwd=2)
plot(0:10, 0:10, type="n", axes=F, ann=FALSE)
rasterImage(imageList[[2]],1,1,10,10)
box("figure", col="black", lwd=2)
# Build a corpus from the text files in ./useRdir, clean it, and draw the cloud.
useR <- Corpus (DirSource("./useRdir"))
useR <- tm_map(useR, stripWhitespace)
useR <- tm_map(useR, tolower)
useR <- tm_map(useR, removeWords, stopwords('english'))
wordcloud(useR, scale=c(4,1.25), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8,'Dark2'))
box("figure", col="black", lwd=2)
plot(0:10, 0:10, type="n", axes=F, ann=FALSE)
rasterImage(imageList[[3]],1,1,10,10)
box("figure", col="black", lwd=2)
| /tshirt_code.R | no_license | calycolor/t-shirt | R | false | false | 1,630 | r | # jo f. with thanks to:
# http://is-r.tumblr.com/post/46821313005/to-plot-them-is-my-real-test
# http://stackoverflow.com/questions/12918367/in-r-how-to-plot-with-a-png-as-background
# http://students.washington.edu/mclarkso/documents/figure%20layout%20Ver1.R
# http://georeferenced.wordpress.com/2013/01/15/rwordcloud/
rm(list=ls())
sapply(c("stringr", "jpeg", "RCurl", "EBImage", "wordcloud", "tm"),library, character.only=TRUE)
allImageURLs <- c("http://upload.wikimedia.org/wikipedia/commons/c/c1/Rlogo.png",
"http://www.memes.at/faces/cereal_guy_squint.jpg",
"http://img2.wikia.nocookie.net/__cb20120912234733/ragecomic/images/9/91/Cereal_Guy_Spitting.jpeg")
imageList <- list()
for(imageURL in allImageURLs) {
print(imageURL)
tempName <- str_extract(imageURL,"([[:alnum:]_-]+)([[:punct:]])([[:alnum:]]+)$")
print(tempName)
tempImage <- readImage(imageURL)
imageList[[tempName]] <- tempImage
}
par(mfrow=c(2,2))
plot(0:10, 0:10, type="n", axes=F, ann=FALSE)
rasterImage(imageList[[1]],1,1,10,10)
box("figure", col="black", lwd=2)
plot(0:10, 0:10, type="n", axes=F, ann=FALSE)
rasterImage(imageList[[2]],1,1,10,10)
box("figure", col="black", lwd=2)
useR <- Corpus (DirSource("./useRdir"))
useR <- tm_map(useR, stripWhitespace)
useR <- tm_map(useR, tolower)
useR <- tm_map(useR, removeWords, stopwords('english'))
wordcloud(useR, scale=c(4,1.25), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8,'Dark2'))
box("figure", col="black", lwd=2)
plot(0:10, 0:10, type="n", axes=F, ann=FALSE)
rasterImage(imageList[[3]],1,1,10,10)
box("figure", col="black", lwd=2)
|
##
## this is the latest version
library(aqp)
library(soilDB)
library(cluster)
library(sharpshootR)
library(reshape2)
library(maps)
library(colorspace)
library(ragg)
library(ggplot2)
library(geofacet)
library(treemapify)
data("us.state.soils")
data("munsell")
# get KSSL lab data + morphologic descriptions for all US state soils
x <- fetchKSSL(series = us.state.soils$series, returnMorphologicData = TRUE, simplifyColors = TRUE)
# extract pieces for simpler code
pedons <- x$SPC
phcolor <- x$phcolor
# normalize taxonname / series (upper-case so joins match)
pedons$taxonname <- toupper(pedons$taxonname)
table(pedons$taxonname)
# re-name for join
pedons$series <- pedons$taxonname
pedons$taxonname <- NULL
# remove state derived from GIS intersection
pedons$state <- NULL
# join state / 2-letter code from us.state.soils
us.state.soils$series <- toupper(us.state.soils$series)
site(pedons) <- us.state.soils
# quick look at the moist colors, arranged by multidimensional scaling
previewColors(pedons$moist_soil_color, method='MDS')
# check: looks good
png(file='state-soil-kssl-data-eval-colors.png', width = 800, height=400, type = 'cairo', antialias = 'subpixel', res = 70)
par(mar=c(0.5,0.5,1,0.5))
groupedProfilePlot(pedons[1:40, ], groups = 'abbreviated', color = "moist_soil_color", print.id=FALSE, max.depth=150)
dev.off()
## aggregate moist color by state
# convert soil colors to CIELAB colorspace
# moist colors
cols.lab <- convertColor(cbind(pedons$m_r, pedons$m_g, pedons$m_b), from='sRGB', to = 'Lab', from.ref.white = 'D65', to.ref.white = 'D65', clip = FALSE)
pedons$m_L <- cols.lab[, 1]
pedons$m_A <- cols.lab[, 2]
pedons$m_B <- cols.lab[, 3]
# aggregate data by state, via slice-wise median
a.colors <- slab(pedons, state ~ m_r + m_g + m_b + m_L + m_A + m_B, slab.fun = median, na.rm=TRUE)
# throw out aggregate data that are deeper than 150cm
a.colors <- subset(a.colors, subset=bottom < 150)
# convert long -> wide format
x.colors <- dcast(a.colors, state + top + bottom ~ variable, value.var = 'value')
# composite sRGB triplets into an R-compatible color
# note that missing colors must be padded with NA
x.colors$soil_color <- NA
not.na <- which(complete.cases(x.colors[, c('m_L', 'm_A', 'm_B')]))
cols.srgb <- data.frame(convertColor(cbind(x.colors$m_L, x.colors$m_A, x.colors$m_B), from='Lab', to = 'sRGB', from.ref.white = 'D65', to.ref.white = 'D65', clip = FALSE))
names(cols.srgb) <- c('R', 'G', 'B')
x.colors$soil_color[not.na] <- with(cols.srgb[not.na, ], rgb(R, G, B, maxColorValue = 1))
# init a new SoilProfileCollection from aggregate data
depths(x.colors) <- state ~ top + bottom
# not bad
par(mar=c(1,0,3,4))
plot(x.colors, divide.hz=FALSE, name=NA, col.label='Soil Color', lwd=1.25, axis.line.offset=0, cex.depth.axis=1, cex.id=1)
## simple color signature
pig <- soilColorSignature(x.colors, r = 'm_r', g = 'm_g', b='m_b')
# account for missing data
idx <- which(complete.cases(pig[, -1]))
pig <- pig[idx, ]
# the first column is the ID
row.names(pig) <- pig[, 1]
d <- daisy(pig[, 2:6])
dd <- diana(d)
# index to those profiles present in `d`
idx <- which(profile_id(x.colors) %in% pig$state)
sp <- x.colors[idx, ]
# hang the aggregate profiles below the divisive-clustering dendrogram
plotProfileDendrogram(sp, dd, dend.y.scale = 0.5, divide.hz=FALSE, scaling.factor = 0.001, y.offset = 0.01, width=0.15)
dd$order.lab
dd$order
### work with OSDs
# get these soil series
s <- fetchOSD(us.state.soils$series)
# join in state
s$series <- profile_id(s)
site(s) <- us.state.soils
# manually convert Munsell -> sRGB
rgb.data <- munsell2rgb(s$hue, s$value, s$chroma, return_triplets = TRUE)
s$r <- rgb.data$r
s$g <- rgb.data$g
s$b <- rgb.data$b
# check
par(mar=c(1,1,1,1))
plot(s)
rgb.colors <- munsell2rgb(s$hue, s$value, s$chroma, return_triplets = TRUE)
lab.colors <- as(sRGB(rgb.colors[['r']], rgb.colors[['g']], rgb.colors[['b']]), 'LAB')@coords
cols <- cbind(rgb.colors, lab.colors)
cols <- na.omit(cols)
cols <- as.data.frame(cols)
# pairwise views of the OSD colors in CIELAB space
png(file='state-soils-osd-color-LAB-palette.png', width=800, height=800, res=90, type='cairo', antialias = 'subpixel')
pairs(~ L + A + B, data=cols, pch=16, cex=3, col=rgb(cols$r, cols$g, cols$b))
dev.off()
# generate color signatures using OSDs and PAM method
pig <- soilColorSignature(s, RescaleLightnessBy = 5, method='pam')
# move row names over for distance matrix
row.names(pig) <- pig[, 1]
d <- daisy(pig[, -1])
dd <- diana(d)
par(mar=c(1,1,1,1))
plotProfileDendrogram(s, dd, dend.y.scale = max(d) * 2, scaling.factor = 0.25, y.offset = 6, width=0.15, cex.names=0.45, label='state', name=NA)
par(mar=c(1,1,1,1))
plot(s, plot.order=dd$order, label='state', name='')
# set order of states based on clustering order
ll <- us.state.soils$state[match(dd$order.lab, us.state.soils$series)]
s$state <- factor(s$state, levels=ll)
# aggregate soil color based on state
a.osd <- aggregateColor(s, groups='state', col='soil_color', mixingMethod = 'estimate')
# plot, vertical axis will be in order of dendrogram leaves
png(file='state-soils-osd-signatures.png', width = 1000, height=800, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), lend=1)
aggregateColorPlot(a.osd, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
title(main='State Soil Color Signatures: OSD', line=-1, cex.main=2)
dev.off()
# same figure, inverse (black background) color scheme
png(file='state-soils-osd-signatures-inverse.png', width = 1000, height=900, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), bg='black', fg='white', lend=1)
aggregateColorPlot(a.osd, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
dev.off()
## all colors from KSSL pedons
pedons$state <- factor(pedons$state, levels=ll)
a.kssl <- aggregateColor(pedons, groups = 'state', col = 'moist_soil_color', mixingMethod = 'estimate')
a.kssl.15 <- aggregateColor(pedons, groups = 'state', col = 'moist_soil_color', k = 15, mixingMethod = 'estimate')
png(file='state-soils-kssl-signatures.png', width = 1000, height=800, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), lend=1)
aggregateColorPlot(a.kssl, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
title(main='State Soil Color Signatures: KSSL', line=-1, cex.main=2)
dev.off()
# k = 15 version, inverse (black background) color scheme
png(file='state-soils-kssl-signatures-15.png', width = 1000, height=800, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), bg='black', fg='white', lend=1)
aggregateColorPlot(a.kssl.15, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
dev.off()
## single color / state based on KSSL morph
# KSSL
a.aggregate <- a.kssl$aggregate.data
a.aggregate$munsell <- paste0(a.aggregate$hue, ' ', a.aggregate$value, '/', a.aggregate$chroma)
# make a grid for plotting
n <- ceiling(sqrt(nrow(a.aggregate)))
# read from top-left to bottom-right
g <- expand.grid(x=1:n, y=n:1)[1:nrow(a.aggregate),]
agg_png(file='state-soils-single-color-kssl.png', width = 1000, height=900, scaling = 1)
par(mar=c(1,0,1,1))
plot(g$x, g$y, pch=15, cex=12, axes=FALSE, xlab='', ylab='', col=a.aggregate$col, xlim=c(0.5,8.5), ylim=c(1.5,8.5))
text(g$x, g$y, a.aggregate$state, adj=c(0.45,5), cex=1, font=2)
text(g$x, g$y, a.aggregate$munsell, col='white', pos=1, cex=0.85, font=2)
title(main='State Soil Colors (KSSL)', line=-1, cex.main=2)
dev.off()
# OSD version of the same single-color grid
a.aggregate <- a.osd$aggregate.data
a.aggregate$munsell <- paste0(a.aggregate$hue, ' ', a.aggregate$value, '/', a.aggregate$chroma)
# make a grid for plotting
n <- ceiling(sqrt(nrow(a.aggregate)))
# read from top-left to bottom-right
g <- expand.grid(x=1:n, y=n:1)[1:nrow(a.aggregate),]
agg_png(file='state-soils-single-color-osd.png', width = 1000, height=900, scaling = 1)
par(mar=c(1,0,1,1))
plot(g$x, g$y, pch=15, cex=12, axes=FALSE, xlab='', ylab='', col=a.aggregate$col, xlim=c(0.5,8.5), ylim=c(1.5,8.5))
text(g$x, g$y, a.aggregate$state, adj=c(0.45,5), cex=1, font=2)
text(g$x, g$y, a.aggregate$munsell, col='white', pos=1, cex=0.85, font=2)
title(main='State Soil Colors (OSD)', line=-1, cex.main=2)
dev.off()
## simple map with maps package
# get state names in order of plotting
state.map <- map('state', plot=FALSE)$names
# clean cruft from names (drop the part after ':', e.g. island polygons)
state.map <- strsplit(state.map, ':')
state.map <- sapply(state.map, function(i) i[[1]])
# index mapping states to colors
col.idx <- match(state.map, tolower(a.aggregate$state))
agg_png(file='state-soils-single-color-osd-map.png', width = 1000, height=900)
par(mar=c(1,0,1,1))
map('state', col=a.aggregate$col[col.idx], fill=TRUE, mar=c(1,1,2,1))
title(main='State Soil Colors (OSD)', line=1, cex.main=2)
dev.off()
## VI missing KSSL data in fetchKSSL() snapshot
## split into OSD / KSSL representation
##
## geofacet with state soil colours + Munsell
##
# one filled tile per state, labelled with its Munsell notation
state_cols <- a.aggregate$col
names(state_cols) <- a.aggregate$state
geoplot <- ggplot(a.aggregate) +
geom_rect(aes(xmin = 0, xmax = 1, ymin = 0, ymax = 1, fill = state)) +
geom_text(aes(x = 0.5, y = 0.5, label = munsell), colour = "#ffffff") +
facet_geo(~state, strip.position = "bottom") +
scale_fill_manual(
guide = 'none',
values = state_cols
) +
coord_equal() +
theme_bw() +
theme(
strip.background = element_blank(),
strip.text = element_text(face = "bold"),
panel.border = element_rect(fill = NA, colour = NA),
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
labs(title = 'US State Soil Colors (Moist)', subtitle = 'Source: Official Series Descriptions via soilDB::fetchOSD()', caption = 'Weighted mean soil color in CIELAB colorspace via aqp::aggregateColor()')
agg_png("geofacet-soils-osd.png", width = 5000, height = 4000, scaling = 6)
print(geoplot)
dev.off()
## geofacet with treemaps of main soil colours per state
## OSD version
soil_data_states <- do.call(rbind, a.osd$scaled.data)
# recover the state from the "state.N" row names created by rbind()
soil_data_states$state <- stringr::str_remove(stringr::str_extract(row.names(soil_data_states), '.*[.]'), "\\.")
treemap_cols <- soil_data_states$soil_color
names(treemap_cols) <- soil_data_states$munsell
geoplot_treemap <- ggplot(data = soil_data_states) +
geom_treemap(aes(area = weight, fill = munsell)) +
facet_geo(~state, strip.position = "bottom") +
scale_fill_manual(
guide = 'none',
values = treemap_cols
) +
coord_equal() +
theme_bw() +
theme(
strip.background = element_blank(),
strip.text = element_text(face = "bold"),
panel.border = element_rect(fill = NA, colour = NA),
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
labs(title = 'US State Soil Colors (Moist)', subtitle = 'Source: Official Series Descriptions via soilDB::fetchOSD()', caption = 'Soil color proportions via aqp::aggregateColor()')
agg_png("geofacet-treemap-soils-osd.png", width = 5000, height = 4000, scaling = 6)
print(geoplot_treemap)
dev.off()
## KSSL version
soil_data_states <- do.call(rbind, a.kssl$scaled.data)
# recover the state from the "state.N" row names created by rbind()
soil_data_states$state <- stringr::str_remove(stringr::str_extract(row.names(soil_data_states), '.*[.]'), "\\.")
treemap_cols <- soil_data_states$moist_soil_color
names(treemap_cols) <- soil_data_states$munsell
geoplot_treemap <- ggplot(data = soil_data_states) +
geom_treemap(aes(area = weight, fill = munsell)) +
facet_geo(~state, strip.position = "bottom") +
scale_fill_manual(
guide = 'none',
values = treemap_cols
) +
coord_equal() +
theme_bw() +
theme(
strip.background = element_blank(),
strip.text = element_text(face = "bold"),
panel.border = element_rect(fill = NA, colour = NA),
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
labs(title = 'US State Soil Colors (Moist)', subtitle = 'Source: soil morphologic data via soilDB::fetchKSSL()', caption = 'Soil color proportions via aqp::aggregateColor()')
agg_png("geofacet-treemap-soils-kssl.png", width = 5000, height = 4000, scaling = 6)
print(geoplot_treemap)
dev.off()
| /AQP/state-soils/state-soil-colors.R | no_license | ncss-tech/ncss-tech.github.io | R | false | false | 12,158 | r | ##
## this is the latest version
library(aqp)
library(soilDB)
library(cluster)
library(sharpshootR)
library(reshape2)
library(maps)
library(colorspace)
library(ragg)
library(ggplot2)
library(geofacet)
library(treemapify)
data("us.state.soils")
data("munsell")
# get KSSL + morph for all state soils
x <- fetchKSSL(series = us.state.soils$series, returnMorphologicData = TRUE, simplifyColors = TRUE)
# extract pieces for simpler code
pedons <- x$SPC
phcolor <- x$phcolor
# normalize taxonname / series
pedons$taxonname <- toupper(pedons$taxonname)
table(pedons$taxonname)
# re-name for join
pedons$series <- pedons$taxonname
pedons$taxonname <- NULL
# remove state derived from GIS intersection
pedons$state <- NULL
# join state / 2-letter code from us.state.soils
us.state.soils$series <- toupper(us.state.soils$series)
site(pedons) <- us.state.soils
previewColors(pedons$moist_soil_color, method='MDS')
# check: looks good
png(file='state-soil-kssl-data-eval-colors.png', width = 800, height=400, type = 'cairo', antialias = 'subpixel', res = 70)
par(mar=c(0.5,0.5,1,0.5))
groupedProfilePlot(pedons[1:40, ], groups = 'abbreviated', color = "moist_soil_color", print.id=FALSE, max.depth=150)
dev.off()
## aggregate moist color by state
# convert soil colors to LAB colorspace
# moist colors
cols.lab <- convertColor(cbind(pedons$m_r, pedons$m_g, pedons$m_b), from='sRGB', to = 'Lab', from.ref.white = 'D65', to.ref.white = 'D65', clip = FALSE)
pedons$m_L <- cols.lab[, 1]
pedons$m_A <- cols.lab[, 2]
pedons$m_B <- cols.lab[, 3]
# aggregate data by normalized taxonname, via slice-wise median
a.colors <- slab(pedons, state ~ m_r + m_g + m_b + m_L + m_A + m_B, slab.fun = median, na.rm=TRUE)
# throw out aggregate data that are deeper than 150cm
a.colors <- subset(a.colors, subset=bottom < 150)
# convert long -> wide format
x.colors <- dcast(a.colors, state + top + bottom ~ variable, value.var = 'value')
# composite sRGB triplets into an R-compatible color
# note that missing colors must be padded with NA
x.colors$soil_color <- NA
not.na <- which(complete.cases(x.colors[, c('m_L', 'm_A', 'm_B')]))
cols.srgb <- data.frame(convertColor(cbind(x.colors$m_L, x.colors$m_A, x.colors$m_B), from='Lab', to = 'sRGB', from.ref.white = 'D65', to.ref.white = 'D65', clip = FALSE))
names(cols.srgb) <- c('R', 'G', 'B')
x.colors$soil_color[not.na] <- with(cols.srgb[not.na, ], rgb(R, G, B, maxColorValue = 1))
# init a new SoilProfileCollection from aggregate data
depths(x.colors) <- state ~ top + bottom
# not bad
par(mar=c(1,0,3,4))
plot(x.colors, divide.hz=FALSE, name=NA, col.label='Soil Color', lwd=1.25, axis.line.offset=0, cex.depth.axis=1, cex.id=1)
## simple color signature
pig <- soilColorSignature(x.colors, r = 'm_r', g = 'm_g', b='m_b')
# account for missing data
idx <- which(complete.cases(pig[, -1]))
pig <- pig[idx, ]
# the first column is the ID
row.names(pig) <- pig[, 1]
d <- daisy(pig[, 2:6])
dd <- diana(d)
# index to those profiles present in `d`
idx <- which(profile_id(x.colors) %in% pig$state)
sp <- x.colors[idx, ]
plotProfileDendrogram(sp, dd, dend.y.scale = 0.5, divide.hz=FALSE, scaling.factor = 0.001, y.offset = 0.01, width=0.15)
dd$order.lab
dd$order
### work with OSDs
# get these soil series
s <- fetchOSD(us.state.soils$series)
# join in state
s$series <- profile_id(s)
site(s) <- us.state.soils
# manually convert Munsell -> sRGB
rgb.data <- munsell2rgb(s$hue, s$value, s$chroma, return_triplets = TRUE)
s$r <- rgb.data$r
s$g <- rgb.data$g
s$b <- rgb.data$b
# check
par(mar=c(1,1,1,1))
plot(s)
rgb.colors <- munsell2rgb(s$hue, s$value, s$chroma, return_triplets = TRUE)
lab.colors <- as(sRGB(rgb.colors[['r']], rgb.colors[['g']], rgb.colors[['b']]), 'LAB')@coords
cols <- cbind(rgb.colors, lab.colors)
cols <- na.omit(cols)
cols <- as.data.frame(cols)
png(file='state-soils-osd-color-LAB-palette.png', width=800, height=800, res=90, type='cairo', antialias = 'subpixel')
pairs(~ L + A + B, data=cols, pch=16, cex=3, col=rgb(cols$r, cols$g, cols$b))
dev.off()
# generate color signatures using OSDs and PAM method
pig <- soilColorSignature(s, RescaleLightnessBy = 5, method='pam')
# move row names over for distance matrix
row.names(pig) <- pig[, 1]
d <- daisy(pig[, -1])
dd <- diana(d)
par(mar=c(1,1,1,1))
plotProfileDendrogram(s, dd, dend.y.scale = max(d) * 2, scaling.factor = 0.25, y.offset = 6, width=0.15, cex.names=0.45, label='state', name=NA)
par(mar=c(1,1,1,1))
plot(s, plot.order=dd$order, label='state', name='')
# set order of states based on clustering order
ll <- us.state.soils$state[match(dd$order.lab, us.state.soils$series)]
s$state <- factor(s$state, levels=ll)
# aggregate soil color based on sate
a.osd <- aggregateColor(s, groups='state', col='soil_color', mixingMethod = 'estimate')
# plot, vertical axis will be in order of dendrogram leaves
png(file='state-soils-osd-signatures.png', width = 1000, height=800, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), lend=1)
aggregateColorPlot(a.osd, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
title(main='State Soil Color Signatures: OSD', line=-1, cex.main=2)
dev.off()
png(file='state-soils-osd-signatures-inverse.png', width = 1000, height=900, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), bg='black', fg='white', lend=1)
aggregateColorPlot(a.osd, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
dev.off()
## all colors from KSSL pedons
pedons$state <- factor(pedons$state, levels=ll)
a.kssl <- aggregateColor(pedons, groups = 'state', col = 'moist_soil_color', mixingMethod = 'estimate')
a.kssl.15 <- aggregateColor(pedons, groups = 'state', col = 'moist_soil_color', k = 15, mixingMethod = 'estimate')
png(file='state-soils-kssl-signatures.png', width = 1000, height=800, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), lend=1)
aggregateColorPlot(a.kssl, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
title(main='State Soil Color Signatures: KSSL', line=-1, cex.main=2)
dev.off()
png(file='state-soils-kssl-signatures-15.png', width = 1000, height=800, type = 'cairo', antialias = 'subpixel', res = 90)
par(mar=c(0.5, 6, 1, 0.5), bg='black', fg='white', lend=1)
aggregateColorPlot(a.kssl.15, print.label = FALSE, x.axis = FALSE, rect.border = NA, horizontal.borders = TRUE, horizontal.border.lwd = 1)
dev.off()
## single color / state based on KSSL morph
# KSSL
a.aggregate <- a.kssl$aggregate.data
a.aggregate$munsell <- paste0(a.aggregate$hue, ' ', a.aggregate$value, '/', a.aggregate$chroma)
# make a grid for plotting
n <- ceiling(sqrt(nrow(a.aggregate)))
# read from top-left to bottom-right
g <- expand.grid(x=1:n, y=n:1)[1:nrow(a.aggregate),]
agg_png(file='state-soils-single-color-kssl.png', width = 1000, height=900, scaling = 1)
par(mar=c(1,0,1,1))
plot(g$x, g$y, pch=15, cex=12, axes=FALSE, xlab='', ylab='', col=a.aggregate$col, xlim=c(0.5,8.5), ylim=c(1.5,8.5))
text(g$x, g$y, a.aggregate$state, adj=c(0.45,5), cex=1, font=2)
text(g$x, g$y, a.aggregate$munsell, col='white', pos=1, cex=0.85, font=2)
title(main='State Soil Colors (KSSL)', line=-1, cex.main=2)
dev.off()
# OSD
a.aggregate <- a.osd$aggregate.data
a.aggregate$munsell <- paste0(a.aggregate$hue, ' ', a.aggregate$value, '/', a.aggregate$chroma)
# make a grid for plotting
n <- ceiling(sqrt(nrow(a.aggregate)))
# read from top-left to bottom-right
g <- expand.grid(x=1:n, y=n:1)[1:nrow(a.aggregate),]
agg_png(file='state-soils-single-color-osd.png', width = 1000, height=900, scaling = 1)
par(mar=c(1,0,1,1))
plot(g$x, g$y, pch=15, cex=12, axes=FALSE, xlab='', ylab='', col=a.aggregate$col, xlim=c(0.5,8.5), ylim=c(1.5,8.5))
text(g$x, g$y, a.aggregate$state, adj=c(0.45,5), cex=1, font=2)
text(g$x, g$y, a.aggregate$munsell, col='white', pos=1, cex=0.85, font=2)
title(main='State Soil Colors (OSD)', line=-1, cex.main=2)
dev.off()
## simple map with maps package
# get state names in order of plotting
state.map <- map('state', plot=FALSE)$names
# clean cruft from names
state.map <- strsplit(state.map, ':')
state.map <- sapply(state.map, function(i) i[[1]])
# index mapping states to colors
col.idx <- match(state.map, tolower(a.aggregate$state))
agg_png(file='state-soils-single-color-osd-map.png', width = 1000, height=900)
par(mar=c(1,0,1,1))
map('state', col=a.aggregate$col[col.idx], fill=TRUE, mar=c(1,1,2,1))
title(main='State Soil Colors (OSD)', line=1, cex.main=2)
dev.off()
## VI missing KSSL data in fetchKSSL() snapshot
## split into OSD / KSSL representation
##
## geofacet with state soil colours + Munsell
##
state_cols <- a.aggregate$col
names(state_cols) <- a.aggregate$state
geoplot <- ggplot(a.aggregate) +
geom_rect(aes(xmin = 0, xmax = 1, ymin = 0, ymax = 1, fill = state)) +
geom_text(aes(x = 0.5, y = 0.5, label = munsell), colour = "#ffffff") +
facet_geo(~state, strip.position = "bottom") +
scale_fill_manual(
guide = 'none',
values = state_cols
) +
coord_equal() +
theme_bw() +
theme(
strip.background = element_blank(),
strip.text = element_text(face = "bold"),
panel.border = element_rect(fill = NA, colour = NA),
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
labs(title = 'US State Soil Colors (Moist)', subtitle = 'Source: Official Series Descriptions via soilDB::fetchOSD()', caption = 'Weighted mean soil color in CIELAB colorspace via aqp::aggregateColor()')
agg_png("geofacet-soils-osd.png", width = 5000, height = 4000, scaling = 6)
print(geoplot)
dev.off()
## geofacet with treemaps of main soil colours per state
## OSD version
soil_data_states <- do.call(rbind, a.osd$scaled.data)
soil_data_states$state <- stringr::str_remove(stringr::str_extract(row.names(soil_data_states), '.*[.]'), "\\.")
treemap_cols <- soil_data_states$soil_color
names(treemap_cols) <- soil_data_states$munsell
geoplot_treemap <- ggplot(data = soil_data_states) +
geom_treemap(aes(area = weight, fill = munsell)) +
facet_geo(~state, strip.position = "bottom") +
scale_fill_manual(
guide = 'none',
values = treemap_cols
) +
coord_equal() +
theme_bw() +
theme(
strip.background = element_blank(),
strip.text = element_text(face = "bold"),
panel.border = element_rect(fill = NA, colour = NA),
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
labs(title = 'US State Soil Colors (Moist)', subtitle = 'Source: Official Series Descriptions via soilDB::fetchOSD()', caption = 'Soil color proportions via aqp::aggregateColor()')
agg_png("geofacet-treemap-soils-osd.png", width = 5000, height = 4000, scaling = 6)
print(geoplot_treemap)
dev.off()
## KSSL version
soil_data_states <- do.call(rbind, a.kssl$scaled.data)
soil_data_states$state <- stringr::str_remove(stringr::str_extract(row.names(soil_data_states), '.*[.]'), "\\.")
treemap_cols <- soil_data_states$moist_soil_color
names(treemap_cols) <- soil_data_states$munsell
geoplot_treemap <- ggplot(data = soil_data_states) +
geom_treemap(aes(area = weight, fill = munsell)) +
facet_geo(~state, strip.position = "bottom") +
scale_fill_manual(
guide = 'none',
values = treemap_cols
) +
coord_equal() +
theme_bw() +
theme(
strip.background = element_blank(),
strip.text = element_text(face = "bold"),
panel.border = element_rect(fill = NA, colour = NA),
panel.grid = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()
) +
labs(title = 'US State Soil Colors (Moist)', subtitle = 'Source: soil morphologic data via soilDB::fetchKSSL()', caption = 'Soil color proportions via aqp::aggregateColor()')
agg_png("geofacet-treemap-soils-kssl.png", width = 5000, height = 4000, scaling = 6)
print(geoplot_treemap)
dev.off()
|
#hw_sim_noise
source("hw_functions.R")
source("precision_recall.R")
library(randomForest)
library(stats)
library(MASS)
library(Matrix)
library(scatterplot3d)
library(rerf)
library(vegan)
library(umap)
library(pracma)
library(rflann)
# Nearest-neighbour precision of FLANN under increasing ambient noise.
#
# Samples `num_of_points` points from a synthetic manifold ("hw", "helix",
# "linear", "mog", or the default half-sphere of radius 9), pads the data
# with 0 .. 10000 extra Gaussian-noise dimensions, and for each noise level
# computes the precision of FLANN approximate nearest-neighbour indices
# against the analytic geodesic ground truth.
#
# experiment: tag used only in the output file name (default "rotation").
# data:       which synthetic data set to generate (default "sphere").
#
# Side effect: saves `flann_norm_prec_list` and `at_K` to
# "<experiment>_<data>_varying_dims_with_flann_normalized1000.Rdata".
# Relies on helpers sourced from hw_functions.R / precision_recall.R.
noise_experiments_varying_dim <- function(experiment = "rotation", data = "sphere") {
  num_of_points <- 1000
  # Generate the sample and its latent parameterisation t.
  hw_data_object <- switch(data,
    hw     = hw_data(num_of_points),
    helix  = helix_data(num_of_points),
    linear = linear_data(num_of_points),
    mog    = mog_data(num_of_points),
    upper_sphere(r = 9, N = num_of_points)  # default branch: half-sphere, radius 9
  )
  hw_data <- hw_data_object[[1]]
  t <- hw_data_object[[2]]
  # Ground-truth geodesic distance matrix for the chosen manifold.
  D_truth <- switch(data,
    hw     = hw_geodesic(t, num_of_points),
    helix  = helix_geodesic(t, num_of_points),
    linear = linear_geodesic(t, num_of_points),
    mog    = mog_geodesic(hw_data),
    sphere_geodesic(hw_data, 9, num_of_points)
  )
  at_K <- seq(50, 51, 1)  # neighbourhood sizes at which precision is evaluated
  noise_dims <- c(0, 10, 50, 100, 1000, 5000, 10000)
  arf_prec_list <- c()    # precision at at_K[1], one entry per noise level
  for (noise_dim in noise_dims) {
    # Progress: report the noise dimensionality being processed.
    # (Previously printed noise_dim/2, a leftover from noise_dims = seq(0, 10, 2).)
    print("it: ")
    print(noise_dim)
    if (noise_dim > 0) {
      # noise_1 is unused downstream but kept so the RNG stream (and hence
      # noise_2 and the saved results) matches earlier runs of this script.
      noise_1 <- generate_high_dim_uniform_noise(num_of_points, noise_dim, const = 70)
      noise_2 <- generate_high_dim_gaussian_noise(num_of_points, noise_dim, const = 70)
      hw_noise_data <- cbind(hw_data, noise_2)
    } else {
      hw_noise_data <- hw_data
    }
    # Indices of the 1000 approximate nearest neighbours of every point
    # (rflann::Neighbour), used in place of a random-forest proximity matrix.
    D_rf <- Neighbour(hw_noise_data, hw_noise_data, 1000)$indices
    D_rf_p_r_list <- p_r_list(D_rf, D_truth, at_K, num_of_points)
    arf_prec_list <- c(arf_prec_list, D_rf_p_r_list$precisionList[[1]])
  }
  flann_norm_prec_list <- arf_prec_list
  save(flann_norm_prec_list, at_K,
       file = paste(experiment, data, "varying_dims_with_flann_normalized1000.Rdata", sep = "_"))
}
| /hw_flann_10e4_dim_noise.R | no_license | megh1241/experiments_URerF | R | false | false | 2,249 | r | #hw_sim_noise
source("hw_functions.R")
source("precision_recall.R")
library(randomForest)
library(stats)
library(MASS)
library(Matrix)
library(scatterplot3d)
library(rerf)
library(vegan)
library(umap)
library(pracma)
library(rflann)
# Measures nearest-neighbour precision of FLANN on a synthetic manifold
# ("hw", "helix", "linear", "mog", or the default half-sphere of radius 9)
# while padding the data with an increasing number of Gaussian noise
# dimensions. Saves the per-noise-level precisions to an .Rdata file named
# after `experiment` and `data`. Helpers come from the sourced scripts.
noise_experiments_varying_dim<- function(experiment = "rotation", data = "sphere") {
num_of_points=1000
# generate the sample and its latent parameterisation t
if (data == "hw")
hw_data_object=hw_data(num_of_points)
else if(data == "helix")
hw_data_object=helix_data(num_of_points)
else if(data == "linear")
hw_data_object=linear_data(num_of_points)
else if(data == "mog")
hw_data_object=mog_data(num_of_points)
else
hw_data_object=upper_sphere(r=9, N=num_of_points)
hw_data=hw_data_object[[1]]
t=hw_data_object[[2]]
# ground-truth geodesic distance matrix for the chosen manifold
if (data == "hw")
D_truth=hw_geodesic(t, num_of_points)
else if(data == "helix")
D_truth=helix_geodesic(t, num_of_points)
else if(data=="linear")
D_truth=linear_geodesic(t, num_of_points)
else if(data == "mog")
D_truth=mog_geodesic(hw_data)
else
D_truth=sphere_geodesic(hw_data,9, num_of_points)
# neighbourhood sizes at which precision is evaluated
at_K=seq(50, 51, 1)
noise_dims = c(0, 10, 50, 100, 1000, 5000, 10000)
#noise_dims = seq(0, 10, 2)
# accumulators; only arf_prec_list is actually filled below
rf_prec_list = c()
iso_prec_list = c()
umap_prec_list = c()
arf_prec_list = c()
rf_norm_prec_list = c()
iso_norm_prec_list = c()
umap_norm_prec_list = c()
arf_norm_prec_list = c()
for (noise_dim in noise_dims) {
#generate noise
# NOTE(review): prints noise_dim/2 -- presumably a leftover from the
# seq(0, 10, 2) grid above; confirm intent.
print("it: ")
print(noise_dim/2)
if(noise_dim > 0)
{
# NOTE(review): noise_1 is never used afterwards; it does advance the RNG
# stream, so removing it would change downstream random draws -- confirm.
noise_1=generate_high_dim_uniform_noise(num_of_points, noise_dim, const = 70)
noise_2=generate_high_dim_gaussian_noise(num_of_points, noise_dim, const=70)
hw_noise_data=cbind(hw_data, noise_2)
}
else
hw_noise_data = hw_data
#g_noise1=randomForest(hw_noise_data, ntree=300, keep.forest=FALSE, proximity=TRUE)
#simMat=g_noise1$proximity
#D_rf = 1-simMat
# indices of the 1000 approximate nearest neighbours (rflann::Neighbour)
D_rf = Neighbour(hw_noise_data, hw_noise_data, 1000)$indices
#####################################################################
D_rf_p_r_list = p_r_list(D_rf, D_truth, at_K, num_of_points)
D_rf_precision_list= D_rf_p_r_list$precisionList
arf_prec_list = c(arf_prec_list, D_rf_precision_list[[1]])
}
flann_norm_prec_list = arf_prec_list
# persist results; file name is "<experiment>_<data>_varying_dims_..."
save(flann_norm_prec_list,
at_K, file= paste(experiment, data, "varying_dims_with_flann_normalized1000.Rdata", sep="_"))
}
|
/Credit Australian Market/ProgramCreditAustralian.R | no_license | redisdead-pack/TD_ESILV_BernieThomas | R | false | false | 3,706 | r | ||
#' user_data UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_user_data_ui <- function(id) {
  ns <- NS(id)
  # Module UI: the user's file table (spinner shown while it loads) plus a
  # delete button that displays busy feedback during the delete.
  tagList(
    withSpinner(DT::DTOutput(ns("user_dribble"))),
    shinyFeedback::loadingButton(ns("delete"), "Delete from DB", loadingLabel = "Deleting file")
  )
}
#' user_data Server Functions
#'
#' @noRd
mod_user_data_server <- function(id, user, db_trigger) {
  moduleServer(id, function(input, output, session) {
    ns <- session$ns
    # Reactive dribble of the user's stored files.
    user_dribble <- reactive({
      req(user())
      db_trigger()  # take a dependency so the listing refreshes on DB updates
      get_file_list(user())
    })
    output$user_dribble <- DT::renderDT({
      listing <- select(
        user_dribble(),
        'File name' = description, 'Type' = name, 'Size' = size
      )
      datatable(
        listing,
        caption = "The available datasets",
        selection = "single",
        options = list(paging = FALSE, searching = FALSE)
      )
    })
    # Hand the reactive back so callers can reuse the listing.
    user_dribble
  })
}
## To be copied in the UI
# mod_user_data_ui("user_data_ui_1")
## To be copied in the server
# mod_user_data_server("user_data_ui_1")
| /R/mod_user_data.R | permissive | lefkiospaikousis/testRemoteData | R | false | false | 1,344 | r | #' user_data UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
# Module UI: renders the user's file table and a "Delete from DB" button
# that shows busy feedback while a delete is in progress.
mod_user_data_ui <- function(id){
ns <- NS(id)
tagList(
#fluidRow(
# file listing; withSpinner() shows a placeholder until the table renders
DT::DTOutput(ns("user_dribble")) %>% withSpinner(),
#br(),
shinyFeedback::loadingButton(ns("delete"),"Delete from DB", loadingLabel = "Deleting file")
# )
)
}
#' user_data Server Functions
#'
#' @noRd
# Module server: exposes the user's files as a reactive dribble and renders
# them in a DT table. Returns the reactive so the caller can reuse the
# listing (e.g. to resolve the selected row).
#
# id         - module id
# user       - reactive returning the current user
# db_trigger - reactive that fires whenever the DB changes, forcing a re-read
mod_user_data_server <- function(id, user, db_trigger){
moduleServer( id, function(input, output, session){
ns <- session$ns
user_dribble <- reactive({
req(user())
# to refresh when the DB is updated
db_trigger()
get_file_list(user())
})
output$user_dribble <- DT::renderDT({
user_dribble() %>%
select( 'File name' = description, 'Type' = name, 'Size'= size) %>%
datatable(
caption = "The available datasets",
selection = "single",
options = list(
paging = FALSE, searching = FALSE
)
)
})
# hand the reactive back to the caller
return(user_dribble)
})
}
## To be copied in the UI
# mod_user_data_ui("user_data_ui_1")
## To be copied in the server
# mod_user_data_server("user_data_ui_1")
|
# ---------------------------------------------------------------------
# Program: ThreeLatentMediationTest2.R
# Author: Steven M. Boker
# Date: Sun Mar 14 16:28:34 EDT 2010
#
# This program tests variations on a latent mediation model
# using a standard RAM.
#
# ---------------------------------------------------------------------
# Revision History
# -- Sun Mar 14 16:28:38 EDT 2010
# Created ThreeLatentMediationTest2.R.
#
# ---------------------------------------------------------------------
# ----------------------------------
# Read libraries and set options.
# Attach OpenMx; library() (rather than require()) stops with an error if the
# package is missing instead of continuing with only a warning.
library(OpenMx)
# ----------------------------------
# Read the example data and derive model dimensions / parameter labels.
data(latentMultipleRegExample2)
numberFactors <- 3
indicators <- names(latentMultipleRegExample2)
numberIndicators <- length(indicators)
totalVars <- numberIndicators + numberFactors
# ----------------------------------
# Build label vectors for an old-style RAM single-factor FIML model.
latents <- paste("F", 1:numberFactors, sep="")
# one loading label per factor/indicator pair, e.g. "b_F1x1"
loadingLabels <- paste("b_F", rep(1:numberFactors, each=numberIndicators),
	rep(indicators, numberFactors), sep="")
loadingLabels  # echo for visual inspection when run interactively
uniqueLabels <- paste("U_", indicators, sep="")      # residual variances
meanLabels <- paste("M_", indicators, sep="")        # indicator means
factorVarLabels <- paste("Var_", latents, sep="")    # factor variances
# Each factor is measured by its own block of four indicators.
latents1 <- latents[1]
indicators1 <- indicators[1:4]
loadingLabels1 <- paste("b_F1", indicators[1:4], sep="")
latents2 <- latents[2]
indicators2 <- indicators[5:8]
loadingLabels2 <- paste("b_F2", indicators[5:8], sep="")
latents3 <- latents[3]
indicators3 <- indicators[9:12]
loadingLabels3 <- paste("b_F3", indicators[9:12], sep="")
# Model 1: three orthogonal latent factors (F1-F3), each measured by four
# indicators, fit to raw data via FIML. Latent scale is identified by fixing
# each factor's first loading to 1; factor variances are free and there are
# no factor covariances (hence "orthogonal").
threeLatentOrthoRaw1 <- mxModel("threeLatentOrthogonal",
type="RAM",
manifestVars=indicators,
latentVars=latents,
# free loadings of F1 on its indicator block
mxPath(from=latents1, to=indicators1,
# arrows=1, all=TRUE,
arrows=1, connect="all.pairs",
free=TRUE, values=.2,
labels=loadingLabels1),
# free loadings of F2 on its indicator block
mxPath(from=latents2, to=indicators2,
# arrows=1, all=TRUE,
arrows=1, connect="all.pairs",
free=TRUE, values=.2,
labels=loadingLabels2),
# free loadings of F3 on its indicator block
mxPath(from=latents3, to=indicators3,
# arrows=1, all=TRUE,
arrows=1, connect="all.pairs",
free=TRUE, values=.2,
labels=loadingLabels3),
# fix the first loading of each factor to 1 to set the latent scale
mxPath(from=latents1, to=indicators1[1],
arrows=1,
free=FALSE, values=1),
mxPath(from=latents2, to=indicators2[1],
arrows=1,
free=FALSE, values=1),
mxPath(from=latents3, to=indicators3[1],
arrows=1,
free=FALSE, values=1),
# residual (unique) variances of the indicators
mxPath(from=indicators,
arrows=2,
free=TRUE, values=.8,
labels=uniqueLabels),
# free factor variances
mxPath(from=latents,
arrows=2,
free=TRUE, values=.8,
labels=factorVarLabels),
# indicator means (required for raw-data FIML)
mxPath(from="one", to=indicators,
arrows=1, free=TRUE, values=.1,
labels=meanLabels),
mxData(observed=latentMultipleRegExample2, type="raw")
)
threeLatentOrthoRaw1Out <- mxRun(threeLatentOrthoRaw1, suppressWarnings=TRUE)
summary(threeLatentOrthoRaw1Out)
# model 2
# Mediation model 1: adds the regression F2 -> F3 (label "b23") to the
# orthogonal baseline.
threeLatentMediation1 <- mxModel(threeLatentOrthoRaw1,
mxPath(from="F2",to="F3",
arrows=1,
free=TRUE, values=.2,
labels="b23"),
name="threeLatentMediation1"
)
threeLatentMediation1Out <- mxRun(threeLatentMediation1, suppressWarnings=TRUE)
summary(threeLatentMediation1Out)
# model 3
# Mediation model 2: additionally frees the direct path F1 -> F3 ("b13").
threeLatentMediation2 <- mxModel(threeLatentMediation1,
mxPath(from="F1",to="F3",
arrows=1,
free=TRUE, values=.2,
labels="b13"),
name="threeLatentMediation2"
)
threeLatentMediation2Out <- mxRun(threeLatentMediation2, suppressWarnings=TRUE)
summary(threeLatentMediation2Out)
# model 4
# Mediation model 3: additionally frees F2 -> F1 ("b12"), completing the
# mediated route F2 -> F1 -> F3 alongside the direct effects.
threeLatentMediation3 <- mxModel(threeLatentMediation2,
mxPath(from="F2",to="F1",
arrows=1,
free=TRUE, values=.2,
labels="b12"),
name="threeLatentMediation3"
)
threeLatentMediation3Out <- mxRun(threeLatentMediation3, suppressWarnings=TRUE)
summary(threeLatentMediation3Out)
# model 5
# Mediation model 4: full mediation -- the direct path F2 -> F3 is fixed
# back to zero, leaving only the mediated route F2 -> F1 -> F3.
threeLatentMediation4 <- mxModel(threeLatentMediation3,
mxPath(from="F2",to="F3",
arrows=1,
free=FALSE, values=0),
name="threeLatentMediation4"
)
threeLatentMediation4Out <- mxRun(threeLatentMediation4, suppressWarnings=TRUE)
summary(threeLatentMediation4Out)
#---------------------------------------
# Regression checks: each section compares a fitted model's free-parameter
# estimates, standard errors, and -2lnL minimum against previously recorded
# values, within an absolute tolerance of 0.002.
# check values: threeLatentMediation4Out
expectVal <- c(0.899897, 1.211936, 1.447487, 0.474833, 0.701115,
1.296055, 1.138717, 1.004529, 0.905956, 0.891751, 0.847099, 1.038272,
1.03872, 0.820213, 0.945435, 0.836217, 0.973649, 0.982129, 1.049803,
1.331303, 1.038699, 0.767626, 0.966071, 0.643407, 1.089664, 0.746656,
0.067508, 0.12139, 0.088573, -0.034884, -0.09419, 0.012755, -0.067872,
-0.059441, -0.070524, -0.049503, -0.049853, -0.098781)
expectSE <- c(0.076951, 0.087951, 0.102676, 0.077557, 0.0881, 0.119881, 0.111364,
0.111189, 0.115273, 0.113165, 0.110221, 0.122962, 0.118599, 0.114646,
0.145942, 0.105904, 0.106794, 0.141494, 0.133961, 0.172149, 0.133256,
0.109461, 0.121744, 0.125662, 0.185381, 0.157077, 0.117532, 0.110355,
0.129547, 0.151052, 0.097896, 0.086741, 0.118288, 0.11071, 0.110982,
0.099194, 0.091335, 0.094306)
expectMin <- 7710.62
omxCheckCloseEnough(expectVal, threeLatentMediation4Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation4Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation4Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentMediation3Out
expectVal <- c(0.899884, 1.211972, 1.447474, 0.481912, 0.700912,
1.295785, 1.138487, 1.004631, -0.010669, 0.906008, 0.891848,
0.847203, 1.038428, 1.038887, 0.820293, 0.94583, 0.835866, 0.973786,
0.982301, 1.049919, 1.331468, 1.038727, 0.767558, 0.965987, 0.642672,
1.090014, 0.745861, 0.067507, 0.12139, 0.088573, -0.034884, -0.09419,
0.012755, -0.067872, -0.059441, -0.070524, -0.049504, -0.049854,
-0.098781)
expectSE <- c(0.076937, 0.087925, 0.102634, 0.127972, 0.088123, 0.119893,
0.111359, 0.111145, 0.152816, 0.115262, 0.113151, 0.110214, 0.122903,
0.11856, 0.11459, 0.145961, 0.105997, 0.106805, 0.141516, 0.133879,
0.17206, 0.133238, 0.109431, 0.121728, 0.125978, 0.185416, 0.15734,
0.117811, 0.110551, 0.129828, 0.151418, 0.098074, 0.086838, 0.118514,
0.110886, 0.11105, 0.099275, 0.091429, 0.094377)
expectMin <- 7710.615
omxCheckCloseEnough(expectVal, threeLatentMediation3Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation3Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation3Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentMediation2Out
expectVal <- c(0.900993, 1.207067, 1.425325, 0.43279, 0.694426,
1.355306, 1.152128, 0.078525, 0.906157, 0.891475, 0.846991, 1.016895,
1.017922, 0.80959, 1.01297, 0.871724, 1.000945, 0.876173, 1.063462,
1.331154, 1.038162, 0.76807, 0.966173, 1.764348, 1.054153, 0.747071,
0.06751, 0.121392, 0.088576, -0.034879, -0.094187, 0.012757,
-0.067869, -0.059439, -0.070522, -0.049502, -0.049852, -0.09878)
expectSE <- c(0.076472, 0.088267, 0.102919, 0.092556, 0.092707, 0.136435,
0.119071, 0.108581, 0.115255, 0.113106, 0.11018, 0.124685, 0.119489,
0.120868, 0.159665, 0.117298, 0.111136, 0.160984, 0.146537, 0.172013,
0.133155, 0.109448, 0.12173, 0.268676, 0.188841, 0.15717, 0.118021,
0.110782, 0.13014, 0.151765, 0.098149, 0.086882, 0.118609, 0.110993,
0.109976, 0.098261, 0.09034, 0.093452)
expectMin <- 7840.327
omxCheckCloseEnough(expectVal, threeLatentMediation2Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation2Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation2Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentMediation1Out
expectVal <- c(0.901892, 1.20158, 1.427048, 0.704502, 1.355613,
1.1605, 0.465948, 0.94444, 0.924281, 0.870142, 1.013963, 1.01268,
0.828678, 0.998326, 0.879945, 0.990166, 0.890404, 1.054125, 1.389673,
1.009607, 0.750164, 0.965169, 1.767278, 1.045934, 0.854189, 0.067511,
0.121393, 0.088577, -0.034878, -0.094187, 0.012757, -0.067869,
-0.059439, -0.070522, -0.049502, -0.049852, -0.09878)
expectSE <- c(0.076491, 0.08846, 0.103591, 0.092754, 0.133509, 0.118961,
0.099896, 0.121764, 0.120148, 0.115831, 0.125683, 0.119873, 0.123774,
0.162982, 0.115216, 0.110034, 0.154831, 0.143633, 0.176605, 0.134085,
0.111037, 0.12368, 0.269229, 0.186339, 0.185038, 0.117895, 0.110663,
0.129977, 0.151571, 0.098181, 0.086905, 0.118655, 0.111025, 0.111196,
0.099395, 0.091536, 0.094484)
expectMin <- 7867.323
omxCheckCloseEnough(expectVal, threeLatentMediation1Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation1Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation1Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentOrthoRaw1Out
expectVal <- c(0.901891, 1.20158, 1.427049, 0.692088, 1.351975,
1.149558, 0.987615, 0.966986, 0.902343, 1.013963, 1.012679, 0.828678,
0.998327, 0.86774, 1.002452, 0.878394, 1.064433, 1.459186, 0.987214,
0.727833, 0.960052, 1.767278, 1.058139, 1.01176, 0.067512, 0.121394,
0.088578, -0.034877, -0.094186, 0.012757, -0.067868, -0.059437,
-0.070521, -0.049501, -0.04985, -0.098779)
expectSE <- c(0.076528, 0.088527, 0.103665, 0.092459, 0.136218, 0.118764,
0.130232, 0.129587, 0.123389, 0.125806, 0.119984, 0.12382, 0.163071,
0.117211, 0.111291, 0.161268, 0.14648, 0.181275, 0.136797, 0.113505,
0.125792, 0.269576, 0.189346, 0.226953, 0.117883, 0.110644, 0.129952,
0.151555, 0.098128, 0.086865, 0.118584, 0.110962, 0.111143, 0.099336,
0.091474, 0.094429)
expectMin <- 7897.082
omxCheckCloseEnough(expectVal, threeLatentOrthoRaw1Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentOrthoRaw1Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentOrthoRaw1Out$output$minimum, 0.002)
| /inst/models/passing/IntroSEM-ThreeLatentMediationTest1.R | permissive | trbrick/OpenMx | R | false | false | 10,319 | r | # ---------------------------------------------------------------------
# Program: ThreeLatentMediationTest2.R
# Author: Steven M. Boker
# Date: Sun Mar 14 16:28:34 EDT 2010
#
# This program tests variations on a latent mediation model
# using a standard RAM.
#
# ---------------------------------------------------------------------
# Revision History
# -- Sun Mar 14 16:28:38 EDT 2010
# Created ThreeLatentMediationTest2.R.
#
# ---------------------------------------------------------------------
# ----------------------------------
# Read libraries and set options.
require(OpenMx)
# ----------------------------------
# Read the data and print descriptive statistics.
data(latentMultipleRegExample2)
numberFactors <- 3
indicators <- names(latentMultipleRegExample2)
numberIndicators <- length(indicators)
totalVars <- numberIndicators + numberFactors
# ----------------------------------
# Build an Old-style RAM OpenMx single factor FIML model with fixed variance
latents <- paste("F", 1:numberFactors, sep="")
loadingLabels <- paste("b_F", rep(1:numberFactors, each=numberIndicators),
rep(indicators, numberFactors), sep="")
loadingLabels
uniqueLabels <- paste("U_", indicators, sep="")
meanLabels <- paste("M_", indicators, sep="")
factorVarLabels <- paste("Var_", latents, sep="")
latents1 <- latents[1]
indicators1 <- indicators[1:4]
loadingLabels1 <- paste("b_F1", indicators[1:4], sep="")
latents2 <- latents[2]
indicators2 <- indicators[5:8]
loadingLabels2 <- paste("b_F2", indicators[5:8], sep="")
latents3 <- latents[3]
indicators3 <- indicators[9:12]
loadingLabels3 <- paste("b_F3", indicators[9:12], sep="")
threeLatentOrthoRaw1 <- mxModel("threeLatentOrthogonal",
type="RAM",
manifestVars=indicators,
latentVars=latents,
mxPath(from=latents1, to=indicators1,
# arrows=1, all=TRUE,
arrows=1, connect="all.pairs",
free=TRUE, values=.2,
labels=loadingLabels1),
mxPath(from=latents2, to=indicators2,
# arrows=1, all=TRUE,
arrows=1, connect="all.pairs",
free=TRUE, values=.2,
labels=loadingLabels2),
mxPath(from=latents3, to=indicators3,
# arrows=1, all=TRUE,
arrows=1, connect="all.pairs",
free=TRUE, values=.2,
labels=loadingLabels3),
mxPath(from=latents1, to=indicators1[1],
arrows=1,
free=FALSE, values=1),
mxPath(from=latents2, to=indicators2[1],
arrows=1,
free=FALSE, values=1),
mxPath(from=latents3, to=indicators3[1],
arrows=1,
free=FALSE, values=1),
mxPath(from=indicators,
arrows=2,
free=TRUE, values=.8,
labels=uniqueLabels),
mxPath(from=latents,
arrows=2,
free=TRUE, values=.8,
labels=factorVarLabels),
mxPath(from="one", to=indicators,
arrows=1, free=TRUE, values=.1,
labels=meanLabels),
mxData(observed=latentMultipleRegExample2, type="raw")
)
threeLatentOrthoRaw1Out <- mxRun(threeLatentOrthoRaw1, suppressWarnings=TRUE)
summary(threeLatentOrthoRaw1Out)
# model 2
threeLatentMediation1 <- mxModel(threeLatentOrthoRaw1,
mxPath(from="F2",to="F3",
arrows=1,
free=TRUE, values=.2,
labels="b23"),
name="threeLatentMediation1"
)
threeLatentMediation1Out <- mxRun(threeLatentMediation1, suppressWarnings=TRUE)
summary(threeLatentMediation1Out)
# model 3
threeLatentMediation2 <- mxModel(threeLatentMediation1,
mxPath(from="F1",to="F3",
arrows=1,
free=TRUE, values=.2,
labels="b13"),
name="threeLatentMediation2"
)
threeLatentMediation2Out <- mxRun(threeLatentMediation2, suppressWarnings=TRUE)
summary(threeLatentMediation2Out)
# model 4
threeLatentMediation3 <- mxModel(threeLatentMediation2,
mxPath(from="F2",to="F1",
arrows=1,
free=TRUE, values=.2,
labels="b12"),
name="threeLatentMediation3"
)
threeLatentMediation3Out <- mxRun(threeLatentMediation3, suppressWarnings=TRUE)
summary(threeLatentMediation3Out)
# model 5
threeLatentMediation4 <- mxModel(threeLatentMediation3,
mxPath(from="F2",to="F3",
arrows=1,
free=FALSE, values=0),
name="threeLatentMediation4"
)
threeLatentMediation4Out <- mxRun(threeLatentMediation4, suppressWarnings=TRUE)
summary(threeLatentMediation4Out)
#---------------------------------------
# check values: threeLatentMediation4Out
expectVal <- c(0.899897, 1.211936, 1.447487, 0.474833, 0.701115,
1.296055, 1.138717, 1.004529, 0.905956, 0.891751, 0.847099, 1.038272,
1.03872, 0.820213, 0.945435, 0.836217, 0.973649, 0.982129, 1.049803,
1.331303, 1.038699, 0.767626, 0.966071, 0.643407, 1.089664, 0.746656,
0.067508, 0.12139, 0.088573, -0.034884, -0.09419, 0.012755, -0.067872,
-0.059441, -0.070524, -0.049503, -0.049853, -0.098781)
expectSE <- c(0.076951, 0.087951, 0.102676, 0.077557, 0.0881, 0.119881, 0.111364,
0.111189, 0.115273, 0.113165, 0.110221, 0.122962, 0.118599, 0.114646,
0.145942, 0.105904, 0.106794, 0.141494, 0.133961, 0.172149, 0.133256,
0.109461, 0.121744, 0.125662, 0.185381, 0.157077, 0.117532, 0.110355,
0.129547, 0.151052, 0.097896, 0.086741, 0.118288, 0.11071, 0.110982,
0.099194, 0.091335, 0.094306)
expectMin <- 7710.62
omxCheckCloseEnough(expectVal, threeLatentMediation4Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation4Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation4Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentMediation3Out
expectVal <- c(0.899884, 1.211972, 1.447474, 0.481912, 0.700912,
1.295785, 1.138487, 1.004631, -0.010669, 0.906008, 0.891848,
0.847203, 1.038428, 1.038887, 0.820293, 0.94583, 0.835866, 0.973786,
0.982301, 1.049919, 1.331468, 1.038727, 0.767558, 0.965987, 0.642672,
1.090014, 0.745861, 0.067507, 0.12139, 0.088573, -0.034884, -0.09419,
0.012755, -0.067872, -0.059441, -0.070524, -0.049504, -0.049854,
-0.098781)
expectSE <- c(0.076937, 0.087925, 0.102634, 0.127972, 0.088123, 0.119893,
0.111359, 0.111145, 0.152816, 0.115262, 0.113151, 0.110214, 0.122903,
0.11856, 0.11459, 0.145961, 0.105997, 0.106805, 0.141516, 0.133879,
0.17206, 0.133238, 0.109431, 0.121728, 0.125978, 0.185416, 0.15734,
0.117811, 0.110551, 0.129828, 0.151418, 0.098074, 0.086838, 0.118514,
0.110886, 0.11105, 0.099275, 0.091429, 0.094377)
expectMin <- 7710.615
omxCheckCloseEnough(expectVal, threeLatentMediation3Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation3Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation3Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentMediation2Out
expectVal <- c(0.900993, 1.207067, 1.425325, 0.43279, 0.694426,
1.355306, 1.152128, 0.078525, 0.906157, 0.891475, 0.846991, 1.016895,
1.017922, 0.80959, 1.01297, 0.871724, 1.000945, 0.876173, 1.063462,
1.331154, 1.038162, 0.76807, 0.966173, 1.764348, 1.054153, 0.747071,
0.06751, 0.121392, 0.088576, -0.034879, -0.094187, 0.012757,
-0.067869, -0.059439, -0.070522, -0.049502, -0.049852, -0.09878)
expectSE <- c(0.076472, 0.088267, 0.102919, 0.092556, 0.092707, 0.136435,
0.119071, 0.108581, 0.115255, 0.113106, 0.11018, 0.124685, 0.119489,
0.120868, 0.159665, 0.117298, 0.111136, 0.160984, 0.146537, 0.172013,
0.133155, 0.109448, 0.12173, 0.268676, 0.188841, 0.15717, 0.118021,
0.110782, 0.13014, 0.151765, 0.098149, 0.086882, 0.118609, 0.110993,
0.109976, 0.098261, 0.09034, 0.093452)
expectMin <- 7840.327
omxCheckCloseEnough(expectVal, threeLatentMediation2Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation2Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation2Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentMediation1Out
expectVal <- c(0.901892, 1.20158, 1.427048, 0.704502, 1.355613,
1.1605, 0.465948, 0.94444, 0.924281, 0.870142, 1.013963, 1.01268,
0.828678, 0.998326, 0.879945, 0.990166, 0.890404, 1.054125, 1.389673,
1.009607, 0.750164, 0.965169, 1.767278, 1.045934, 0.854189, 0.067511,
0.121393, 0.088577, -0.034878, -0.094187, 0.012757, -0.067869,
-0.059439, -0.070522, -0.049502, -0.049852, -0.09878)
expectSE <- c(0.076491, 0.08846, 0.103591, 0.092754, 0.133509, 0.118961,
0.099896, 0.121764, 0.120148, 0.115831, 0.125683, 0.119873, 0.123774,
0.162982, 0.115216, 0.110034, 0.154831, 0.143633, 0.176605, 0.134085,
0.111037, 0.12368, 0.269229, 0.186339, 0.185038, 0.117895, 0.110663,
0.129977, 0.151571, 0.098181, 0.086905, 0.118655, 0.111025, 0.111196,
0.099395, 0.091536, 0.094484)
expectMin <- 7867.323
omxCheckCloseEnough(expectVal, threeLatentMediation1Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentMediation1Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentMediation1Out$output$minimum, 0.002)
#---------------------------------------
# check values: threeLatentOrthoRaw1Out
expectVal <- c(0.901891, 1.20158, 1.427049, 0.692088, 1.351975,
1.149558, 0.987615, 0.966986, 0.902343, 1.013963, 1.012679, 0.828678,
0.998327, 0.86774, 1.002452, 0.878394, 1.064433, 1.459186, 0.987214,
0.727833, 0.960052, 1.767278, 1.058139, 1.01176, 0.067512, 0.121394,
0.088578, -0.034877, -0.094186, 0.012757, -0.067868, -0.059437,
-0.070521, -0.049501, -0.04985, -0.098779)
expectSE <- c(0.076528, 0.088527, 0.103665, 0.092459, 0.136218, 0.118764,
0.130232, 0.129587, 0.123389, 0.125806, 0.119984, 0.12382, 0.163071,
0.117211, 0.111291, 0.161268, 0.14648, 0.181275, 0.136797, 0.113505,
0.125792, 0.269576, 0.189346, 0.226953, 0.117883, 0.110644, 0.129952,
0.151555, 0.098128, 0.086865, 0.118584, 0.110962, 0.111143, 0.099336,
0.091474, 0.094429)
expectMin <- 7897.082
omxCheckCloseEnough(expectVal, threeLatentOrthoRaw1Out$output$estimate, 0.002)
omxCheckCloseEnough(expectSE,
as.vector(threeLatentOrthoRaw1Out$output$standardError), 0.002)
omxCheckCloseEnough(expectMin, threeLatentOrthoRaw1Out$output$minimum, 0.002)
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Reads the UCI "household power consumption" file from the working
# directory and writes plot1.png there.

# loading the dataset (semicolon-separated; missing values appear as "?")
raw_df <- read.csv("household_power_consumption.txt", sep=";")
# subsetting df for the 2007-02-01 and 2007-02-02
df <- subset(raw_df, Date=="1/2/2007" | Date=="2/2/2007")
# Converting Global active power to numeric ("?" placeholders become NA)
df[, "Global_active_power"] <- as.numeric(as.character(df$Global_active_power))
# Open the PNG device (default 480x480), draw the histogram, close the device.
png("plot1.png")  # fixed: stray trailing comma left a missing argument in the call
par(xaxs= "i", yaxs= "i")
hist(df$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | walidkaffel/ExData_Plotting1 | R | false | false | 497 | r | # loading the dataset
# Plot 1: histogram of Global Active Power for 2007-02-01/02; writes
# plot1.png to the working directory.
raw_df <- read.csv("household_power_consumption.txt", sep=";")
# subsetting df for the 2007-02-01 and 2007-02-02
df <- subset(raw_df, Date=="1/2/2007" | Date=="2/2/2007")
# Converting Global active power to numeric ("?" placeholders become NA)
df[, "Global_active_power"] <- as.numeric(as.character(df$Global_active_power))
png("plot1.png")  # fixed: stray trailing comma left a missing argument in the call
par(xaxs= "i", yaxs= "i")
hist(df$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# UI definition: a sidebar of Black-Scholes inputs plus a main panel with
# two text outputs ('var2' and 'var') rendered by the server.
shinyUI(fluidPage(
  # Application title
  titlePanel("European Option Price Calculator"),
  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      # Call/put selector; the server receives "1" for call, "2" for put.
      radioButtons("type", "Option Type: ",
                   c("Call" = "1",
                     "Put" = "2")),
      # Black-Scholes model inputs; all default to 1.
      numericInput('stock', 'Spot price of the underlying asset: ', value = 1),
      numericInput('strike', 'Strike price of the underlying asset: ', value = 1),
      numericInput('time', 'Time to maturity in years: ', value = 1),
      numericInput('risk_free_rate', 'Risk-Free Interest Rate (in decimal format): ', value = 1),
      numericInput('sigma', 'Volatility (in decimal format): ', value = 1),
      # Triggers the price computation on the server side.
      actionButton("goButton", "Calculate")
    ),
    mainPanel(
      h3('European option (Black Scholes calculator)'),
      h1(' '),
      # Empty headings below act as vertical spacers between the outputs.
      h1('__________ '),h1(' '),h1(' '),
      textOutput('var2'),
      h2(' '),
      h2(' '),
      h2(' '),
      h2(' '),
      textOutput('var')
    )
  )
))
| /ui.R | no_license | jorgebarron/Dev-Data-Products | R | false | false | 1,286 | r | # This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# UI definition: sidebar of Black-Scholes inputs plus a main panel with two
# server-rendered text outputs ('var2' and 'var').
shinyUI(fluidPage(
  # Application title
  titlePanel("European Option Price Calculator"),
  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      # Call/put selector; server receives "1" for call, "2" for put.
      radioButtons("type", "Option Type: ",
                   c("Call" = "1",
                     "Put" = "2")),
      # Black-Scholes model inputs; all default to 1.
      numericInput('stock', 'Spot price of the underlying asset: ', value = 1),
      numericInput('strike', 'Strike price of the underlying asset: ', value = 1),
      numericInput('time', 'Time to maturity in years: ', value = 1),
      numericInput('risk_free_rate', 'Risk-Free Interest Rate (in decimal format): ', value = 1),
      numericInput('sigma', 'Volatility (in decimal format): ', value = 1),
      # Triggers the price computation on the server side.
      actionButton("goButton", "Calculate")
    ),
    mainPanel(
      h3('European option (Black Scholes calculator)'),
      h1(' '),
      # Empty headings below act as vertical spacers between the outputs.
      h1('__________ '),h1(' '),h1(' '),
      textOutput('var2'),
      h2(' '),
      h2(' '),
      h2(' '),
      h2(' '),
      textOutput('var')
    )
  )
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rNnetEventOD.R
\name{rNnetEventOD}
\alias{rNnetEventOD}
\title{Generates random value according to a Poisson multinomial distribution
needed to estimate the origin destination matrices.}
\usage{
rNnetEventOD(
n,
dupFileName,
regsFileName,
postLocJointPath,
prefix,
seed = 123
)
}
\arguments{
\item{n}{The number of random values to be generated.}
\item{dupFileName}{The name of the .csv file with the duplicity probability
for each device. This is an output of the \code{deduplication} package.}
\item{regsFileName}{The name of the .csv file defining the regions. It has two
columns: \code{ tile, region}. The first column contains the IDs of each
tile in the grid while the second contains the number of a region. This file
is defined by the user and it can be created with any text editor.}
\item{postLocJointPath}{The path where the files with the posterior location
probabilities for each device can be found. A file with the location
probabilities should have the name \code{prefix_ID.csv} where \code{ID} is
replaced with the device ID and \code{prefix} is given as a parameter to
this function.}
\item{prefix}{A prefix that is used to compose the file name with posterior
location probabilities.}
}
\value{
A data table object with the following columns: \code{time_from,
time_to, region_from, region_to, Nnet, iter}. The number of detected
individuals moving from a region to another between two successive time
instants is given in column \code{Nnet} while the last column gives the
index of the random value generated for this number.
}
\description{
Generates random value according to a Poisson multinomial
distribution needed to estimate the origin destination matrices. This is a
high level function, the only one to be called by users to estimate the
number of individuals going from one region to another. The actual
computations are performed using a parallelization (transparent to the
users) which uses all available (logical) cores.
}
| /MobileNetworkDataSimulationTemplate/code/src/aggregation/man/rNnetEventOD.Rd | no_license | Lorencrack3/TFG-Lorenzo | R | false | true | 2,064 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rNnetEventOD.R
\name{rNnetEventOD}
\alias{rNnetEventOD}
\title{Generates random value according to a Poisson multinomial distribution
needed to estimate the origin destination matrices.}
\usage{
rNnetEventOD(
n,
dupFileName,
regsFileName,
postLocJointPath,
prefix,
seed = 123
)
}
\arguments{
\item{n}{The number of random values to be generated.}
\item{dupFileName}{The name of the .csv file with the duplicity probability
for each device. This is an output of the \code{deduplication} package.}
\item{regsFileName}{The name of the .csv file defining the regions. It has two
columns: \code{ tile, region}. The first column contains the IDs of each
tile in the grid while the second contains the number of a region. This file
is defined by the user and it can be created with any text editor.}
\item{postLocJointPath}{The path where the files with the posterior location
probabilities for each device can be found. A file with the location
probabilities should have the name \code{prefix_ID.csv} where \code{ID} is
replaced with the device ID and \code{prefix} is given as a parameter to
this function.}
\item{prefix}{A prefix that is used to compose the file name with posterior
location probabilities.}
}
\value{
A data table object with the following columns: \code{time_from,
time_to, region_from, region_to, Nnet, iter}. The number of detected
individuals moving from a region to another between two succesive time
instants is given in column \code{Nnet} while the last column gives the
index of the random value generated for this number.
}
\description{
Generates random value according to a Poisson multinomial
distribution needed to estimate the origin destination matrices. This is a
high level function, the only one to be called by users to estimate the
number of individuals going from one region to another. The actual
computations are performed using a parallelization (transparent to the
users) which uses all available (logical) cores.
}
|
"Qn.Exp.f" <- function(p,yc,delta,mu,sigma,lambda,zero=1e-4) {
n <-length(yc)
a <- -10; b <- 6; tol <- 0.001; maxit <- 20
f.res <- .Fortran("qnexp",
p=as.double(p),yc=as.double(yc),delta=to.single(delta),n=to.integer(n),mu=as.double(mu),
sigma=as.double(sigma),lambda=as.double(lambda),zero=as.double(zero),a=as.double(a),
b=as.double(b),tol=as.double(tol),maxit=to.integer(maxit),qj=double(1),itr=integer(1),
iterm=integer(1))
f.res$qj
}
| /R/Qn.Exp.f.R | no_license | cran/robeth | R | false | false | 454 | r | "Qn.Exp.f" <- function(p,yc,delta,mu,sigma,lambda,zero=1e-4) {
  # Marshal arguments to the storage modes expected by the compiled 'qnexp'
  # routine (robeth) and return only the scalar result qj; the itr/iterm
  # outputs are discarded.
  n <-length(yc)
  # Fixed interval bounds and convergence settings passed to the routine.
  a <- -10; b <- 6; tol <- 0.001; maxit <- 20
  f.res <- .Fortran("qnexp",
    p=as.double(p),yc=as.double(yc),delta=to.single(delta),n=to.integer(n),mu=as.double(mu),
    sigma=as.double(sigma),lambda=as.double(lambda),zero=as.double(zero),a=as.double(a),
    b=as.double(b),tol=as.double(tol),maxit=to.integer(maxit),qj=double(1),itr=integer(1),
    iterm=integer(1))
  f.res$qj
}
|
# Build per-chromosome x-axis limits: for each chromosome in chr_order,
# return c(min start, max end) when its strand flag is TRUE, or the
# reversed pair (max end, min start) otherwise, flattened into one vector.
# NOTE(review): `which(chr_i %in% chr_order)` is always 1 (chr_i is a
# single value tested against the whole vector), so strand[1] is used for
# every chromosome -- presumably `which(chr_order == chr_i)` was intended;
# confirm against callers before changing.
xlimsGen <- function (chr_order, strand_order, start_v, end_v, chr_v){
  xlims <- lapply(chr_order, function(chr_i, strand, start_v, end_v, chr_v){
    if(strand[which(chr_i %in% chr_order)]){
      c(min(start_v[chr_v == chr_i]),
        max(end_v[chr_v == chr_i]))
    } else {
      c(max(end_v[chr_v == chr_i]),
        min(start_v[chr_v == chr_i]))
    }
  }, strand=strand_order, start_v=start_v, end_v=end_v, chr_v=chr_v)
  # Flatten the per-chromosome pairs into a single numeric vector.
  return(unlist(xlims))
} | /R/functions/xlimsGen.R | no_license | seaman248/An_autosome_reconstructing | R | false | false | 454 | r | xlimsGen <- function (chr_order, strand_order, start_v, end_v, chr_v){
xlims <- lapply(chr_order, function(chr_i, strand, start_v, end_v, chr_v){
if(strand[which(chr_i %in% chr_order)]){
c(min(start_v[chr_v == chr_i]),
max(end_v[chr_v == chr_i]))
} else {
c(max(end_v[chr_v == chr_i]),
min(start_v[chr_v == chr_i]))
}
}, strand=strand_order, start_v=start_v, end_v=end_v, chr_v=chr_v)
return(unlist(xlims))
} |
## Since inverting a Matrix requires a lot of computation, it is easier to "cache" data in order to save a lot of computation power.
## This couple of functions allow you to create a "matrix" get its inverse and to save it for future calculations.
## If the program doesn't find the cached matrix, it will get its inverse
## The first function creates a "matrix" that is actually a list of four functions (sort of an object),
## that allow us to perform diferent procedures to the given matrix and it's inverse.
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four closures sharing one environment:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new data invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(solve) cached_inverse <<- solve
  getinverse <- function() cached_inverse
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function, checks if there is already a cached inverse of the matrix "x", if not it is calculated and stored.
## In both cases it returns the inverse of it
## Return the inverse of the special "matrix" x, reusing a previously
## cached result when one is available.
##
## x   : an object created by makeCacheMatrix (a list of accessor closures).
## ... : additional arguments forwarded to solve().
## Returns the inverse matrix; also stores it in x's cache on a miss.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse from the stored matrix and memoize it.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("Getting cached inverse matrix")
  }
  cached
}
| /cachematrix.R | no_license | PaoloLuciano/ProgrammingAssignment2 | R | false | false | 1,327 | r | ## Since inverting a Matrix requires a lot of computation, it is easier to "cache" data in order to save a lot of computation power.
## This couple of functions allow you to create a "matrix" get its inverse and to save it for future calculations.
## If the program doesn't find the cached matrix, it will get its inverse
## The first function creates a "matrix" that is actually a list of four functions (sort of an object),
## that allow us to perform diferent procedures to the given matrix and it's inverse.
## Build a cache-aware "matrix" object: a list of closures that share one
## environment holding the matrix x and its lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL  # cached inverse; NULL means "not computed yet"
  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(y){
    x <<- y
    inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function(){
    x
  }
  # Store a computed inverse in the cache.
  setinverse <- function(solve){
    inverse <<- solve
  }
  # Return the cached inverse (NULL if none).
  getinverse <- function(){
    inverse
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## This function, checks if there is already a cached inverse of the matrix "x", if not it is calculated and stored.
## In both cases it returns the inverse of it
## Return the inverse of the special "matrix" x created by makeCacheMatrix,
## using the cached value when available and computing + caching otherwise.
cacheSolve <- function(x , ...){
  inverse <- x$getinverse()
  if (!is.null(inverse)){
    # Cache hit: report and return early.
    message("Getting cached inverse matrix")
    return (inverse)
  }
  # Cache miss: compute via solve() and memoize for subsequent calls.
  data <- x$get()
  inverse <- solve(data , ...)
  x$setinverse(inverse)
  inverse
}
|
setwd("D:/Eigene Dateien/Dokumente/GitHub/GeoData/2017/slides")
library(knitr)
purl("ggmap.Rmd")
purl("maps.Rmd")
purl("maptools.Rmd")
purl("polygonSources.Rmd")
purl("OpenStreetMap.Rmd")
purl("Matching.Rmd")
purl("spplot.Rmd")
| /2017/rcode/RcodeErzeugen.R | no_license | Japhilko/GeoData | R | false | false | 236 | r | setwd("D:/Eigene Dateien/Dokumente/GitHub/GeoData/2017/slides")
# Regenerate plain .R scripts from the lecture .Rmd slides.
library(knitr)
# knitr::purl() extracts the R code chunks of each .Rmd into a .R file.
purl("ggmap.Rmd")
purl("maps.Rmd")
purl("maptools.Rmd")
purl("polygonSources.Rmd")
purl("OpenStreetMap.Rmd")
purl("Matching.Rmd")
purl("spplot.Rmd")
|
\name{mrw-package}
\alias{mrw-package}
\alias{mrw}
\docType{package}
\title{Multistate Random Walks}
\description{
A collection of functions to post-process and analyze multistate random walk models of animal movement which were fit in JAGS, as well as the fitted models for the example data sets in the paper.
}
\details{
\tabular{ll}{
Package: \tab mrw\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2013-10-02\cr
License: \tab GPL-3\cr
}
}
\author{
Chloe Bracis <cbracis@uw.edu>
}
\references{
Gurarie, E., M. Delgado, C. Bracis, M. Wagner, T. Meckley, I. Kojola, J. Pusenius, and B. van Moorter. In review. What is the animal doing? A comparison of methods and a practical guide to the behavioral analysis of animal movements. J Anim. Ecol.
}
\keyword{ package, mixed random walk }
\seealso{
\code{\link[waddle:waddle-package]{waddle}}
}
\examples{
}
| /.svn/pristine/de/de5d8095a550998af817fe1551b7ab3065a5542c.svn-base | no_license | xiang-chen-git/ecomove | R | false | false | 869 | \name{mrw-package}
\alias{mrw-package}
\alias{mrw}
\docType{package}
\title{Multistate Random Walks}
\description{
A collection of functions to post-process and analyze multistate random walk models of animal movement which were fit in JAGS, as well as the fitted models for the example data sets in the paper.
}
\details{
\tabular{ll}{
Package: \tab mrw\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2013-10-02\cr
License: \tab GPL-3\cr
}
}
\author{
Chloe Bracis <cbracis@uw.edu>
}
\references{
Gurarie, E., M. Delgado, C. Bracis, M. Wagner, T. Meckley, I. Kojola, J. Pusenius, and B. van Moorter. In review. What is the animal doing? A comparison of methods and a practical guide to the behavioral analysis of animal movements. J Anim. Ecol.
}
\keyword{ package, mixed random walk }
\seealso{
\code{\link[waddle:waddle-package]{waddle}}
}
\examples{
}
| |
library(doParallel)
library(smaa)
library(hitandrun)
library(parallel)
source('lib/partition.points.R')
source('lib/plane.R')
source('lib/hyperplane.sample.bounding.R')
source('lib/hyperplane.sample.shakeandbake.R')
## Normalized remaining uncertainty: how far the current entropy still is
## above the attainable minimum, expressed as a fraction of the maximum
## information content (log2(nalts) bits) of an nalts-way choice problem.
comp.uc <- function(nalts, stopping) {
  (stopping$h - stopping$h.min) / log2(nalts)
}
## Sample N points in n-dimensions, within simplex with additional constraints
## constr: a hitandrun-style constraint list (constr/dir/rhs) restricting the
##   unit simplex; ncol(constr$constr) gives the dimension n.
## Returns an N x n matrix of sampled weight vectors.
harSample <- function(constr, N) {
  n <- ncol(constr$constr)
  # Map the simplex into the homogeneous coordinates used by the
  # hit-and-run sampler (hitandrun package).
  transform <- simplex.createTransform(n)
  constr <- simplex.createConstraints(transform, constr)
  seedPoint <- createSeedPoint(constr, homogeneous=TRUE)
  # always generate at least 10^4*(n-1)^3 samples to get uniformity
  M <- max(1E4, N) * (n-1)^3
  # Keep every 'thin'-th draw so exactly N samples remain.
  thin <- floor(M / N)
  samples <- har(seedPoint, constr, N=M, thin=thin, homogeneous=TRUE, transform=transform)$samples
  stopifnot(nrow(samples) == N) # sanity check
  samples
}
# Find the single question ("cutting plane" through weight space) that
# minimizes the expected posterior decision entropy, given the cuts already
# made.
#
# meas          three-way array of SMAA measurements; dim 1 is used as the
#               number of Monte Carlo samples, dim 3 as the weight dimension.
# nrPlanes      number of candidate planes to sample and evaluate.
# prevCuts      hitandrun constraint list of earlier answers (NULL = none).
# equalWProbs   treat both answer halves as equally likely instead of
#               weighting by estimated half-space size.
# ranking       use ranking entropy instead of choice entropy.
# sample.planes function(constr, n) producing candidate planes.
# cluster.size  workers for the doParallel cluster (default: all cores).
#
# Returns a list: per-candidate entropies, the chosen plane (point/normal),
# the share of sampled weights on one side, the achieved entropy h, SMAA
# rank acceptabilities, and the stopping-criterion summary.
find.cut <- function(meas, nrPlanes, prevCuts, equalWProbs=TRUE, ranking=FALSE, sample.planes, cluster.size=detectCores()) {
  N <- dim(meas)[1]
  n <- dim(meas)[3]
  # Worker cluster backing the .parallel plyr calls in the helpers below;
  # stopped again before returning.
  cl <- makeCluster(cluster.size)
  registerDoParallel(cl)
  entropy <- if (ranking) smaa.entropy.ranking else smaa.entropy.choice
  ## Sample planes
  if (is.null(prevCuts)) {
    # Empty constraint system: the whole weight simplex is still feasible.
    prevCuts <- list(constr=matrix(NA, nrow=0, ncol=n), dir=rep("<=", 0), rhs=rep(0, 0))
  }
  sampling.t <- system.time(
    planes <- sample.planes(prevCuts, nrPlanes)
  )[3]
  message("- planes sampled in ", sampling.t, "s")
  plane.find.t <- system.time(
    best <- best.cutting.plane(prevCuts, meas, planes, equalWProbs=equalWProbs, entropy=entropy)
  )[3]
  message("- best plane found in ", plane.find.t, "s")
  cutPt <- planes[[best$choice]]$point
  cutDir <- planes[[best$choice]]$normal
  # Fresh weight sample from the current feasible region, used for both the
  # stopping criterion and the chosen cut's split share.
  pts <- harSample(prevCuts, N)
  stopping.t <- system.time(
    stopping <- stopping.calc(meas, pts, entropy)
  )[3]
  message("- stopping criterion computed in ", stopping.t, "s")
  partition <- partition.points(pts, cutPt, cutDir)
  values <- smaa.values(meas, pts)
  ranks <- smaa.ranks(values)
  stopCluster(cl)
  list(entropies=best$entropies, point=cutPt, normal=cutDir,
       share=sum(partition) / N, h=best$entropies[best$choice, "h"],
       ra=smaa.ra(ranks), stopping=stopping)
}
# Stopping-criterion summary for the current feasible weight region.
# meas: measurements
# wn: weights (one weight vector per row)
# entropy.fn: entropy calculation function
# Returns the entropy of the pooled sample (h), the single weight vectors
# attaining the lowest/highest entropy, and the full per-row entropy vector.
stopping.calc <- function(meas, wn, entropy.fn) {
  # Decision entropy induced by weight vector(s) w via SMAA rank
  # acceptabilities.
  entropy <- function(w) {
    ra <- smaa.ra(smaa.ranks(smaa.values(meas=meas, pref=w)))
    entropy.fn(ra)
  }
  hn <- entropy(wn)
  # Per-weight-vector entropies, evaluated in parallel.
  # NOTE(review): aaply() comes from plyr, which is not library()'d in this
  # file -- presumably pulled in by a dependency; confirm.
  all.ent <- aaply(wn, 1, entropy, .parallel=TRUE)
  i.min <- which.min(all.ent)
  i.max <- which.max(all.ent)
  list(h=hn,
       w.min=wn[i.min,],
       h.min=all.ent[i.min],
       w.max=wn[i.max,],
       h.max=all.ent[i.max],
       all.ent=all.ent)
}
# Run the elicitation loop for 'nr' questions against a known target weight
# vector 'tgt' (simulated decision maker): repeatedly pick the best cut,
# answer it according to tgt's side of the plane, and tighten the
# constraint system. Returns the list of per-iteration find.cut() results.
get.cuts <- function(tgt, nr, meas, nrPlanes, equal.w.prob, ranking=FALSE, sample.planes=sample.planes.unrestricted()) {
  stopifnot(nr >= 1)
  n <- dim(meas)[3]
  # Start from the unconstrained weight simplex.
  constr <- list(constr=matrix(NA, nrow=0, ncol=n), dir=rep("<=", 0), rhs=rep(0, 0))
  ret <- list()
  for (i in 1:nr) {
    res <- find.cut(meas, nrPlanes, constr, equal.w.prob, ranking=ranking, sample.planes=sample.planes)
    message(i, ". Cut: ", res$share * 100, "% - h: ", sprintf("%.2f", res$h))
    ret[[i]] <- res
    old.nr <- nrow(constr$constr)
    # Add the half-space implied by tgt's answer (plane.side), dropping any
    # constraints the new one makes redundant.
    constr <- eliminateRedundant(mergeConstraints(constr,
                                                  plane.constraint(res$point,
                                                                   res$normal,
                                                                   plane.side(tgt, res$point, res$normal))))
    message("- eliminated ", old.nr - nrow(constr$constr) + 1, " redundant constraints")
  }
  ret
}
# select columns of a matrix, ALWAYS returning a matrix
# m: a matrix; i: column index/indices (numeric, logical, or names).
select.col <- function(m, i) {
  # drop = FALSE keeps the result 2-dimensional even for a single column.
  # (The previous version also rebuilt the result via matrix(), which was
  # dead code on the first line and stripped dimnames on the second.)
  m[, i, drop = FALSE]
}
# select rows of a matrix, ALWAYS returning a matrix
# m: a matrix; i: row index/indices (numeric, logical, or names).
select.row <- function(m, i) {
  # Fixed: the body referenced an undefined name `mi` instead of `m`,
  # which made every call fail with "object 'mi' not found".
  m[i, , drop = FALSE]
}
# Expected posterior entropy of a single pairwise question defined by 'cut'.
# w is a weight sample from the current region; w1/w2 are samples from the
# two half-regions on either side of the cut. The halves' probabilities are
# estimated from w's split (or forced to 0.5 each when equalWProbs).
# Returns c(h1, h2, h) with h the probability-weighted expected entropy.
question.entropy.pairwise <- function(w, w1, w2, cut, meas, entropy=smaa.entropy.choice, equalWProbs=FALSE) {
  # Fraction of the sampled weights falling on the cut's positive side.
  sel <- partition.points(w, cut$point, cut$normal)
  p1 <- sum(sel) / nrow(w)
  p2 <- 1 - p1
  if (equalWProbs) {
    p1 <- 0.5
    p2 <- 0.5
  }
  # Entropy of the decision problem conditional on each possible answer.
  v1 <- smaa.values(meas, w1)
  v2 <- smaa.values(meas, w2)
  r1 <- smaa.ranks(v1)
  r2 <- smaa.ranks(v2)
  h1 <- entropy(r1)
  h2 <- entropy(r2)
  c('h1'=h1, 'h2'=h2, 'h'=p1 * h1 + p2 * h2)
}
## Choose the best cutting plane (in terms of entropy).
# constr: constraints defining W'
# meas: measurements for the alternatives (matrix)
# cuts: a list, where cuts[[i]]$point and cuts[[i]]$normal give a point and
#       normal vector defining a hyperplane
# entropy: entropy function applied to the rank acceptabilities (choice or
#       ranking variant)
# equalWProbs: whether to use equal probabilities for both sides of the cut (TRUE) or to have the probabilities reflecting sizes of the half-spaces (FALSE)
# Return value: list with the index of the best hyperplane ('choice') and
#       the per-candidate entropy matrix ('entropies')
best.cutting.plane <- function(constr, meas, cuts, entropy=smaa.entropy.choice, equalWProbs=TRUE) {
  nrW <- dim(meas)[1]
  n <- dim(meas)[3]
  w <- harSample(constr, nrW) # sample weights to use for estimating the sizes of p(W'')
  # Score every candidate cut in parallel; each worker resamples the two
  # half-regions it induces.
  hs <- laply(cuts, function(cut) {
    source('lib/code.R') # workaround bug in parallel backend: reload helpers on each worker
    w1 <- harSample(mergeConstraints(constr, plane.constraint(cut$point, cut$normal, TRUE)), nrW)
    w2 <- harSample(mergeConstraints(constr, plane.constraint(cut$point, cut$normal, FALSE)), nrW)
    question.entropy.pairwise(w, w1, w2, cut, meas, entropy=entropy, equalWProbs=equalWProbs)
  }, .parallel=TRUE)
  colnames(hs) <- c("h1", "h2", "h")
  # Lowest expected entropy wins.
  list(choice=which.min(hs[,"h"]), entropies=hs)
}
| /elicitation/lib/code.R | no_license | tommite/pubs-code | R | false | false | 5,724 | r | library(doParallel)
library(smaa)
library(hitandrun)
library(parallel)
source('lib/partition.points.R')
source('lib/plane.R')
source('lib/hyperplane.sample.bounding.R')
source('lib/hyperplane.sample.shakeandbake.R')
# Normalized remaining uncertainty: entropy still left above the attainable
# minimum, as a fraction of the information content of an nalts-way choice.
comp.uc <- function(nalts, stopping) {
  norm <- log2(nalts) # bits of information, choice problem
  (stopping$h - stopping$h.min) / norm
}
## Sample N points in n-dimensions, within simplex with additional constraints
## (hit-and-run sampling via the hitandrun package); returns an N x n matrix.
harSample <- function(constr, N) {
  n <- ncol(constr$constr)
  transform <- simplex.createTransform(n)
  constr <- simplex.createConstraints(transform, constr)
  seedPoint <- createSeedPoint(constr, homogeneous=TRUE)
  # always generate at least 10^4*(n-1)^3 samples to get uniformity
  M <- max(1E4, N) * (n-1)^3
  thin <- floor(M / N)  # keep every thin-th draw so exactly N remain
  samples <- har(seedPoint, constr, N=M, thin=thin, homogeneous=TRUE, transform=transform)$samples
  stopifnot(nrow(samples) == N) # sanity check
  samples
}
# Pick the entropy-minimizing cutting plane given the cuts already made
# (prevCuts, a hitandrun constraint list; NULL means none). Also computes
# the stopping criterion and SMAA rank acceptabilities for the current
# feasible weight region. Candidate evaluation runs on a doParallel
# cluster of 'cluster.size' workers, stopped before returning.
find.cut <- function(meas, nrPlanes, prevCuts, equalWProbs=TRUE, ranking=FALSE, sample.planes, cluster.size=detectCores()) {
  N <- dim(meas)[1]
  n <- dim(meas)[3]
  cl <- makeCluster(cluster.size)
  registerDoParallel(cl)
  entropy <- if (ranking) smaa.entropy.ranking else smaa.entropy.choice
  ## Sample planes
  if (is.null(prevCuts)) {
    # Empty constraint system: the whole weight simplex is still feasible.
    prevCuts <- list(constr=matrix(NA, nrow=0, ncol=n), dir=rep("<=", 0), rhs=rep(0, 0))
  }
  sampling.t <- system.time(
    planes <- sample.planes(prevCuts, nrPlanes)
  )[3]
  message("- planes sampled in ", sampling.t, "s")
  plane.find.t <- system.time(
    best <- best.cutting.plane(prevCuts, meas, planes, equalWProbs=equalWProbs, entropy=entropy)
  )[3]
  message("- best plane found in ", plane.find.t, "s")
  cutPt <- planes[[best$choice]]$point
  cutDir <- planes[[best$choice]]$normal
  # Fresh weight sample for the stopping criterion and split-share estimate.
  pts <- harSample(prevCuts, N)
  stopping.t <- system.time(
    stopping <- stopping.calc(meas, pts, entropy)
  )[3]
  message("- stopping criterion computed in ", stopping.t, "s")
  partition <- partition.points(pts, cutPt, cutDir)
  values <- smaa.values(meas, pts)
  ranks <- smaa.ranks(values)
  stopCluster(cl)
  list(entropies=best$entropies, point=cutPt, normal=cutDir,
       share=sum(partition) / N, h=best$entropies[best$choice, "h"],
       ra=smaa.ra(ranks), stopping=stopping)
}
# meas: measurements
# wn: weights (one weight vector per row)
# entropy.fn: entropy calculation function
# Summarizes decision entropy over the sample: pooled entropy h plus the
# min/max-entropy weight vectors and the per-row entropy vector.
stopping.calc <- function(meas, wn, entropy.fn) {
  entropy <- function(w) {
    ra <- smaa.ra(smaa.ranks(smaa.values(meas=meas, pref=w)))
    entropy.fn(ra)
  }
  hn <- entropy(wn)
  # NOTE(review): aaply() is from plyr, which is not library()'d in this
  # file -- presumably loaded by a dependency; confirm.
  all.ent <- aaply(wn, 1, entropy, .parallel=TRUE)
  i.min <- which.min(all.ent)
  i.max <- which.max(all.ent)
  list(h=hn,
       w.min=wn[i.min,],
       h.min=all.ent[i.min],
       w.max=wn[i.max,],
       h.max=all.ent[i.max],
       all.ent=all.ent)
}
# Simulated elicitation loop: ask 'nr' questions, answering each the way
# the known target weight vector 'tgt' would, accumulating the find.cut()
# results while tightening the constraint system.
get.cuts <- function(tgt, nr, meas, nrPlanes, equal.w.prob, ranking=FALSE, sample.planes=sample.planes.unrestricted()) {
  stopifnot(nr >= 1)
  n <- dim(meas)[3]
  # Start from the unconstrained weight simplex.
  constr <- list(constr=matrix(NA, nrow=0, ncol=n), dir=rep("<=", 0), rhs=rep(0, 0))
  ret <- list()
  for (i in 1:nr) {
    res <- find.cut(meas, nrPlanes, constr, equal.w.prob, ranking=ranking, sample.planes=sample.planes)
    message(i, ". Cut: ", res$share * 100, "% - h: ", sprintf("%.2f", res$h))
    ret[[i]] <- res
    old.nr <- nrow(constr$constr)
    # Add the half-space implied by tgt's answer; drop redundant constraints.
    constr <- eliminateRedundant(mergeConstraints(constr,
                                                  plane.constraint(res$point,
                                                                   res$normal,
                                                                   plane.side(tgt, res$point, res$normal))))
    message("- eliminated ", old.nr - nrow(constr$constr) + 1, " redundant constraints")
  }
  ret
}
# select columns of a matrix, ALWAYS returning a matrix
# m: a matrix; i: column index/indices (numeric, logical, or names).
select.col <- function(m, i) {
  # drop = FALSE keeps the result 2-dimensional even for a single column.
  # (The previous version also rebuilt the result via matrix(), which was
  # dead code on the first line and stripped dimnames on the second.)
  m[, i, drop = FALSE]
}
# select rows of a matrix, ALWAYS returning a matrix
# m: a matrix; i: row index/indices (numeric, logical, or names).
select.row <- function(m, i) {
  # Fixed: the body referenced an undefined name `mi` instead of `m`,
  # which made every call fail with "object 'mi' not found".
  m[i, , drop = FALSE]
}
# Expected posterior entropy of a single pairwise question defined by 'cut';
# w samples the current region, w1/w2 sample the two half-regions. Halves'
# probabilities come from w's split, or 0.5/0.5 when equalWProbs.
# Returns c(h1, h2, h) with h the probability-weighted expected entropy.
question.entropy.pairwise <- function(w, w1, w2, cut, meas, entropy=smaa.entropy.choice, equalWProbs=FALSE) {
  sel <- partition.points(w, cut$point, cut$normal)
  p1 <- sum(sel) / nrow(w)
  p2 <- 1 - p1
  if (equalWProbs) {
    p1 <- 0.5
    p2 <- 0.5
  }
  # Entropy of the decision problem conditional on each possible answer.
  v1 <- smaa.values(meas, w1)
  v2 <- smaa.values(meas, w2)
  r1 <- smaa.ranks(v1)
  r2 <- smaa.ranks(v2)
  h1 <- entropy(r1)
  h2 <- entropy(r2)
  c('h1'=h1, 'h2'=h2, 'h'=p1 * h1 + p2 * h2)
}
}
## Choose the best cutting plane (in terms of entropy).
# constr: constraints defining W'
# meas: measurements for the alternatives (matrix)
# cuts: a list, where cuts[[i]]$point and cuts[[i]]$normal give a point and
# normal vector defining a hyperplane
# ranking: whether to compute ranking (TRUE) or choice (FALSE) entropy
# equalWProbs: whether to use equal probabilities for both sides of the cut (TRUE) or to have the probabilities reflecting sizes of the half-spaces (FALSE)
# Return value: the (index of the) best hyperplane
best.cutting.plane <- function(constr, meas, cuts, entropy=smaa.entropy.choice, equalWProbs=TRUE) {
nrW <- dim(meas)[1]
n <- dim(meas)[3]
w <- harSample(constr, nrW) # sample weights to use for estimating the sizes of p(W'')
hs <- laply(cuts, function(cut) {
source('lib/code.R') # workaround bug in parallel packend
w1 <- harSample(mergeConstraints(constr, plane.constraint(cut$point, cut$normal, TRUE)), nrW)
w2 <- harSample(mergeConstraints(constr, plane.constraint(cut$point, cut$normal, FALSE)), nrW)
question.entropy.pairwise(w, w1, w2, cut, meas, entropy=entropy, equalWProbs=equalWProbs)
}, .parallel=TRUE)
colnames(hs) <- c("h1", "h2", "h")
list(choice=which.min(hs[,"h"]), entropies=hs)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcause.R
\name{pcause}
\alias{pcause}
\title{Compute the bootstrap probability of correct causal direction.}
\usage{
pcause(x, y, n999 = 999)
}
\arguments{
\item{x}{{Vector of x data}}
\item{y}{{Vector of y data}}
\item{n999}{{Number of bootstrap replications (default=999)}}
}
\value{
P(cause) the bootstrap proportion of correct causal determinations.
}
\description{
Maximum entropy bootstrap (`meboot') package is used for statistical inference
regarding \eqn{\delta} which equals GMC(X|Y)-GMC(Y|X) defined by Zheng et al (2012).
The bootstrap provides an approximation to chances of correct determination of
the causal direction.
}
\note{
'pcause' is computer intensive and generally slow. It is better to use
it at a later stage in the investigation when a preliminary causal determination
is already made. Its use may slow the exploratory phase. In my experience, if
P(cause) is less than 0.55, there is a cause for concern.
}
\examples{
\dontrun{
set.seed(34);x=sample(1:10);y=sample(2:11)
pcause(x,y,n999=29)
data('EuroCrime')
attach(EuroCrime)
pcause(crim,off,n999=29)
}
}
\references{
Vinod, H. D. `Generalized Correlation and Kernel Causality with
Applications in Development Economics' in Communications in
Statistics -Simulation and Computation, 2015,
\url{http://dx.doi.org/10.1080/03610918.2015.1122048}
Zheng, S., Shi, N.-Z., and Zhang, Z. (2012). Generalized measures
of correlation for asymmetry, nonlinearity, and beyond.
Journal of the American Statistical Association, vol. 107, pp. 1239-1252.
Vinod, H. D. and Lopez-de-Lacalle, J. (2009). 'Maximum entropy bootstrap
for time series: The meboot R package.' Journal of Statistical Software,
Vol. 29(5), pp. 1-19.
}
\author{
Prof. H. D. Vinod, Economics Dept., Fordham University, NY
}
\concept{bootstrap}
\concept{maximum entropy bootstrap}
| /man/pcause.Rd | no_license | Srisai85/generalCorr | R | false | true | 1,976 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcause.R
\name{pcause}
\alias{pcause}
\title{Compute the bootstrap probability of correct causal direction.}
\usage{
pcause(x, y, n999 = 999)
}
\arguments{
\item{x}{{Vector of x data}}
\item{y}{{Vector of y data}}
\item{n999}{{Number of bootstrap replications (default=999)}}
}
\value{
P(cause) the bootstrap proportion of correct causal determinations.
}
\description{
Maximum entropy bootstrap (`meboot') package is used for statistical inference
regarding \eqn{\delta} which equals GMC(X|Y)-GMC(Y|X) defined by Zheng et al (2012).
The bootstrap provides an approximation to chances of correct determination of
the causal direction.
}
\note{
'pcause' is computer intensive and generally slow. It is better to use
it at a later stage in the investigation when a preliminary causal determination
is already made. Its use may slow the exploratory phase. In my experience, if
P(cause) is less than 0.55, there is a cause for concern.
}
\examples{
\dontrun{
set.seed(34);x=sample(1:10);y=sample(2:11)
pcause(x,y,n999=29)
data('EuroCrime')
attach(EuroCrime)
pcause(crim,off,n999=29)
}
}
\references{
Vinod, H. D. `Generalized Correlation and Kernel Causality with
Applications in Development Economics' in Communications in
Statistics -Simulation and Computation, 2015,
\url{http://dx.doi.org/10.1080/03610918.2015.1122048}
Zheng, S., Shi, N.-Z., and Zhang, Z. (2012). Generalized measures
of correlation for asymmetry, nonlinearity, and beyond.
Journal of the American Statistical Association, vol. 107, pp. 1239-1252.
Vinod, H. D. and Lopez-de-Lacalle, J. (2009). 'Maximum entropy bootstrap
for time series: The meboot R package.' Journal of Statistical Software,
Vol. 29(5), pp. 1-19.
}
\author{
Prof. H. D. Vinod, Economics Dept., Fordham University, NY
}
\concept{bootstrap}
\concept{maximum entropy bootstrap}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{beregn_kredsmandater}
\alias{beregn_kredsmandater}
\title{Beregner kredsmandater}
\usage{
beregn_kredsmandater(storkreds_votes, kredsmandater)
}
\arguments{
\item{storkreds_votes}{stemmer fordelt på storkredse}
\item{kredsmandater}{oversigt over antallet af kredsmandater}
}
\description{
Beregner kredsmandater
}
| /man/beregn_kredsmandater.Rd | no_license | mikkelkrogsholm/mandatfordeling | R | false | true | 412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{beregn_kredsmandater}
\alias{beregn_kredsmandater}
\title{Beregner kredsmandater}
\usage{
beregn_kredsmandater(storkreds_votes, kredsmandater)
}
\arguments{
\item{storkreds_votes}{stemmer fordelt på storkredse}
\item{kredsmandater}{oversigt over antallet af kredsmandater}
}
\description{
Beregner kredsmandater
}
|
library(caret)
library(pROC)
library(e1071)
library(preprocessCore)
library(kknn)
library(fastAdaboost)
library(limma)
# --- Configuration and raw data import -----------------------------------
# Each GEO series contributes a genotype/expression table (gt*) and a
# phenotype table (pt*), read from tab-separated files under `path`.
options(stringsAsFactors = FALSE)  # spell out FALSE; T/F are reassignable
set.seed(333)
#path <- "insert path here"
path <- "C:/Users/viche/Desktop/vc/code/foru"
# file.path() joins components with "/" — same result as the original
# paste(path, "/file", sep = "") but the idiomatic way to build paths.
pt6613 <- read.table(file.path(path, "GSE6613_phtype.tsv"))
gt6613 <- read.table(file.path(path, "GSE6613_gtype.tsv"))
gt7621 <- read.table(file.path(path, "GSE7621_gtype.tsv"))
pt7621 <- read.table(file.path(path, "GSE7621_phtype.tsv"))
pt8397_96 <- read.table(file.path(path, "GSE8397-96_phtype.tsv"))
pt8397_97 <- read.table(file.path(path, "GSE8397-97_phtype.tsv"))
gt8397_97 <- read.table(file.path(path, "GSE8397-97_gtype.tsv"))
gt8397_96 <- read.table(file.path(path, "GSE8397-96_gtype.tsv"))
gt20141 <- read.table(file.path(path, "GSE20141_gtype.tsv"))
pt20141 <- read.table(file.path(path, "GSE20141_phtype.tsv"))
pt20163 <- read.table(file.path(path, "GSE20163_phtype.tsv"))
gt20163 <- read.table(file.path(path, "GSE20163_gtype.tsv"))
gt20164 <- read.table(file.path(path, "GSE20164_gtype.tsv"))
pt20164 <- read.table(file.path(path, "GSE20164_phtype.tsv"))
pt20291 <- read.table(file.path(path, "GSE20291_phtype.tsv"))
gt20291 <- read.table(file.path(path, "GSE20291_gtype.tsv"))
gt20292 <- read.table(file.path(path, "GSE20292_gtype.tsv"))
pt20292 <- read.table(file.path(path, "GSE20292_phtype.tsv"))
#pt20333 <- read.table(file.path(path, "GSE20333_phtype.tsv"))
#gt20333 <- read.table(file.path(path, "GSE20333_gtype.tsv"))
#gt24378 <- read.table(file.path(path, "GSE24378_gtype.tsv"))
#pt24378 <- read.table(file.path(path, "GSE24378_phtype.tsv"))
# other neurodegenerative conditions (used later to drop "neuro controls")
pt26927 <- read.table(file.path(path, "GSE26927_phtype.tsv"))
gt26927 <- read.table(file.path(path, "GSE26927_gtype.tsv"))
# merging data with hgu133A.db
# --- Combine genotype (expression) tables --------------------------------
# merge(by = 'V1') inner-joins on the probe-ID column, so each step keeps
# only the probes present in every series merged so far.
gt <- merge(gt20164, gt8397_96, by='V1')
gt <- merge(gt, gt20292, by='V1')
gt <- merge(gt, gt20163, by='V1')
gt <- merge(gt, gt6613, by='V1')
gt <- merge(gt, gt20291, by='V1')
# originally 56,000 something probes, but shared 22278/22284 probes with other hgu133a.db datasets
gt <- merge(gt, gt20141, by = "V1")
# shares 21942/22278 probes, will temporarily add for now
gt <- merge(gt, gt7621, by = "V1")
#add sample accession
# Append one extra row per phenotype table holding the GEO accession so the
# study of origin survives the merges; the new row is labelled "code".
pt20164[nrow(pt20164)+1,] <- "GSE20164"
pt20164[nrow(pt20164), 1] <- "code"
pt20141[nrow(pt20141)+1,] <- "GSE20141"
pt20141[nrow(pt20141), 1] <- "code"
pt20163[nrow(pt20163)+1,] <- "GSE20163"
pt20163[nrow(pt20163), 1] <- "code"
pt7621[nrow(pt7621)+1,] <- "GSE7621"
pt7621[nrow(pt7621), 1] <- "code"
pt20291[nrow(pt20291)+1,] <- "GSE20291"
pt20291[nrow(pt20291), 1] <- "code"
pt20292[nrow(pt20292)+1,] <- "GSE20292"
pt20292[nrow(pt20292), 1] <- "code"
pt8397_96[nrow(pt8397_96)+1,] <- "GSE8397_96"
pt8397_96[nrow(pt8397_96), 1] <- "code"
pt6613[nrow(pt6613)+1,] <- "GSE6613"
pt6613[nrow(pt6613), 1] <- "code"
#change rows to merge by
pt8397_96[1,1] <- "!Sample_characteristics_ch1"
#6613 does not have any gender or age :(
# Merge selected phenotype rows (the characteristics row, one further
# metadata row, and the accession row added above), keyed on column V1.
pt <- merge(pt20164[c(1,4, nrow(pt20164)),], pt8397_96[c(1, 2, nrow(pt8397_96)),], by='V1')
pt <- merge(pt, pt20292[c(1, 2, nrow(pt20292)),], by='V1')
pt <- merge(pt, pt20163[c(1, 4, nrow(pt20163)),], by='V1')
pt <- merge(pt, pt6613[c(1, 2, nrow(pt6613)),], by='V1')
pt <- merge(pt, pt20291[c(1, 2, nrow(pt20291)),], by='V1')
pt <- merge(pt, pt20141[c(1, 2, nrow(pt20141)),], by = "V1")
pt <- merge(pt, pt7621[c(1, 2, nrow(pt7621)),], by='V1')
# transposing the genotype data so that GSE is rownames
tgt <- t(gt)
# NOTE(review): the magic numbers below (288 samples, 21941 probes) are
# hard-coded to the current merge result — confirm they still hold if any
# input series changes.
gtds <- data.frame(tgt[2:288, 1:21941])
# setting rownames and colnames for gt and pt
rownames(gtds) <- tgt[2:288, 21942]
colnames(gtds) <- as.character(tgt[1, 1:21941])
ptds <- data.frame(pt[c(1, 3), 2:288])
rownames(ptds) <- pt$V1[c(1, 3)]
colnames(ptds) <- as.character(pt[2, 2:288])
# Collapse the free-text characteristics row into two class labels:
# "ctr" (normal/control) and "pkd" (Parkinson's disease).
simpleptd <- gsub(".*normal.*", replace = "ctr", ptds[1,])
simpleptd <- gsub(".*Parkinson.*", replace = "pkd", simpleptd)
simpleptd <- gsub(".*control.*", replace = "ctr", simpleptd)
simpleptd <- gsub(".*Control.*", replace = "ctr", simpleptd)
ptds2 <- data.frame(sample=colnames(ptds),group=simpleptd, code = as.character(pt[3,-1]))
ptds3 <- data.frame(ptds2[,2:3])
rownames(ptds3) <- ptds2[,1]
# Combine the label columns (group, code) with the expression values.
ds <- cbind(ptds3, gtds)
ds <- ds[,1:21907]
colnames(ds)[1] <- "group"
## drop neuro ctr
# Remove samples listed in neuroctr.tsv (controls from other
# neurodegenerative conditions).
nctr <- readLines(paste(path, "/neuroctr.tsv", sep=""))
ds2 <- ds[!rownames(ds) %in% nctr,]
numds <- as.data.frame(apply(ds2[,-(1:2)], 2, as.numeric))
rownames(numds) <- rownames(ds2)
numdst <- data.frame(t(numds))
# log2(x + 1) transform; keep probes with mean log-signal above 3.
logdata = log2(numdst + 1)
logdata = logdata[rowMeans(logdata) > 3, ]
colramp = colorRampPalette(c(3,"white",2))(dim(numdst)[2])
plot(density(logdata[,1]),col=colramp[1],lwd=3,ylim=c(0,.80),
main = "Distribution before normalization")
for(i in 2:dim(numdst)[2]){lines(density(logdata[,i]),lwd=3,col=colramp[i])}
# Quantile normalization (preprocessCore) forces identical per-sample
# distributions.
norm_ds = normalize.quantiles(as.matrix(logdata))
plot(density(norm_ds[,1]),col=colramp[1],lwd=3,ylim=c(0,.30),
main = "Distribution after normalization")
for(i in 2:dim(numdst)[2]){lines(density(norm_ds[,i]),lwd=3,col=colramp[i])}
# limma::removeBatchEffect treats the study accession (ds2$code) as the
# batch while preserving the group effect via the design matrix.
rmbds <- removeBatchEffect(norm_ds, ds2$code,
design=model.matrix(~0+group, data=ds2))
# row & col names
fds <- data.frame(t(data.frame(rmbds)))
rownames(fds) <- rownames(ds2)
grouplabels <- ds2[,1:2]
findata <- cbind(grouplabels, fds)
# mds plot
d <- dist(fds) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
# ggplot mds
rmdsp <- data.frame(dim1=fit$points[,1],dim2=fit$points[,2],
group=as.factor(ds2$group), study=ds2$code)
# NOTE(review): ggplot2 is used here but never attached with library();
# confirm it is loaded elsewhere or add library(ggplot2) at the top.
ggplot(data=rmdsp, aes(dim1, dim2, color = study, shape=group)) +
geom_point(size = 3) +
ggtitle("MDS plot after batch effect correction") +
theme(plot.title = element_text(hjust = 0.5))
# --- Dimensionality reduction and model training -------------------------
# PCA on the log-transformed, batch-corrected expression matrix; the PC
# scores (pca$x) become the classifier features.
set.seed(333)
pca <- prcomp(log2(findata[,-1:-2]+2))
#pca <- prcomp(log2(findata[,2:20725]+1))
pcadata <- data.frame(group=findata$group, study = findata$code, pca$x)
set.seed(333)
# training using train(method = svm)
set.seed(333)
train_control <- trainControl(method="repeatedcv", number = 5, repeats = 10)
# NOTE(review): this svmLinearWeights fit is immediately overwritten by the
# svmLinear2 fit below — give it its own name if its results are needed.
svm <- train(as.factor(group) ~ ., data = pcadata, trControl = train_control,
method = "svmLinearWeights")
svm <- train(as.factor(group) ~ ., data = pcadata, trControl = train_control,
method = "svmLinear2", tuneLength = 10)
svmrad <- train(as.factor(group) ~ ., data = pcadata, trControl = train_control,
method = "svmRadial", tuneLength = 10)
# too many dependencies, not important
#set.seed(333)
#tr5 <- trainControl(method = "repeatedcv", number = 5, repeats = 5)
#naivemod <- train(as.factor(group) ~ ., data = pcadata,
# trControl = tr5, method = "nb")
## nb
#train_control <- trainControl(method="repeatedcv", number=5, repeats=5)
# train the model
#mnb <- train(as.factor(group)~., data=pcadata, trControl=train_control, method="nb")
# summarize results
#print(mnb)
#confusionMatrix(mnb)
## custom rf (VERY slow)
Sys.time()
library("randomForest")
# Custom caret model spec so both mtry AND ntree can be tuned (caret's
# built-in "rf" only tunes mtry). The list fields follow caret's custom
# model interface: parameters/grid/fit/predict/prob/sort/levels.
customRF <- list(type = "Classification", library = "randomForest", loop = NULL)
customRF$parameters <- data.frame(parameter = c("mtry", "ntree"), class = rep("numeric", 2),
label = c("mtry", "ntree"))
customRF$grid <- function(x, y, len = NULL, search = "grid") {}
customRF$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...) {
randomForest(x, y, mtry = param$mtry, ntree=param$ntree, ...)
}
customRF$predict <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata)
customRF$prob <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata, type = "prob")
customRF$sort <- function(x) x[order(x[,1]),]
customRF$levels <- function(x) x$classes
# FIX: was set.seed(seed) — `seed` is never defined anywhere in this script,
# which raises "object 'seed' not found". Use the script-wide seed 333.
set.seed(333)
control2 <- trainControl(method="repeatedcv", number=5, repeats=10)
tunegrid2 <- expand.grid(.mtry=c(1:20), .ntree=c(1000, 1500, 2000, 2500))
mrfc <- train(as.factor(group)~., data=pcadata, method=customRF,
metric='Accuracy', tuneGrid=tunegrid2, trControl=control2)
Sys.time()
summary(mrfc)
plot(mrfc)
treemd1 <- train(group ~ ., method = "rpart", data = pcadata,
trControl = control2)
# neural network
set.seed(333)
# FIX: caret's argument is tuneLength, not tuningLength (the misspelled
# argument was silently forwarded to nnet via ... and ignored as a tuning
# control). MaxNWts is intentionally passed through to nnet.
mnet <- train(group ~ ., data = pcadata, trControl = control2,
method = "nnet", tuneLength = 1, MaxNWts = 1317)
# boostedtrees
set.seed(333)
mtbst <- train(as.factor(group) ~., data = pcadata,
trControl = control2, method = "adaboost")
# model accuracy
# svm 0.569
# svmRadial 0.573
# naivebayes dependency issues
# randomForest takes too long
# nnet 0.793
# boostedTrees 0.928??? overfitting?
# adaboost output
# > mtbst
# AdaBoost Classification Trees
#
# 254 samples
# 255 predictors
# 2 classes: 'ctr', 'pkd'
#
# No pre-processing
# Resampling: Cross-Validated (5 fold, repeated 10 times)
# Summary of sample sizes: 203, 203, 203, 204, 203, 203, ...
# Resampling results across tuning parameters:
#
# nIter method Accuracy Kappa
# 50 Adaboost.M1 0.9043686 0.8032668
# 50 Real adaboost 0.8772157 0.7457349
# 100 Adaboost.M1 0.9236784 0.8437119
# 100 Real adaboost 0.8850745 0.7614762
# 150 Adaboost.M1 0.9280314 0.8525355
# 150 Real adaboost 0.8933490 0.7787139
#
# Accuracy was used to select the optimal model using the largest value.
# The final values used for the model were nIter = 150 and method = Adaboost.M1. | /parkscriptMain.R | no_license | vc64/parkinsonsML | R | false | false | 9,596 | r | library(caret)
library(pROC)
library(e1071)
library(preprocessCore)
library(kknn)
library(fastAdaboost)
library(limma)
# --- Configuration and raw data import -----------------------------------
# Each GEO series contributes a genotype/expression table (gt*) and a
# phenotype table (pt*), read from tab-separated files under `path`.
options(stringsAsFactors = FALSE)  # spell out FALSE; T/F are reassignable
set.seed(333)
#path <- "insert path here"
path <- "C:/Users/viche/Desktop/vc/code/foru"
# file.path() joins components with "/" — same result as the original
# paste(path, "/file", sep = "") but the idiomatic way to build paths.
pt6613 <- read.table(file.path(path, "GSE6613_phtype.tsv"))
gt6613 <- read.table(file.path(path, "GSE6613_gtype.tsv"))
gt7621 <- read.table(file.path(path, "GSE7621_gtype.tsv"))
pt7621 <- read.table(file.path(path, "GSE7621_phtype.tsv"))
pt8397_96 <- read.table(file.path(path, "GSE8397-96_phtype.tsv"))
pt8397_97 <- read.table(file.path(path, "GSE8397-97_phtype.tsv"))
gt8397_97 <- read.table(file.path(path, "GSE8397-97_gtype.tsv"))
gt8397_96 <- read.table(file.path(path, "GSE8397-96_gtype.tsv"))
gt20141 <- read.table(file.path(path, "GSE20141_gtype.tsv"))
pt20141 <- read.table(file.path(path, "GSE20141_phtype.tsv"))
pt20163 <- read.table(file.path(path, "GSE20163_phtype.tsv"))
gt20163 <- read.table(file.path(path, "GSE20163_gtype.tsv"))
gt20164 <- read.table(file.path(path, "GSE20164_gtype.tsv"))
pt20164 <- read.table(file.path(path, "GSE20164_phtype.tsv"))
pt20291 <- read.table(file.path(path, "GSE20291_phtype.tsv"))
gt20291 <- read.table(file.path(path, "GSE20291_gtype.tsv"))
gt20292 <- read.table(file.path(path, "GSE20292_gtype.tsv"))
pt20292 <- read.table(file.path(path, "GSE20292_phtype.tsv"))
#pt20333 <- read.table(file.path(path, "GSE20333_phtype.tsv"))
#gt20333 <- read.table(file.path(path, "GSE20333_gtype.tsv"))
#gt24378 <- read.table(file.path(path, "GSE24378_gtype.tsv"))
#pt24378 <- read.table(file.path(path, "GSE24378_phtype.tsv"))
# other neurodegenerative conditions (used later to drop "neuro controls")
pt26927 <- read.table(file.path(path, "GSE26927_phtype.tsv"))
gt26927 <- read.table(file.path(path, "GSE26927_gtype.tsv"))
# merging data with hgu133A.db
# --- Combine genotype (expression) tables --------------------------------
# merge(by = 'V1') inner-joins on the probe-ID column, so each step keeps
# only the probes present in every series merged so far.
gt <- merge(gt20164, gt8397_96, by='V1')
gt <- merge(gt, gt20292, by='V1')
gt <- merge(gt, gt20163, by='V1')
gt <- merge(gt, gt6613, by='V1')
gt <- merge(gt, gt20291, by='V1')
# originally 56,000 something probes, but shared 22278/22284 probes with other hgu133a.db datasets
gt <- merge(gt, gt20141, by = "V1")
# shares 21942/22278 probes, will temporarily add for now
gt <- merge(gt, gt7621, by = "V1")
#add sample accession
# Append one extra row per phenotype table holding the GEO accession so the
# study of origin survives the merges; the new row is labelled "code".
pt20164[nrow(pt20164)+1,] <- "GSE20164"
pt20164[nrow(pt20164), 1] <- "code"
pt20141[nrow(pt20141)+1,] <- "GSE20141"
pt20141[nrow(pt20141), 1] <- "code"
pt20163[nrow(pt20163)+1,] <- "GSE20163"
pt20163[nrow(pt20163), 1] <- "code"
pt7621[nrow(pt7621)+1,] <- "GSE7621"
pt7621[nrow(pt7621), 1] <- "code"
pt20291[nrow(pt20291)+1,] <- "GSE20291"
pt20291[nrow(pt20291), 1] <- "code"
pt20292[nrow(pt20292)+1,] <- "GSE20292"
pt20292[nrow(pt20292), 1] <- "code"
pt8397_96[nrow(pt8397_96)+1,] <- "GSE8397_96"
pt8397_96[nrow(pt8397_96), 1] <- "code"
pt6613[nrow(pt6613)+1,] <- "GSE6613"
pt6613[nrow(pt6613), 1] <- "code"
#change rows to merge by
pt8397_96[1,1] <- "!Sample_characteristics_ch1"
#6613 does not have any gender or age :(
# Merge selected phenotype rows (the characteristics row, one further
# metadata row, and the accession row added above), keyed on column V1.
pt <- merge(pt20164[c(1,4, nrow(pt20164)),], pt8397_96[c(1, 2, nrow(pt8397_96)),], by='V1')
pt <- merge(pt, pt20292[c(1, 2, nrow(pt20292)),], by='V1')
pt <- merge(pt, pt20163[c(1, 4, nrow(pt20163)),], by='V1')
pt <- merge(pt, pt6613[c(1, 2, nrow(pt6613)),], by='V1')
pt <- merge(pt, pt20291[c(1, 2, nrow(pt20291)),], by='V1')
pt <- merge(pt, pt20141[c(1, 2, nrow(pt20141)),], by = "V1")
pt <- merge(pt, pt7621[c(1, 2, nrow(pt7621)),], by='V1')
# transposing the genotype data so that GSE is rownames
tgt <- t(gt)
# NOTE(review): the magic numbers below (288 samples, 21941 probes) are
# hard-coded to the current merge result — confirm they still hold if any
# input series changes.
gtds <- data.frame(tgt[2:288, 1:21941])
# setting rownames and colnames for gt and pt
rownames(gtds) <- tgt[2:288, 21942]
colnames(gtds) <- as.character(tgt[1, 1:21941])
ptds <- data.frame(pt[c(1, 3), 2:288])
rownames(ptds) <- pt$V1[c(1, 3)]
colnames(ptds) <- as.character(pt[2, 2:288])
# Collapse the free-text characteristics row into two class labels:
# "ctr" (normal/control) and "pkd" (Parkinson's disease).
simpleptd <- gsub(".*normal.*", replace = "ctr", ptds[1,])
simpleptd <- gsub(".*Parkinson.*", replace = "pkd", simpleptd)
simpleptd <- gsub(".*control.*", replace = "ctr", simpleptd)
simpleptd <- gsub(".*Control.*", replace = "ctr", simpleptd)
ptds2 <- data.frame(sample=colnames(ptds),group=simpleptd, code = as.character(pt[3,-1]))
ptds3 <- data.frame(ptds2[,2:3])
rownames(ptds3) <- ptds2[,1]
# Combine the label columns (group, code) with the expression values.
ds <- cbind(ptds3, gtds)
ds <- ds[,1:21907]
colnames(ds)[1] <- "group"
## drop neuro ctr
# Remove samples listed in neuroctr.tsv (controls from other
# neurodegenerative conditions).
nctr <- readLines(paste(path, "/neuroctr.tsv", sep=""))
ds2 <- ds[!rownames(ds) %in% nctr,]
numds <- as.data.frame(apply(ds2[,-(1:2)], 2, as.numeric))
rownames(numds) <- rownames(ds2)
numdst <- data.frame(t(numds))
# log2(x + 1) transform; keep probes with mean log-signal above 3.
logdata = log2(numdst + 1)
logdata = logdata[rowMeans(logdata) > 3, ]
colramp = colorRampPalette(c(3,"white",2))(dim(numdst)[2])
plot(density(logdata[,1]),col=colramp[1],lwd=3,ylim=c(0,.80),
main = "Distribution before normalization")
for(i in 2:dim(numdst)[2]){lines(density(logdata[,i]),lwd=3,col=colramp[i])}
# Quantile normalization (preprocessCore) forces identical per-sample
# distributions.
norm_ds = normalize.quantiles(as.matrix(logdata))
plot(density(norm_ds[,1]),col=colramp[1],lwd=3,ylim=c(0,.30),
main = "Distribution after normalization")
for(i in 2:dim(numdst)[2]){lines(density(norm_ds[,i]),lwd=3,col=colramp[i])}
# limma::removeBatchEffect treats the study accession (ds2$code) as the
# batch while preserving the group effect via the design matrix.
rmbds <- removeBatchEffect(norm_ds, ds2$code,
design=model.matrix(~0+group, data=ds2))
# row & col names
fds <- data.frame(t(data.frame(rmbds)))
rownames(fds) <- rownames(ds2)
grouplabels <- ds2[,1:2]
findata <- cbind(grouplabels, fds)
# mds plot
d <- dist(fds) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
# ggplot mds
rmdsp <- data.frame(dim1=fit$points[,1],dim2=fit$points[,2],
group=as.factor(ds2$group), study=ds2$code)
# NOTE(review): ggplot2 is used here but never attached with library();
# confirm it is loaded elsewhere or add library(ggplot2) at the top.
ggplot(data=rmdsp, aes(dim1, dim2, color = study, shape=group)) +
geom_point(size = 3) +
ggtitle("MDS plot after batch effect correction") +
theme(plot.title = element_text(hjust = 0.5))
# --- Dimensionality reduction and model training -------------------------
# PCA on the log-transformed, batch-corrected expression matrix; the PC
# scores (pca$x) become the classifier features.
set.seed(333)
pca <- prcomp(log2(findata[,-1:-2]+2))
#pca <- prcomp(log2(findata[,2:20725]+1))
pcadata <- data.frame(group=findata$group, study = findata$code, pca$x)
set.seed(333)
# training using train(method = svm)
set.seed(333)
train_control <- trainControl(method="repeatedcv", number = 5, repeats = 10)
# NOTE(review): this svmLinearWeights fit is immediately overwritten by the
# svmLinear2 fit below — give it its own name if its results are needed.
svm <- train(as.factor(group) ~ ., data = pcadata, trControl = train_control,
method = "svmLinearWeights")
svm <- train(as.factor(group) ~ ., data = pcadata, trControl = train_control,
method = "svmLinear2", tuneLength = 10)
svmrad <- train(as.factor(group) ~ ., data = pcadata, trControl = train_control,
method = "svmRadial", tuneLength = 10)
# too many dependencies, not important
#set.seed(333)
#tr5 <- trainControl(method = "repeatedcv", number = 5, repeats = 5)
#naivemod <- train(as.factor(group) ~ ., data = pcadata,
# trControl = tr5, method = "nb")
## nb
#train_control <- trainControl(method="repeatedcv", number=5, repeats=5)
# train the model
#mnb <- train(as.factor(group)~., data=pcadata, trControl=train_control, method="nb")
# summarize results
#print(mnb)
#confusionMatrix(mnb)
## custom rf (VERY slow)
Sys.time()
library("randomForest")
# Custom caret model spec so both mtry AND ntree can be tuned (caret's
# built-in "rf" only tunes mtry). The list fields follow caret's custom
# model interface: parameters/grid/fit/predict/prob/sort/levels.
customRF <- list(type = "Classification", library = "randomForest", loop = NULL)
customRF$parameters <- data.frame(parameter = c("mtry", "ntree"), class = rep("numeric", 2),
label = c("mtry", "ntree"))
customRF$grid <- function(x, y, len = NULL, search = "grid") {}
customRF$fit <- function(x, y, wts, param, lev, last, weights, classProbs, ...) {
randomForest(x, y, mtry = param$mtry, ntree=param$ntree, ...)
}
customRF$predict <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata)
customRF$prob <- function(modelFit, newdata, preProc = NULL, submodels = NULL)
predict(modelFit, newdata, type = "prob")
customRF$sort <- function(x) x[order(x[,1]),]
customRF$levels <- function(x) x$classes
# FIX: was set.seed(seed) — `seed` is never defined anywhere in this script,
# which raises "object 'seed' not found". Use the script-wide seed 333.
set.seed(333)
control2 <- trainControl(method="repeatedcv", number=5, repeats=10)
tunegrid2 <- expand.grid(.mtry=c(1:20), .ntree=c(1000, 1500, 2000, 2500))
mrfc <- train(as.factor(group)~., data=pcadata, method=customRF,
metric='Accuracy', tuneGrid=tunegrid2, trControl=control2)
Sys.time()
summary(mrfc)
plot(mrfc)
treemd1 <- train(group ~ ., method = "rpart", data = pcadata,
trControl = control2)
# neural network
set.seed(333)
# FIX: caret's argument is tuneLength, not tuningLength (the misspelled
# argument was silently forwarded to nnet via ... and ignored as a tuning
# control). MaxNWts is intentionally passed through to nnet.
mnet <- train(group ~ ., data = pcadata, trControl = control2,
method = "nnet", tuneLength = 1, MaxNWts = 1317)
# boostedtrees
set.seed(333)
mtbst <- train(as.factor(group) ~., data = pcadata,
trControl = control2, method = "adaboost")
# model accuracy
# svm 0.569
# svmRadial 0.573
# naivebayes dependency issues
# randomForest takes too long
# nnet 0.793
# boostedTrees 0.928??? overfitting?
# adaboost output
# > mtbst
# AdaBoost Classification Trees
#
# 254 samples
# 255 predictors
# 2 classes: 'ctr', 'pkd'
#
# No pre-processing
# Resampling: Cross-Validated (5 fold, repeated 10 times)
# Summary of sample sizes: 203, 203, 203, 204, 203, 203, ...
# Resampling results across tuning parameters:
#
# nIter method Accuracy Kappa
# 50 Adaboost.M1 0.9043686 0.8032668
# 50 Real adaboost 0.8772157 0.7457349
# 100 Adaboost.M1 0.9236784 0.8437119
# 100 Real adaboost 0.8850745 0.7614762
# 150 Adaboost.M1 0.9280314 0.8525355
# 150 Real adaboost 0.8933490 0.7787139
#
# Accuracy was used to select the optimal model using the largest value.
# The final values used for the model were nIter = 150 and method = Adaboost.M1. |
######################################################################
# Goal is described in CompetitionDescription.pdf
# For the category
# Object_Type
# want to find features to help distinguish between
# Base Salary/Compensation
# NO_LABEL
# Other Compensation/Stipend
# Supplies/Materials
# the given expenditure must be placed in the appropriate classification.
# want to create a text file for
# ByCategory x ByClassification x ExplanatoryVariable
##################################################################
# --- Build per-class text corpora ----------------------------------------
# For the Object_Type category, concatenate the (lower-cased,
# punctuation-stripped) text of every training row into one file per
# classification:  <ExplanatoryVariable>/<Classification>.txt
train <- read.csv('/home//michael/Data//EdClassification/TrainingData.csv')
ExplanVar <-c('Facility_or_Department', 'Function_Description', 'Fund_Description', 'Job_Title_Description', 'Location_Description', 'Object_Description', 'Position_Extra', 'Program_Description', 'SubFund_Description', 'Sub_Object_Description', 'Text_1', 'Text_2')
classObject <- c('Base Salary/Compensation', 'NO_LABEL', 'Other Compensation/Stipend', 'Supplies/Materials')
# Renamed from `cat` (not used elsewhere) to avoid shadowing base::cat().
category <- 'Object_Type'
colCat <- match(category, colnames(train))
for (var in ExplanVar){
dir.create(paste0(var))
colVar <- match(var, colnames(train))
for (i in classObject){
entries <- train[train[,colCat] == i,colVar]
text <- tolower(toString(entries))
text <- gsub("[[:punct:]]", "", as.character(text))
# FIX: '.txt' was previously the 4th positional argument of gsub()
# (ignore.case), so output files got no extension; append it after gsub().
filename <- paste0(var, '/', gsub('/', '_', i), '.txt')
writeLines(text, filename)
}
}
# produce document term matrices
# One DocumentTermMatrix per explanatory variable, built from the per-class
# text files written above; only high-frequency terms are kept and saved.
library(tm)
for (var in ExplanVar){
corp <- Corpus(DirSource(var))
corp <- tm_map(corp, stripWhitespace)
corpMat <- DocumentTermMatrix(corp)
# FIX: as.matrix() is the documented way to densify a DocumentTermMatrix;
# inspect() is meant for printing a preview, not for conversion.
matr <- as.matrix(corpMat)
# Keep only terms whose maximum per-document count exceeds 5000
# (vectorized; replaces growing `keepers` with c() inside a loop).
keepers <- which(apply(matr, 2, max) > 5000)
# drop = FALSE keeps a matrix even when exactly one term survives.
matr <- matr[, keepers, drop = FALSE]
write.csv(matr, paste0(var, 'DocTermMatrix.csv'))
}
# Add these words to the training and test features
# Purpose: turn each retained doc-term word into a binary indicator column
# on the training features: 1 if the word occurs in that row's text for the
# matching explanatory variable, 0 otherwise. New columns start at 271.
features <- read.csv('/home//michael/Data//EdClassification/trainFeatures.csv')
features[,1] <- train[,1]
features <- features[,c(1,8,12:279)]
colnum <- 271
for (var in ExplanVar){
dat <- read.csv(paste0(var,'DocTermMatrix.csv'))
words <- colnames(dat)
colVar <- match(var, colnames(train))
for (word in words[2:ncol(dat)]){
# NOTE(review): grep() treats `word` as a regular expression; a word
# containing regex metacharacters could mis-match — consider fixed = TRUE
# after confirming that substring matching is the intent.
tested <- grep(word, tolower(train[,colVar]))
features[,colnum] <- c(rep(0, nrow(train)))
features[tested,colnum] <-1
names(features)[colnum]<-paste0(var,'_',word)
colnum <- colnum + 1
print(colnum)  # progress indicator for this slow loop
}
}
write.csv(features, 'trainFeatures.csv', row.names = FALSE)
# Mirror of the training-feature loop above, applied to the test set: one
# binary indicator column per retained doc-term word.
feature <- read.csv('/home//michael/Data//EdClassification/testFeatures.csv')
test <- read.csv('/home//michael/Data//EdClassification/TestData.csv')
# NOTE(review): starts at 270 here vs 271 for the training set — presumably
# because the test features lack the label column; confirm the offset.
colnum <- 270
for (var in ExplanVar){
dat <- read.csv(paste0(var,'DocTermMatrix.csv'))
words <- colnames(dat)
colVar <- match(var, colnames(test))
for (word in words[2:ncol(dat)]){
tested <- grep(word, tolower(test[,colVar]))
feature[,colnum] <- c(rep(0, nrow(test)))
feature[tested,colnum] <-1
names(feature)[colnum]<-paste0(var,'_',word)
colnum <- colnum + 1
print(colnum)  # progress indicator for this slow loop
}
}
write.csv(feature, 'testFeatures.csv', row.names = FALSE)
# produce partition for python random forest training
library(caret)
# Stratified 15% sample of the rows (stratified on the label in column 2)
# so the Python-side random forest trains on a manageable subset.
ind <- createDataPartition(features[,2], p = .15, list = FALSE, times = 1)
feat <- features[ind,]
# FIX (idiom): paste0() around a single string literal was a no-op.
write.csv(feat, 'TrainPartition.csv', row.names = FALSE)
############################################################################
# Generate a random forest model in scikit learn:
# GenerateRandomForestModels.py
#
# from pandas import read_csv
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.cross_validation import cross_val_score
# from sklearn.externals import joblib
#
#
#
# rfc = RandomForestClassifier(n_estimators = 100)
# cat = 'Function'
# filename = 'TrainPartition.csv'
# filename1 = 'RFModel.pkl'
# df = read_csv(filename)
# target = df.iloc[:,1]
# data = df.iloc[:,2:]
# rfc.fit(data, target)
# scores = cross_val_score(rfc, data, target, cv = 10)
# print('Accuracy: %0.2f (+/- %0.2f)' %(scores.mean(), scores.std()*2))
# joblib.dump(rfc, filename1)
###################################################################################
#####################################################################################
# make probabilistic predictions of test set
# ClassifyInPosition.py
# from pandas import read_csv, DataFrame
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.cross_validation import cross_val_score
# from sklearn.externals import joblib
# from numpy import savetxt
#
# cat = 'Function'
# filename = 'testFeatures.csv'
# filename1 = 'RFModel.pkl'
# filename3 = 'PredictProbs.csv'
# rfc = joblib.load(filename1)
# df = read_csv(filename)
# data = df.iloc[:,1:]
# print(data.shape)
# out = rfc.predict_proba(data)
# dataout = DataFrame(out)
# dataout.to_csv(filename3)
#########################################################################################
# Construct the submission
# FIX (idiom): use <- for assignment, matching the rest of the script.
dat <- read.csv('/home/michael/Data/EdClassification/PolishedPresentation/Submission/submit7.csv')
pred <- read.csv('PredictProbs.csv')
# Overwrite submission columns 39:49 with the 11 predicted probability
# columns (PredictProbs column 1 is a row index, so take 2:12). Presumably
# these are the Object_Type columns — confirm against the submission format.
dat[,39:49] <- pred[,2:12]
write.csv(dat, '/home/michael/Data/EdClassification/PolishedPresentation/Submission/submit7.csv', row.names = FALSE)
# column labels are a terrible problem in R because of required punctuation; ugly but functional:
# copy and paste column headers in a text editor.
| /RefineObject/UpdateObjectFeatures.R | no_license | mkeysman/EdClassification | R | false | false | 5,412 | r | ######################################################################
# Goal is described in CompetitionDescription.pdf
# For the category
# Object_Type
# want to find features to help distinguish between
# Base Salary/Compensation
# NO_LABEL
# Other Compensation/Stipend
# Supplies/Materials
# the given expenditure must be placed in the appropriate classification.
# want to create a text file for
# ByCategory x ByClassification x ExplanatoryVariable
##################################################################
# --- Build per-class text corpora ----------------------------------------
# For the Object_Type category, concatenate the (lower-cased,
# punctuation-stripped) text of every training row into one file per
# classification:  <ExplanatoryVariable>/<Classification>.txt
train <- read.csv('/home//michael/Data//EdClassification/TrainingData.csv')
ExplanVar <-c('Facility_or_Department', 'Function_Description', 'Fund_Description', 'Job_Title_Description', 'Location_Description', 'Object_Description', 'Position_Extra', 'Program_Description', 'SubFund_Description', 'Sub_Object_Description', 'Text_1', 'Text_2')
classObject <- c('Base Salary/Compensation', 'NO_LABEL', 'Other Compensation/Stipend', 'Supplies/Materials')
# Renamed from `cat` (not used elsewhere) to avoid shadowing base::cat().
category <- 'Object_Type'
colCat <- match(category, colnames(train))
for (var in ExplanVar){
dir.create(paste0(var))
colVar <- match(var, colnames(train))
for (i in classObject){
entries <- train[train[,colCat] == i,colVar]
text <- tolower(toString(entries))
text <- gsub("[[:punct:]]", "", as.character(text))
# FIX: '.txt' was previously the 4th positional argument of gsub()
# (ignore.case), so output files got no extension; append it after gsub().
filename <- paste0(var, '/', gsub('/', '_', i), '.txt')
writeLines(text, filename)
}
}
# produce document term matrices
# One DocumentTermMatrix per explanatory variable, built from the per-class
# text files written above; only high-frequency terms are kept and saved.
library(tm)
for (var in ExplanVar){
corp <- Corpus(DirSource(var))
corp <- tm_map(corp, stripWhitespace)
corpMat <- DocumentTermMatrix(corp)
# FIX: as.matrix() is the documented way to densify a DocumentTermMatrix;
# inspect() is meant for printing a preview, not for conversion.
matr <- as.matrix(corpMat)
# Keep only terms whose maximum per-document count exceeds 5000
# (vectorized; replaces growing `keepers` with c() inside a loop).
keepers <- which(apply(matr, 2, max) > 5000)
# drop = FALSE keeps a matrix even when exactly one term survives.
matr <- matr[, keepers, drop = FALSE]
write.csv(matr, paste0(var, 'DocTermMatrix.csv'))
}
# Add these words to the training and test features
# Purpose: turn each retained doc-term word into a binary indicator column
# on the training features: 1 if the word occurs in that row's text for the
# matching explanatory variable, 0 otherwise. New columns start at 271.
features <- read.csv('/home//michael/Data//EdClassification/trainFeatures.csv')
features[,1] <- train[,1]
features <- features[,c(1,8,12:279)]
colnum <- 271
for (var in ExplanVar){
dat <- read.csv(paste0(var,'DocTermMatrix.csv'))
words <- colnames(dat)
colVar <- match(var, colnames(train))
for (word in words[2:ncol(dat)]){
# NOTE(review): grep() treats `word` as a regular expression; a word
# containing regex metacharacters could mis-match — consider fixed = TRUE
# after confirming that substring matching is the intent.
tested <- grep(word, tolower(train[,colVar]))
features[,colnum] <- c(rep(0, nrow(train)))
features[tested,colnum] <-1
names(features)[colnum]<-paste0(var,'_',word)
colnum <- colnum + 1
print(colnum)  # progress indicator for this slow loop
}
}
write.csv(features, 'trainFeatures.csv', row.names = FALSE)
# Mirror of the training-feature loop above, applied to the test set: one
# binary indicator column per retained doc-term word.
feature <- read.csv('/home//michael/Data//EdClassification/testFeatures.csv')
test <- read.csv('/home//michael/Data//EdClassification/TestData.csv')
# NOTE(review): starts at 270 here vs 271 for the training set — presumably
# because the test features lack the label column; confirm the offset.
colnum <- 270
for (var in ExplanVar){
dat <- read.csv(paste0(var,'DocTermMatrix.csv'))
words <- colnames(dat)
colVar <- match(var, colnames(test))
for (word in words[2:ncol(dat)]){
tested <- grep(word, tolower(test[,colVar]))
feature[,colnum] <- c(rep(0, nrow(test)))
feature[tested,colnum] <-1
names(feature)[colnum]<-paste0(var,'_',word)
colnum <- colnum + 1
print(colnum)  # progress indicator for this slow loop
}
}
write.csv(feature, 'testFeatures.csv', row.names = FALSE)
# produce partition for python random forest training
library(caret)
# Stratified 15% sample of the rows (stratified on the label in column 2)
# so the Python-side random forest trains on a manageable subset.
ind <- createDataPartition(features[,2], p = .15, list = FALSE, times = 1)
feat <- features[ind,]
# FIX (idiom): paste0() around a single string literal was a no-op.
write.csv(feat, 'TrainPartition.csv', row.names = FALSE)
############################################################################
# Generate a random forest model in scikit learn:
# GenerateRandomForestModels.py
#
# from pandas import read_csv
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.cross_validation import cross_val_score
# from sklearn.externals import joblib
#
#
#
# rfc = RandomForestClassifier(n_estimators = 100)
# cat = 'Function'
# filename = 'TrainPartition.csv'
# filename1 = 'RFModel.pkl'
# df = read_csv(filename)
# target = df.iloc[:,1]
# data = df.iloc[:,2:]
# rfc.fit(data, target)
# scores = cross_val_score(rfc, data, target, cv = 10)
# print('Accuracy: %0.2f (+/- %0.2f)' %(scores.mean(), scores.std()*2))
# joblib.dump(rfc, filename1)
###################################################################################
#####################################################################################
# make probabilistic predictions of test set
# ClassifyInPosition.py
# from pandas import read_csv, DataFrame
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.cross_validation import cross_val_score
# from sklearn.externals import joblib
# from numpy import savetxt
#
# cat = 'Function'
# filename = 'testFeatures.csv'
# filename1 = 'RFModel.pkl'
# filename3 = 'PredictProbs.csv'
# rfc = joblib.load(filename1)
# df = read_csv(filename)
# data = df.iloc[:,1:]
# print(data.shape)
# out = rfc.predict_proba(data)
# dataout = DataFrame(out)
# dataout.to_csv(filename3)
#########################################################################################
# Construct the submission
# FIX (idiom): use <- for assignment, matching the rest of the script.
dat <- read.csv('/home/michael/Data/EdClassification/PolishedPresentation/Submission/submit7.csv')
pred <- read.csv('PredictProbs.csv')
# Overwrite submission columns 39:49 with the 11 predicted probability
# columns (PredictProbs column 1 is a row index, so take 2:12). Presumably
# these are the Object_Type columns — confirm against the submission format.
dat[,39:49] <- pred[,2:12]
write.csv(dat, '/home/michael/Data/EdClassification/PolishedPresentation/Submission/submit7.csv', row.names = FALSE)
# column labels are a terrible problem in R because of required punctuation; ugly but functional:
# copy and paste column headers in a text editor.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collections.R
\name{silent_library}
\alias{silent_library}
\title{Silently add packages}
\usage{
silent_library(packages)
}
\arguments{
\item{packages}{character vector of package names}
}
\description{
Load packages without any info returned
}
\details{
Uses invisible() and supressPackageStartMessages() to silently load the packages.
}
| /man/silent_library.Rd | no_license | MalteThodberg/ThodbergMisc | R | false | true | 417 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collections.R
\name{silent_library}
\alias{silent_library}
\title{Silently add packages}
\usage{
silent_library(packages)
}
\arguments{
\item{packages}{character vector of package names}
}
\description{
Load packages without any info returned
}
\details{
Uses invisible() and supressPackageStartMessages() to silently load the packages.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_admin_list_user_auth_events}
\alias{cognitoidentityprovider_admin_list_user_auth_events}
\title{Lists a history of user activity and any risks detected as part of
Amazon Cognito advanced security}
\usage{
cognitoidentityprovider_admin_list_user_auth_events(UserPoolId,
Username, MaxResults, NextToken)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID.}
\item{Username}{[required] The user pool username or an alias.}
\item{MaxResults}{The maximum number of authentication events to return.}
\item{NextToken}{A pagination token.}
}
\value{
A list with the following syntax:\preformatted{list(
AuthEvents = list(
list(
EventId = "string",
EventType = "SignIn"|"SignUp"|"ForgotPassword",
CreationDate = as.POSIXct(
"2015-01-01"
),
EventResponse = "Success"|"Failure",
EventRisk = list(
RiskDecision = "NoRisk"|"AccountTakeover"|"Block",
RiskLevel = "Low"|"Medium"|"High",
CompromisedCredentialsDetected = TRUE|FALSE
),
ChallengeResponses = list(
list(
ChallengeName = "Password"|"Mfa",
ChallengeResponse = "Success"|"Failure"
)
),
EventContextData = list(
IpAddress = "string",
DeviceName = "string",
Timezone = "string",
City = "string",
Country = "string"
),
EventFeedback = list(
FeedbackValue = "Valid"|"Invalid",
Provider = "string",
FeedbackDate = as.POSIXct(
"2015-01-01"
)
)
)
),
NextToken = "string"
)
}
}
\description{
Lists a history of user activity and any risks detected as part of
Amazon Cognito advanced security.
}
\section{Request syntax}{
\preformatted{svc$admin_list_user_auth_events(
UserPoolId = "string",
Username = "string",
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/cognitoidentityprovider_admin_list_user_auth_events.Rd | permissive | TWarczak/paws | R | false | true | 2,022 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_admin_list_user_auth_events}
\alias{cognitoidentityprovider_admin_list_user_auth_events}
\title{Lists a history of user activity and any risks detected as part of
Amazon Cognito advanced security}
\usage{
cognitoidentityprovider_admin_list_user_auth_events(UserPoolId,
Username, MaxResults, NextToken)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID.}
\item{Username}{[required] The user pool username or an alias.}
\item{MaxResults}{The maximum number of authentication events to return.}
\item{NextToken}{A pagination token.}
}
\value{
A list with the following syntax:\preformatted{list(
AuthEvents = list(
list(
EventId = "string",
EventType = "SignIn"|"SignUp"|"ForgotPassword",
CreationDate = as.POSIXct(
"2015-01-01"
),
EventResponse = "Success"|"Failure",
EventRisk = list(
RiskDecision = "NoRisk"|"AccountTakeover"|"Block",
RiskLevel = "Low"|"Medium"|"High",
CompromisedCredentialsDetected = TRUE|FALSE
),
ChallengeResponses = list(
list(
ChallengeName = "Password"|"Mfa",
ChallengeResponse = "Success"|"Failure"
)
),
EventContextData = list(
IpAddress = "string",
DeviceName = "string",
Timezone = "string",
City = "string",
Country = "string"
),
EventFeedback = list(
FeedbackValue = "Valid"|"Invalid",
Provider = "string",
FeedbackDate = as.POSIXct(
"2015-01-01"
)
)
)
),
NextToken = "string"
)
}
}
\description{
Lists a history of user activity and any risks detected as part of
Amazon Cognito advanced security.
}
\section{Request syntax}{
\preformatted{svc$admin_list_user_auth_events(
UserPoolId = "string",
Username = "string",
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
#' Deploy a production-ready predictive RandomForest model
#'
#' @description This step allows one to
#' \itemize{
#' \item Load a saved model from \code{\link{RandomForestDevelopment}}
#' \item Run the model against test data to generate predictions
#' \item Push these predictions to SQL Server
#' }
#' @docType class
#' @usage RandomForestDeployment(type, df, grainCol, testWindowCol,
#' predictedCol, impute, debug)
#' @import caret
#' @import doParallel
#' @importFrom R6 R6Class
#' @import ranger
#' @param type The type of model (either 'regression' or 'classification')
#' @param df Dataframe whose columns are used for calc.
#' @param grainCol The dataframe's column that has IDs pertaining to the grain
#' @param testWindowCol (deprecated) All data now receives a prediction
#' @param predictedCol Column that you want to predict. If you're doing
#' classification then this should be Y/N.
#' @param impute For training df, set all-column imputation to F or T.
#' This uses mean replacement for numeric columns
#' and most frequent for factorized columns.
#' F leads to removal of rows containing NULLs.
#' @param debug Provides the user extended output to the console, in order
#' to monitor the calculations throughout. Use T or F.
#' @export
#' @seealso \code{\link{healthcareai}}
#' @examples
#'
#' #### Classification Example using csv data ####
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' # setwd('C:/Yourscriptlocation/Useforwardslashes') # Uncomment if using csv
#'
#' # Can delete this line in your work
#' csvfile <- system.file("extdata",
#' "HCRDiabetesClinical.csv",
#' package = "healthcareai")
#'
#' # Replace csvfile with 'path/file'
#' df <- read.csv(file = csvfile,
#' header = TRUE,
#' na.strings = c("NULL", "NA", ""))
#'
#' df$PatientID <- NULL # Only one ID column (ie, PatientEncounterID) is needed; remove this column
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run RandomForest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "classification"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "ThirtyDayReadmitFLG"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#'
#' dfOut <- dL$getOutDf()
#' head(dfOut)
#' # Write to CSV (or JSON, MySQL, etc) using plain R syntax
#' # write.csv(dfOut,'path/predictionsfile.csv')
#'
#' print(proc.time() - ptm)
#'
#' \donttest{
#' #### Classification example using SQL Server data ####
#' # This example requires you to first create a table in SQL Server
#' # If you prefer to not use SAMD, execute this in SSMS to create output table:
#' # CREATE TABLE dbo.HCRDeployClassificationBASE(
#' # BindingID float, BindingNM varchar(255), LastLoadDTS datetime2,
#' # PatientEncounterID int, <--change to match inputID
#' # PredictedProbNBR decimal(38, 2),
#' # Factor1TXT varchar(255), Factor2TXT varchar(255), Factor3TXT varchar(255)
#' # )
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' connection.string <- "
#' driver={SQL Server};
#' server=localhost;
#' database=SAM;
#' trusted_connection=true
#' "
#'
#' query <- "
#' SELECT
#' [PatientEncounterID] --Only need one ID column for random forest
#' ,[SystolicBPNBR]
#' ,[LDLNBR]
#' ,[A1CNBR]
#' ,[GenderFLG]
#' ,[ThirtyDayReadmitFLG]
#' FROM [SAM].[dbo].[HCRDiabetesClinical]
#' "
#'
#' df <- selectData(connection.string, query)
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run RandomForest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "classification"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "ThirtyDayReadmitFLG"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(MSSQLConnectionString = connection.string,
#' df = dfOut,
#' tableName = 'HCRDeployClassificationBASE')
#'
#' print(proc.time() - ptm)
#' }
#'
#' \donttest{
#' #### Regression Example using SQL Server data ####
#' # This example requires you to first create a table in SQL Server
#' # If you prefer to not use SAMD, execute this in SSMS to create output table:
#' # CREATE TABLE dbo.HCRDeployRegressionBASE(
#' # BindingID float, BindingNM varchar(255), LastLoadDTS datetime2,
#' # PatientEncounterID int, <--change to match inputID
#' # PredictedValueNBR decimal(38, 2),
#' # Factor1TXT varchar(255), Factor2TXT varchar(255), Factor3TXT varchar(255)
#' # )
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' connection.string <- "
#' driver={SQL Server};
#' server=localhost;
#' database=SAM;
#' trusted_connection=true
#' "
#'
#' query <- "
#' SELECT
#' [PatientEncounterID] --Only need one ID column for random forest
#' ,[SystolicBPNBR]
#' ,[LDLNBR]
#' ,[A1CNBR]
#' ,[GenderFLG]
#' ,[ThirtyDayReadmitFLG]
#' FROM [SAM].[dbo].[HCRDiabetesClinical]
#' "
#'
#' df <- selectData(connection.string, query)
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "regression"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "A1CNBR"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run Random Forest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' dfDeploy$A1CNBR <- NULL # You won't know the response in production
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "regression"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "A1CNBR"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(MSSQLConnectionString = connection.string,
#' df = dfOut,
#' tableName = 'HCRDeployRegressionBASE')
#'
#' print(proc.time() - ptm)
#' }
#'
#' #### Classification example pulling from CSV and writing to SQLite ####
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' # Can delete these system.file lines in your work
#' csvfile <- system.file("extdata",
#' "HCRDiabetesClinical.csv",
#' package = "healthcareai")
#'
#' sqliteFile <- system.file("extdata",
#' "unit-test.sqlite",
#' package = "healthcareai")
#'
#' # Read in CSV; replace csvfile with 'path/file'
#' df <- read.csv(file = csvfile,
#' header = TRUE,
#' na.strings = c("NULL", "NA", ""))
#'
#' df$PatientID <- NULL # Only one ID column (ie, PatientEncounterID) is needed; remove this column
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run Random Forest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "classification"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "ThirtyDayReadmitFLG"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(SQLiteFileName = sqliteFile,
#' df = dfOut,
#' tableName = 'HCRDeployClassificationBASE')
#'
#' print(proc.time() - ptm)
#'
#' #### Regression example pulling from CSV and writing to SQLite ####
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' # Can delete these system.file lines in your work
#' csvfile <- system.file("extdata",
#' "HCRDiabetesClinical.csv",
#' package = "healthcareai")
#'
#' sqliteFile <- system.file("extdata",
#' "unit-test.sqlite",
#' package = "healthcareai")
#'
#' # Read in CSV; replace csvfile with 'path/file'
#' df <- read.csv(file = csvfile,
#' header = TRUE,
#' na.strings = c("NULL", "NA", ""))
#'
#' df$PatientID <- NULL # Only one ID column (ie, PatientEncounterID) is needed; remove this column
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "regression"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "A1CNBR"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run Random Forest
#' RandomForest<- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' dfDeploy$A1CNBR <- NULL # You won't know the response in production
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "regression"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "A1CNBR"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(SQLiteFileName = sqliteFile,
#' df = dfOut,
#' tableName = 'HCRDeployRegressionBASE')
#'
#' print(proc.time() - ptm)
RandomForestDeployment <- R6Class("RandomForestDeployment",

  # Inheritance: model loading, factor formatting, and dummy-variable creation
  # live in the shared deployment base class.
  inherit = SupervisedModelDeployment,

  # Private members
  private = list(

    # variables
    coefficients = NA,             # logit coefficients (minus intercept) used to rank factors
    multiplyRes = NA,              # raw test values weighted element-wise by coefficients
    orderedFactors = NA,           # per-row column names sorted by descending importance
    predictedValsForUnitTest = NA, # hook used by unit tests
    outDf = NA,                    # final predictions data frame surfaced via getOutDf()
    fitRF = NA,                    # fitted random forest model loaded from disk
    predictions = NA,              # numeric vector: probabilities (classification) or values (regression)

    # functions

    # Run the loaded model against self$params$df.
    # For classification, only the positive-class probability column is kept,
    # so private$predictions is a plain numeric vector afterwards.
    performPrediction = function() {
      if (self$params$type == 'classification') {
        private$predictions <- caret::predict.train(object = private$fitRF,
                                                    newdata = self$params$df,
                                                    type = 'prob')
        private$predictions <- private$predictions[, 2]

        if (isTRUE(self$params$debug)) {
          # predictions is a vector at this point, so nrow() would be NULL;
          # length() is the correct size accessor here
          cat('Number of predictions: ', length(private$predictions), '\n')
          cat('First 10 raw classification probability predictions', '\n')
          # head() avoids NA entries when there are fewer than 10 predictions
          print(round(head(private$predictions, 10), 2))
        }
      } else if (self$params$type == 'regression') {
        private$predictions <- caret::predict.train(private$fitRF, newdata = self$params$df)

        if (isTRUE(self$params$debug)) {
          cat('Rows in regression prediction: ', length(private$predictions), '\n')
          cat('First 10 raw regression predictions (with row # first)', '\n')
          print(round(head(private$predictions, 10), 2))
        }
      }
    },

    # Pull coefficients from the companion logit fit (trained during develop)
    # so columns can be ranked by order of importance.
    # NOTE(review): the method name preserves the original misspelling
    # ("Coeffcients") for backward compatibility.
    calculateCoeffcients = function() {
      coeffTemp <- self$modelInfo$fitLogit$coefficients

      if (isTRUE(self$params$debug)) {
        cat('Coefficients for the default logit (for ranking var import)', '\n')
        print(coeffTemp)
      }

      # Drop the intercept; only per-column weights are needed for ranking
      private$coefficients <- coeffTemp[2:length(coeffTemp)]
    },

    # Weight each raw test-set value by its logit coefficient
    # (column-wise sweep across every row).
    calculateMultiplyRes = function() {
      if (isTRUE(self$params$debug)) {
        cat("Test set to be multiplied with coefficients", '\n')
        cat(str(private$dfTestRaw), '\n')
      }

      private$multiplyRes <- sweep(private$dfTestRaw, 2, private$coefficients, `*`)

      if (isTRUE(self$params$debug)) {
        cat('Data frame after multiplying raw vals by coeffs', '\n')
        # head() is safe when fewer than 10 rows are present
        print(head(private$multiplyRes, 10))
      }
    },

    # For every row, order the column names by descending weighted value to
    # obtain that row's top contributing factors.
    calculateOrderedFactors = function() {
      # seq_len() handles the zero-row edge case that 1:nrow() would not
      private$orderedFactors <- t(sapply(
        seq_len(nrow(private$multiplyRes)),
        function(i)
          colnames(private$multiplyRes[order(private$multiplyRes[i, ],
                                             decreasing = TRUE)])))

      if (isTRUE(self$params$debug)) {
        cat('Data frame after getting column importance ordered', '\n')
        print(head(private$orderedFactors, n = 10))
      }
    },

    # Assemble the output data frame (audit columns, grain ID, prediction,
    # top-3 factors) in the shape expected by downstream SAM tables.
    createDf = function() {
      dtStamp <- as.POSIXlt(Sys.time())

      # Combine grain.col, prediction, and time to be put back into SAM table.
      # The three factor lines are kept separate to handle the case of a
      # single prediction.
      private$outDf <- data.frame(
        0,                            # BindingID
        'R',                          # BindingNM
        dtStamp,                      # LastLoadDTS
        private$grainTest,            # GrainID
        private$predictions,          # PredictedProbab
        private$orderedFactors[, 1],  # Top 1 Factor
        private$orderedFactors[, 2],  # Top 2 Factor
        private$orderedFactors[, 3])  # Top 3 Factor

      predictedResultsName <- ""
      if (self$params$type == 'classification') {
        predictedResultsName <- "PredictedProbNBR"
      } else if (self$params$type == 'regression') {
        predictedResultsName <- "PredictedValueNBR"
      }

      colnames(private$outDf) <- c(
        "BindingID",
        "BindingNM",
        "LastLoadDTS",
        self$params$grainCol,
        predictedResultsName,
        "Factor1TXT",
        "Factor2TXT",
        "Factor3TXT"
      )

      # Remove row names so df can be written to DB
      # TODO: in writeData function, find how to ignore row names
      rownames(private$outDf) <- NULL

      if (isTRUE(self$params$debug)) {
        cat('Dataframe with predictions:', '\n')
        cat(str(private$outDf), '\n')
      }
    }
  ),

  # Public members
  public = list(

    # Constructor
    # p: new SupervisedModelDeploymentParams class object,
    #    i.e. p <- SupervisedModelDeploymentParams$new()
    initialize = function(p) {
      super$initialize(p)

      if (is.null(self$params$modelName)) {
        self$params$modelName <- "RF"
      }
      if (!is.null(p$rfmtry))
        self$params$rfmtry <- p$rfmtry
      if (!is.null(p$trees))
        self$params$trees <- p$trees
    },

    # Override: load the saved model and run the full deployment pipeline.
    deploy = function() {
      # Try to load the model
      super$loadModelAndInfo(modelFullName = "RandomForest")
      private$fitRF <- private$fitObj
      private$fitObj <- NULL

      # Make sure factor columns have the training data factor levels
      super$formatFactorColumns()
      # Update self$params$df to reflect the training data factor levels
      self$params$df <- private$dfTestRaw

      # Predict
      private$performPrediction()

      # Get dummy data based on factors from develop
      super$makeFactorDummies()

      # Rank factors contributing to each row's prediction
      private$calculateCoeffcients()
      private$calculateMultiplyRes()
      private$calculateOrderedFactors()

      # Create data frame for output
      private$createDf()
    },

    # Surface outDf as attribute for export to Oracle, MySQL, etc.
    getOutDf = function() {
      return(private$outDf)
    }
  )
)
| /R/random-forest-deployment.R | permissive | DokottaBYO/healthcareai-r | R | false | false | 17,393 | r | #' Deploy a production-ready predictive RandomForest model
#'
#' @description This step allows one to
#' \itemize{
#' \item Load a saved model from \code{\link{RandomForestDevelopment}}
#' \item Run the model against test data to generate predictions
#' \item Push these predictions to SQL Server
#' }
#' @docType class
#' @usage RandomForestDeployment(type, df, grainCol, testWindowCol,
#' predictedCol, impute, debug)
#' @import caret
#' @import doParallel
#' @importFrom R6 R6Class
#' @import ranger
#' @param type The type of model (either 'regression' or 'classification')
#' @param df Dataframe whose columns are used for calc.
#' @param grainCol The dataframe's column that has IDs pertaining to the grain
#' @param testWindowCol (deprecated) All data now receives a prediction
#' @param predictedCol Column that you want to predict. If you're doing
#' classification then this should be Y/N.
#' @param impute For training df, set all-column imputation to F or T.
#' This uses mean replacement for numeric columns
#' and most frequent for factorized columns.
#' F leads to removal of rows containing NULLs.
#' @param debug Provides the user extended output to the console, in order
#' to monitor the calculations throughout. Use T or F.
#' @export
#' @seealso \code{\link{healthcareai}}
#' @examples
#'
#' #### Classification Example using csv data ####
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' # setwd('C:/Yourscriptlocation/Useforwardslashes') # Uncomment if using csv
#'
#' # Can delete this line in your work
#' csvfile <- system.file("extdata",
#' "HCRDiabetesClinical.csv",
#' package = "healthcareai")
#'
#' # Replace csvfile with 'path/file'
#' df <- read.csv(file = csvfile,
#' header = TRUE,
#' na.strings = c("NULL", "NA", ""))
#'
#' df$PatientID <- NULL # Only one ID column (ie, PatientEncounterID) is needed; remove this column
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run RandomForest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "classification"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "ThirtyDayReadmitFLG"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#'
#' dfOut <- dL$getOutDf()
#' head(dfOut)
#' # Write to CSV (or JSON, MySQL, etc) using plain R syntax
#' # write.csv(dfOut,'path/predictionsfile.csv')
#'
#' print(proc.time() - ptm)
#'
#' \donttest{
#' #### Classification example using SQL Server data ####
#' # This example requires you to first create a table in SQL Server
#' # If you prefer to not use SAMD, execute this in SSMS to create output table:
#' # CREATE TABLE dbo.HCRDeployClassificationBASE(
#' # BindingID float, BindingNM varchar(255), LastLoadDTS datetime2,
#' # PatientEncounterID int, <--change to match inputID
#' # PredictedProbNBR decimal(38, 2),
#' # Factor1TXT varchar(255), Factor2TXT varchar(255), Factor3TXT varchar(255)
#' # )
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' connection.string <- "
#' driver={SQL Server};
#' server=localhost;
#' database=SAM;
#' trusted_connection=true
#' "
#'
#' query <- "
#' SELECT
#' [PatientEncounterID] --Only need one ID column for random forest
#' ,[SystolicBPNBR]
#' ,[LDLNBR]
#' ,[A1CNBR]
#' ,[GenderFLG]
#' ,[ThirtyDayReadmitFLG]
#' FROM [SAM].[dbo].[HCRDiabetesClinical]
#' "
#'
#' df <- selectData(connection.string, query)
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run RandomForest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "classification"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "ThirtyDayReadmitFLG"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(MSSQLConnectionString = connection.string,
#' df = dfOut,
#' tableName = 'HCRDeployClassificationBASE')
#'
#' print(proc.time() - ptm)
#' }
#'
#' \donttest{
#' #### Regression Example using SQL Server data ####
#' # This example requires you to first create a table in SQL Server
#' # If you prefer to not use SAMD, execute this in SSMS to create output table:
#' # CREATE TABLE dbo.HCRDeployRegressionBASE(
#' # BindingID float, BindingNM varchar(255), LastLoadDTS datetime2,
#' # PatientEncounterID int, <--change to match inputID
#' # PredictedValueNBR decimal(38, 2),
#' # Factor1TXT varchar(255), Factor2TXT varchar(255), Factor3TXT varchar(255)
#' # )
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' connection.string <- "
#' driver={SQL Server};
#' server=localhost;
#' database=SAM;
#' trusted_connection=true
#' "
#'
#' query <- "
#' SELECT
#' [PatientEncounterID] --Only need one ID column for random forest
#' ,[SystolicBPNBR]
#' ,[LDLNBR]
#' ,[A1CNBR]
#' ,[GenderFLG]
#' ,[ThirtyDayReadmitFLG]
#' FROM [SAM].[dbo].[HCRDiabetesClinical]
#' "
#'
#' df <- selectData(connection.string, query)
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "regression"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "A1CNBR"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run Random Forest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' dfDeploy$A1CNBR <- NULL # You won't know the response in production
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "regression"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "A1CNBR"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(MSSQLConnectionString = connection.string,
#' df = dfOut,
#' tableName = 'HCRDeployRegressionBASE')
#'
#' print(proc.time() - ptm)
#' }
#'
#' #### Classification example pulling from CSV and writing to SQLite ####
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' # Can delete these system.file lines in your work
#' csvfile <- system.file("extdata",
#' "HCRDiabetesClinical.csv",
#' package = "healthcareai")
#'
#' sqliteFile <- system.file("extdata",
#' "unit-test.sqlite",
#' package = "healthcareai")
#'
#' # Read in CSV; replace csvfile with 'path/file'
#' df <- read.csv(file = csvfile,
#' header = TRUE,
#' na.strings = c("NULL", "NA", ""))
#'
#' df$PatientID <- NULL # Only one ID column (ie, PatientEncounterID) is needed; remove this column
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "classification"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "ThirtyDayReadmitFLG"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run Random Forest
#' RandomForest <- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "classification"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "ThirtyDayReadmitFLG"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(SQLiteFileName = sqliteFile,
#' df = dfOut,
#' tableName = 'HCRDeployClassificationBASE')
#'
#' print(proc.time() - ptm)
#'
#' #### Regression example pulling from CSV and writing to SQLite ####
#'
#' ## 1. Loading data and packages.
#' ptm <- proc.time()
#' library(healthcareai)
#'
#' # Can delete these system.file lines in your work
#' csvfile <- system.file("extdata",
#' "HCRDiabetesClinical.csv",
#' package = "healthcareai")
#'
#' sqliteFile <- system.file("extdata",
#' "unit-test.sqlite",
#' package = "healthcareai")
#'
#' # Read in CSV; replace csvfile with 'path/file'
#' df <- read.csv(file = csvfile,
#' header = TRUE,
#' na.strings = c("NULL", "NA", ""))
#'
#' df$PatientID <- NULL # Only one ID column (ie, PatientEncounterID) is needed; remove this column
#'
#' # Save a dataframe for validation later on
#' dfDeploy <- df[951:1000,]
#'
#' ## 2. Train and save the model using DEVELOP
#' print('Historical, development data:')
#' str(df)
#'
#' set.seed(42)
#' p <- SupervisedModelDevelopmentParams$new()
#' p$df <- df
#' p$type <- "regression"
#' p$impute <- TRUE
#' p$grainCol <- "PatientEncounterID"
#' p$predictedCol <- "A1CNBR"
#' p$debug <- FALSE
#' p$cores <- 1
#'
#' # Run Random Forest
#' RandomForest<- RandomForestDevelopment$new(p)
#' RandomForest$run()
#'
#' ## 3. Load saved model and use DEPLOY to generate predictions.
#' dfDeploy$A1CNBR <- NULL # You won't know the response in production
#' print('Fake production data:')
#' str(dfDeploy)
#'
#' p2 <- SupervisedModelDeploymentParams$new()
#' p2$type <- "regression"
#' p2$df <- dfDeploy
#' p2$grainCol <- "PatientEncounterID"
#' p2$predictedCol <- "A1CNBR"
#' p2$impute <- TRUE
#' p2$debug <- FALSE
#' p2$cores <- 1
#'
#' dL <- RandomForestDeployment$new(p2)
#' dL$deploy()
#' dfOut <- dL$getOutDf()
#'
#' writeData(SQLiteFileName = sqliteFile,
#' df = dfOut,
#' tableName = 'HCRDeployRegressionBASE')
#'
#' print(proc.time() - ptm)
RandomForestDeployment <- R6Class("RandomForestDeployment",
#Inheritance
inherit = SupervisedModelDeployment,
#Private members
private = list(
# variables
coefficients = NA,
multiplyRes = NA,
orderedFactors = NA,
predictedValsForUnitTest = NA,
outDf = NA,
fitRF = NA,
predictions = NA,
# functions
# Perform prediction
performPrediction = function() {
if (self$params$type == 'classification') {
private$predictions <- caret::predict.train(object = private$fitRF,
newdata = self$params$df,
type = 'prob')
private$predictions <- private$predictions[,2]
if (isTRUE(self$params$debug)) {
cat('Number of predictions: ', nrow(private$predictions), '\n')
cat('First 10 raw classification probability predictions', '\n')
print(round(private$predictions[1:10],2))
}
} else if (self$params$type == 'regression') {
private$predictions <- caret::predict.train(private$fitRF, newdata = self$params$df)
if (isTRUE(self$params$debug)) {
cat('Rows in regression prediction: ', length(private$predictions), '\n')
cat('First 10 raw regression predictions (with row # first)', '\n')
print(round(private$predictions[1:10],2))
}
}
},
calculateCoeffcients = function() {
# Do semi-manual calc to rank cols by order of importance
coeffTemp <- self$modelInfo$fitLogit$coefficients
if (isTRUE(self$params$debug)) {
cat('Coefficients for the default logit (for ranking var import)', '\n')
print(coeffTemp)
}
private$coefficients <-
coeffTemp[2:length(coeffTemp)] # drop intercept
},
calculateMultiplyRes = function() {
if (isTRUE(self$params$debug)) {
cat("Test set to be multiplied with coefficients", '\n')
cat(str(private$dfTestRaw), '\n')
}
# Apply multiplication of coeff across each row of test set
private$multiplyRes <- sweep(private$dfTestRaw, 2, private$coefficients, `*`)
if (isTRUE(self$params$debug)) {
cat('Data frame after multiplying raw vals by coeffs', '\n')
print(private$multiplyRes[1:10, ])
}
},
calculateOrderedFactors = function() {
# Calculate ordered factors of importance for each row's prediction
private$orderedFactors <- t(sapply
(1:nrow(private$multiplyRes),
function(i)
colnames(private$multiplyRes[order(private$multiplyRes[i, ],
decreasing = TRUE)])))
if (isTRUE(self$params$debug)) {
cat('Data frame after getting column importance ordered', '\n')
print(head(private$orderedFactors, n = 10))
}
},
createDf = function() {
dtStamp <- as.POSIXlt(Sys.time())
# Combine grain.col, prediction, and time to be put back into SAM table
private$outDf <- data.frame(
0, # BindingID
'R', # BindingNM
dtStamp, # LastLoadDTS
private$grainTest, # GrainID
private$predictions, # PredictedProbab
# need three lines for case of single prediction
private$orderedFactors[, 1], # Top 1 Factor
private$orderedFactors[, 2], # Top 2 Factor
private$orderedFactors[, 3]) # Top 3 Factor
predictedResultsName = ""
if (self$params$type == 'classification') {
predictedResultsName = "PredictedProbNBR"
} else if (self$params$type == 'regression') {
predictedResultsName = "PredictedValueNBR"
}
colnames(private$outDf) <- c(
"BindingID",
"BindingNM",
"LastLoadDTS",
self$params$grainCol,
predictedResultsName,
"Factor1TXT",
"Factor2TXT",
"Factor3TXT"
)
# Remove row names so df can be written to DB
# TODO: in writeData function, find how to ignore row names
rownames(private$outDf) <- NULL
if (isTRUE(self$params$debug)) {
cat('Dataframe with predictions:', '\n')
cat(str(private$outDf), '\n')
}
}
),
#Public members
public = list(
#Constructor
#p: new SupervisedModelDeploymentParams class object,
# i.e. p = SupervisedModelDeploymentParams$new()
initialize = function(p) {
super$initialize(p)
if (is.null(self$params$modelName)) {
self$params$modelName = "RF"
}
if (!is.null(p$rfmtry))
self$params$rfmtry <- p$rfmtry
if (!is.null(p$trees))
self$params$trees <- p$trees
},
#Override: deploy the model
# Full deployment pipeline: load the saved random forest, align factor
# levels with the training data, predict on the test set, compute
# per-column contributions, and build the output data frame.
deploy = function() {
  # Try to load the model
  super$loadModelAndInfo(modelFullName = "RandomForest")
  private$fitRF <- private$fitObj
  private$fitObj <- NULL  # release the generic slot once copied
  # Make sure factor columns have the training data factor levels
  super$formatFactorColumns()
  # Update self$params$df to reflect the training data factor levels
  self$params$df <- private$dfTestRaw
  # Predict
  private$performPrediction()
  # Get dummy data based on factors from develop
  super$makeFactorDummies()
  # Calculate coefficients (method name keeps the historical
  # "Coeffcients" spelling defined elsewhere in the class)
  private$calculateCoeffcients()
  # Calculate MultiplyRes
  private$calculateMultiplyRes()
  # Calculate Ordered Factors
  private$calculateOrderedFactors()
  # create dataframe for output
  private$createDf()
},
# Surface outDf as attribute for export to Oracle, MySQL, etc
getOutDf = function() {
  # Accessor: hand back the assembled predictions data frame.
  private$outDf
}
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{generator_next}
\alias{generator_next}
\title{Retrieve the next item from a generator}
\usage{
generator_next(generator, completed = NULL)
}
\arguments{
\item{generator}{Generator}
\item{completed}{Sentinel value to return from \code{generator_next()} if the iteration
completes (defaults to \code{NULL} but can be any R value you specify).}
}
\description{
Use to retrieve items from generators (e.g. \code{\link[=image_data_generator]{image_data_generator()}}). Will return
either the next item or \code{NULL} if there are no more items.
}
| /man/generator_next.Rd | no_license | rdrr1990/keras | R | false | true | 643 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{generator_next}
\alias{generator_next}
\title{Retrieve the next item from a generator}
\usage{
generator_next(generator, completed = NULL)
}
\arguments{
\item{generator}{Generator}
\item{completed}{Sentinel value to return from \code{generator_next()} if the iteration
completes (defaults to \code{NULL} but can be any R value you specify).}
}
\description{
Use to retrieve items from generators (e.g. \code{\link[=image_data_generator]{image_data_generator()}}). Will return
either the next item or \code{NULL} if there are no more items.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRANonGRAN.R
\name{RepoToList}
\alias{RepoToList}
\title{Transform a GRANRepository object into a list}
\usage{
RepoToList(repo)
}
\arguments{
\item{repo}{repository}
}
\value{
a list suitable for use with RepoFromList
}
\description{
Utility to transform a GRANRepository object into a list
so that repos saved using GRANBase can be loaded by GRAN
without requiring GRANBase
}
| /man/RepoToList.Rd | no_license | mkearney/gRAN | R | false | true | 456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRANonGRAN.R
\name{RepoToList}
\alias{RepoToList}
\title{Transform a GRANRepository object into a list}
\usage{
RepoToList(repo)
}
\arguments{
\item{repo}{repository}
}
\value{
a list suitable for use with RepoFromList
}
\description{
Utility to transform a GRANRepository object into a list
so that repos saved using GRANBase can be loaded by GRAN
without requiring GRANBase
}
|
# --------------------------------------
# Author: Andreas Alfons
# Erasmus Universiteit Rotterdam
# --------------------------------------
#' @export
coef.ladlasso <- function(object, zeros = TRUE, tol = .Machine$double.eps^0.5,
                          ...) {
  # Pull the stored coefficients from the fitted LAD-lasso object.
  cf <- object$coefficients
  # When zeros = FALSE, drop coefficients that are numerically zero:
  # vector entries with |coef| <= tol, or matrix rows whose entries are
  # all within tol of zero.
  if (!isTRUE(zeros)) {
    nonzero <- abs(cf) > tol
    if (is.null(dim(cf))) {
      cf <- cf[nonzero]
    } else {
      cf <- cf[apply(nonzero, 1, any), , drop = FALSE]
    }
  }
  cf
}
#' @export
coef.lasso <- function(object, zeros = TRUE, ...) {
  # Pull the stored coefficients from the fitted lasso object.
  cf <- object$coefficients
  # When zeros = FALSE, drop exactly-zero coefficients: vector entries
  # equal to zero, or matrix rows that are entirely zero.
  if (!isTRUE(zeros)) {
    nonzero <- cf != 0
    if (is.null(dim(cf))) {
      cf <- cf[nonzero]
    } else {
      cf <- cf[apply(nonzero, 1, any), , drop = FALSE]
    }
  }
  cf
}
| /R/coef.R | no_license | aalfons/perryExamples | R | false | false | 971 | r | # --------------------------------------
# Author: Andreas Alfons
#         Erasmus Universiteit Rotterdam
# --------------------------------------
# NOTE(review): byte-duplicate of the coef.ladlasso definition earlier
# in this file (dataset concatenation artifact).
#' @export
coef.ladlasso <- function(object, zeros = TRUE, tol = .Machine$double.eps^0.5,
                          ...) {
  ## extract coefficients
  coef <- object$coefficients
  ## if requested, omit (near-)zero coefficients: vector entries with
  ## |coef| <= tol, or matrix rows where every entry is within tol of 0
  if(!isTRUE(zeros)) {
    if(is.null(dim(coef)))
      coef <- coef[abs(coef) > tol]
    else {
      keep <- apply(abs(coef) > tol, 1, any)
      coef <- coef[keep, , drop = FALSE]
    }
  }
  ## return coefficients
  coef
}
# NOTE(review): byte-duplicate of the coef.lasso definition earlier in
# this file (dataset concatenation artifact).
#' @export
coef.lasso <- function(object, zeros = TRUE, ...) {
  ## extract coefficients
  coef <- object$coefficients
  ## if requested, omit zero coefficients: vector entries equal to 0,
  ## or matrix rows that are entirely 0
  if(!isTRUE(zeros)) {
    if(is.null(dim(coef)))
      coef <- coef[coef != 0]
    else {
      keep <- apply(coef != 0, 1, any)
      coef <- coef[keep, , drop = FALSE]
    }
  }
  ## return coefficients
  coef
}
|
# Economic data from the INEGI indicators API
# @elcontrafactual

# libraries
library(httr)
library(jsonlite)
library(rjson)
library(lubridate)

# GDP (PIB): query indicator 493621 from the INEGI developers API (JSON)
url <- 'https://www.inegi.org.mx/app/api/indicadores/desarrolladores/jsonxml/INDICATOR/493621/es/0700/false/BIE/2.0/dafd0053-9a71-1cb4-f58d-4a7071445df0?type=json'
respuesta <- GET(url)
(datosGenerales <- content(respuesta,"text"))
(flujoDatos <- paste(datosGenerales,collapse = " "))
(flujoDatos <- fromJSON(flujoDatos))
(flujoDatos <- flujoDatos$Series)
(flujoDatos <- flujoDatos[[1]]$OBSERVATIONS)
View(flujoDatos)

# Extract value and period from each observation. Fix: preallocated
# vapply() instead of growing `datos`/`periodo` element-by-element in a
# for (i in 1:length(...)) loop.
datos <- vapply(flujoDatos, function(obs) as.numeric(obs$OBS_VALUE), numeric(1))
periodo <- vapply(flujoDatos, function(obs) obs$TIME_PERIOD, character(1))

# NOTE(review): as.Date() assumes TIME_PERIOD is in a format it can
# parse; INEGI periods such as "2020/01" would need explicit parsing —
# confirm against actual API output.
periodo <- as.Date(periodo)
df <- data.frame(periodo, datos)
as.Date(periodo)
print(mean(datos))
| /inegi_api.R | no_license | iamarin/api_economico | R | false | false | 818 | r | # api de datos economicos
# @elcontrafactual
# NOTE(review): byte-duplicate of the INEGI API script above (dataset
# concatenation artifact; its first comment line is fused into the
# preceding metadata line).
# libraries
library(httr)
library(jsonlite)
library(rjson)
library(lubridate)
# GDP (PIB): query indicator 493621 from the INEGI developers API (JSON)
url <- 'https://www.inegi.org.mx/app/api/indicadores/desarrolladores/jsonxml/INDICATOR/493621/es/0700/false/BIE/2.0/dafd0053-9a71-1cb4-f58d-4a7071445df0?type=json'
respuesta <- GET(url)
(datosGenerales <- content(respuesta,"text"))
(flujoDatos <- paste(datosGenerales,collapse = " "))
(flujoDatos <- fromJSON(flujoDatos))
(flujoDatos <- flujoDatos$Series)
(flujoDatos <- flujoDatos[[1]]$OBSERVATIONS)
View(flujoDatos)
# NOTE(review): vectors grown element-by-element inside the loop;
# preferable to preallocate or use vapply().
datos <- 0
periodo <- 0
for (i in 1:length(flujoDatos)){
  datos[i]<-flujoDatos[[i]]$OBS_VALUE
  periodo[i] <- flujoDatos[[i]]$TIME_PERIOD
}
datos<-as.numeric(datos)
periodo <- as.Date(periodo)
df <- data.frame(periodo,datos)
as.Date(periodo)
print(mean(datos))
|
/Assignment5_2.R | no_license | subasish/Statistical-Computing-Course | R | false | false | 402 | r | ||
# Build a "Dtable" object from a square distance matrix.
#
# Dmatrix: square numeric matrix of pairwise distances (no NAs, no
#   negatives, zero diagonal).
# PointType: optional vector of point types; if NULL, taken from the
#   matrix dimnames. Coerced to a factor.
# PointWeight: optional strictly positive weights; defaults to 1.
#
# Returns a list of class "Dtable" with elements Dmatrix, n (number of
# points) and marks (PointType, PointWeight).
Dtable <-
function(Dmatrix, PointType = NULL, PointWeight = NULL)
{
  # Dmatrix must be a distance matrix
  if (is.matrix(Dmatrix)) {
    if (nrow(Dmatrix) != ncol(Dmatrix))
      stop("Dmatrix should be a square matrix.")
    if (any(is.na(Dmatrix)))
      stop("NAs are not allowed in the distance matrix.")
    if (any(Dmatrix < 0))
      stop("negative values are not allowed in the distance matrix.")
    if (any(diag(Dmatrix) > 0))
      stop("diagonal values of the distance matrix must be 0.")
  } else {
    stop("Dmatrix must be a matrix.")
  }
  # Get PointType
  if (is.null(PointType)) {
    # Point types should be in the row or column names of the matrix
    if (!is.null(rownames(Dmatrix))) {
      PointType <- rownames(Dmatrix)
      if (!is.null(colnames(Dmatrix))) {
        # Check row and col names are identical.
        # Fix: compare the whole name vectors with identical(); the
        # previous `if (colnames(...) != rownames(...))` put a length-n
        # logical in a scalar if(), an error in R >= 4.2 whenever both
        # dimnames are set on a matrix with more than one row.
        if (!identical(colnames(Dmatrix), rownames(Dmatrix)))
          stop("row and column names of the distance matrix are different.")
      }
    } else {
      if (!is.null(colnames(Dmatrix)))
        PointType <- colnames(Dmatrix)
    }
  }
  # Check PointType (still NULL here means no dimnames were available)
  if (any(is.null(PointType)))
    stop("NULL values are not allowed in the point types.")
  if (any(is.na(PointType)))
    stop("NAs are not allowed in the point types.")
  if (length(PointType) != nrow(Dmatrix))
    stop("The vector of point types must have the same size as Dmatrix.")
  PointType <- as.factor(PointType)
  # Get PointWeight (default: unit weight for every point)
  if (is.null(PointWeight)) {
    PointWeight <- rep(1, length(PointType))
  } else {
    if (any(is.na(PointWeight)))
      stop("NAs are not allowed in the point weights.")
    if (any(PointWeight <= 0))
      stop("Point weights must be strictly positive.")
    if (length(PointWeight) != nrow(Dmatrix))
      stop("The vector of point weights must have the same size as Dmatrix.")
  }
  # Build the object
  Dt <- list(Dmatrix=Dmatrix,
             n=nrow(Dmatrix),
             marks=list(PointType=PointType, PointWeight=PointWeight)
  )
  class(Dt) <- "Dtable"
  return (Dt)
}
| /R/Dtable.R | no_license | EricMarcon/dbmss | R | false | false | 2,104 | r | Dtable <-
# NOTE(review): byte-duplicate of the Dtable definition above (dataset
# concatenation artifact); the assignment target "Dtable <-" is fused
# into the preceding metadata line. Carries the same defect: the
# row/column name check below uses a vector `!=` inside a scalar if(),
# which errors in R >= 4.2 when the matrix has more than one row.
function(Dmatrix, PointType = NULL, PointWeight = NULL)
{
  # Dmatrix must be a distance matrix
  if (is.matrix(Dmatrix)) {
    if (nrow(Dmatrix) != ncol(Dmatrix))
      stop("Dmatrix should be a square matrix.")
    if (any(is.na(Dmatrix)))
      stop("NAs are not allowed in the distance matrix.")
    if (any(Dmatrix < 0))
      stop("negative values are not allowed in the distance matrix.")
    if (any(diag(Dmatrix) > 0))
      stop("diagonal values of the distance matrix must be 0.")
  } else {
    stop("Dmatrix must be a matrix.")
  }
  # Get PointType
  if (is.null(PointType)) {
    # Point types should be in the row or column names of the matrix
    if (!is.null(rownames(Dmatrix))) {
      PointType <- rownames(Dmatrix)
      if (!is.null(colnames(Dmatrix))) {
        # Check row and col names are identical
        # NOTE(review): vector comparison in scalar if() — see header.
        if (colnames(Dmatrix) != rownames(Dmatrix))
          stop("row and column names of the distance matrix are different.")
      }
    } else {
      if (!is.null(colnames(Dmatrix)))
        PointType <- colnames(Dmatrix)
    }
  }
  # Check PointType
  if (any(is.null(PointType)))
    stop("NULL values are not allowed in the point types.")
  if (any(is.na(PointType)))
    stop("NAs are not allowed in the point types.")
  if (length(PointType) != nrow(Dmatrix))
    stop("The vector of point types must have the same size as Dmatrix.")
  PointType <- as.factor(PointType)
  # Get PointWeight (default: unit weight per point)
  if (is.null(PointWeight)) {
    PointWeight <- rep(1, length(PointType))
  } else {
    if (any(is.na(PointWeight)))
      stop("NAs are not allowed in the point weights.")
    if (any(PointWeight <= 0))
      stop("Point weights must be strictly positive.")
    if (length(PointWeight) != nrow(Dmatrix))
      stop("The vector of point weights must have the same size as Dmatrix.")
  }
  # Build the object
  Dt <- list(Dmatrix=Dmatrix,
             n=nrow(Dmatrix),
             marks=list(PointType=PointType, PointWeight=PointWeight)
  )
  class(Dt) <- "Dtable"
  return (Dt)
}
|
# Setup for pan-cancer Cox volcano plotting of spladder splice events.
# Parses command-line chunking arguments, loads the per-cancer survival
# table and the event annotation, and restricts both to this chunk.
library(gridExtra)
library(ggrepel)
library(ggplot2)
library(scales)
library(ggtext)
library(gplots)
library(argparser, quietly=TRUE)
## Create a parser
p <- arg_parser("chunk size")
## Add command line arguments
p <- add_argument(p, "--st", help="survival type")
p <- add_argument(p, "--chunkN", help="chunk N")
p <- add_argument(p, "--chunkI", help="chunk I")
## Parse the command line arguments
argv <- parse_args(p)
## as.numeric transform
survType <- argv$st
chunkN <- as.numeric(argv$chunkN)
chunkI <- as.numeric(argv$chunkI)
fileDir <- "/home/webdata/spladder/"
outDir <- "/home/webdata/spladder/pancox/"
# file include each survival data of all cancer types
fileName <- paste(fileDir,"spladder_ase_all_cancertype_",survType,".rds",sep="")
infoAS <- readRDS(file=fileName)
aseAll <- read.csv(file="/home/u1357/webdata/spladder/newAdd/spladder_pancan_info.csv")
# Display label: gene symbol + event id.
aseAll$idType <- paste(aseAll$Gene_Symbol,aseAll$Splice_Event,sep="_")
aseAll <- aseAll[,c("Splice_Event","idType")]
names(aseAll) <- c("SpliceEvent","idType")
aseAll <- aseAll[order(aseAll$SpliceEvent,decreasing = F),]
# Split events into chunkN chunks; the last chunk absorbs the remainder.
chunkSize <- nrow(aseAll)%/%chunkN
chunkLast <- chunkSize + nrow(aseAll)%%chunkN
aseAll$chunk <- c(rep(1:(chunkN-1),each=chunkSize),rep(chunkN,chunkLast))
aseAll <- aseAll[aseAll$chunk==chunkI,]
ase <- aseAll$SpliceEvent
infoAS <- infoAS[infoAS$SpliceEvent%in%ase,]
# Output-file suffix, e.g. "PanOS".
panx <- paste("Pan",survType,sep="")
# def function
# Draw the pan-cancer Cox volcano plot(s) for one splice event and write
# them to a PDF: per cancer type, log2(HR) vs -log10(p), point size
# mapped to the PSI cutoff. Two panels (median cutoff / optimal cutoff)
# are drawn when each has data for >= 3 cancer types; otherwise one
# panel or a text placeholder. Relies on file-scope objects infoAS,
# aseAll, outDir, panx, survType.
PanCox <- function(SpliceEvent){
  cat("\n",SpliceEvent)
  # Survival rows and display label for this event.
  infoX <- infoAS[infoAS$SpliceEvent==SpliceEvent,]
  idType <- aseAll[aseAll$SpliceEvent==SpliceEvent,"idType"]
  outFilePath <- paste(outDir,SpliceEvent,"-",panx,".pdf",sep = "")
  ## coxph
  # Median-cutoff columns (dfm) and optimal-cutoff columns (dff); dff is
  # renamed to the med names so both panels share the plotting code.
  dfmcol <- c("SpliceEvent","cancerType","cutmed","pvalHRmed","bhHRmed","HRmed","nMinHRmed","nEventHRmed","idType")
  dffcol <- c("SpliceEvent","cancerType","cutfit","pvalHRfit","bhHRfit","HRfit","nMinHRfit","nEventHRfit","idType")
  dfm <- infoX[,dfmcol]
  dfm <- na.omit(dfm)
  # Require > 10 samples in the smaller group and > 5 events.
  dfm <- dfm[dfm$nMinHRmed>10 & dfm$nEventHRmed>5,]
  dfm$logHRmed <- log2(dfm$HRmed)
  # p == 0 capped at 17 here but at 16 for the y-axis further below —
  # NOTE(review): the 17 vs 16 cap values look inconsistent; confirm.
  dfm$logpvalHRmed <- ifelse(dfm$pvalHRmed==0,17,-log10(dfm$pvalHRmed))
  dff <- infoX[,dffcol]
  names(dff) <- dfmcol
  dff <- na.omit(dff)
  dff <- dff[dff$nMinHRmed>10 & dff$nEventHRmed>5,]
  dff$logHRmed <- log2(dff$HRmed)
  dff$logpvalHRmed <- ifelse(dff$pvalHRmed==0,17,-log10(dff$pvalHRmed))
  ## def plot
  # One volcano panel; `type` selects the x-axis subtitle, `colors` the
  # per-cancer fill palette. Reads axis settings from the closure.
  coxplot <- function(df,type,colors){
    title <- idType
    if (type=="med"){
      Xtitle <- paste("log2(Hazard Ratio), ",survType,"\nMedian cutoff",sep="")
    }else if(type=="fit"){
      Xtitle <- paste("log2(Hazard Ratio), ",survType,"\nOptimal cutoff",sep="")
    }
    ggplot(df, aes(logHRmed,logpvalHRmed,fill=cancerType,label = cancerType)) +
      geom_text_repel(size=3) +
      geom_point(aes(size = cutmed),shape = 21, stroke=0.2, alpha=0.5, show.legend = T) +
      scale_size_continuous(limits=sizelimits, breaks=sizebreaks)+
      theme_bw(base_size = 9) +
      guides(fill = F,size=guide_legend(title="Cutoff")) +
      scale_fill_manual(values=colors) +
      scale_y_continuous(limits=Ylimits,breaks=Ybreaks,labels=Ylabels,position="left") +
      scale_x_continuous(limits=Xlimits,breaks=Xbreaks,labels=Xlabels,position="bottom") +
      labs(x=Xtitle,y="-log10(P-value)", title = title) +
      geom_hline(yintercept = cutoff, size = 0.4, color="#dd7d6a",linetype = "dashed") +
      theme(title=element_text(size=9),
            #axis.text.x = element_text(size=9,color=Xcolor),
            #axis.text.y = element_text(size=9,color=Ycolor),
            axis.text.x = element_markdown(size=9,color=Xcolor),
            axis.text.y = element_markdown(size=9,color=Ycolor),
            axis.title = element_text(size=9),
            legend.title = element_text(size=9))
  }
  ## X-axis
  # Symmetric x-limits from the largest |log2(HR)| across both panels.
  df <- rbind(dfm,dff)
  df$logHRmed <- log2(df$HRmed)
  maxhr <- max(abs(df$logHRmed))
  n = floor(-log(maxhr, 10) + 1)
  maxval <- round(maxhr,n)
  Xlimits <- c(-max(maxhr,maxval),max(maxhr,maxval))
  Xbreaks <- c(-maxval,-maxval/2,0,maxval/2,maxval)
  Xquat <- maxval/2
  # def function for x-axis transform
  # Piecewise squash: |log2(HR)| <= 2 occupies half the axis and larger
  # values are compressed, so extreme HRs don't flatten the plot.
  Xtransf <- function(data){
    xCut <- 2
    if(abs(data)<=xCut){
      re <- data*Xquat/xCut
    }else if(data>xCut){
      re <- (1+(data-xCut)/(maxval-xCut))*Xquat
    }else if(data< -xCut){
      re <- (-1+(data-(-xCut))/(maxval-xCut))*Xquat
    }
    return(re)
  }
  # Apply the squash only when the range is wide and there are points
  # in the inner region worth preserving.
  Xtf <- length(dfm$logHRmed[dfm$logHRmed<=2])>1 | length(dff$logHRmed[dff$logHRmed<=2])>1
  if (maxval>10 & Xtf){
    dfm$logHRmed <- sapply(dfm$logHRmed,Xtransf)
    dff$logHRmed <- sapply(dff$logHRmed,Xtransf)
    Xlabels <- c(-maxval,-2,0,2,maxval)
    Xcolor <- c("black","red","black","red","black")
  }else{
    Xlabels <- Xbreaks
    Xcolor <- rep("black",length(Xbreaks))
  }
  ## Y-axis
  # min(p)<10-16
  df$logpvalHRmed <- ifelse(df$pvalHRmed==0,16,-log10(df$pvalHRmed))
  maxbh <- max(df$logpvalHRmed)
  y = floor(-log(maxbh, 10) + 1)
  Ymaxval <- ifelse(round(maxbh,y)<2,2,round(maxbh,y))
  Ylimits <- c(0,max(maxbh,Ymaxval))
  # When the cap was hit, mark the 16 tick as "Inf" in red.
  if (maxbh==16){
    Ybreaks <- c(0,Ymaxval*0.25,Ymaxval*0.5,Ymaxval*0.75,16,Ymaxval)
    Ylabels <- c(Ybreaks[1:5],"Inf")
    Ycolor <- c("black","black","black","black","black","red")
    cutoff <- -log(0.05,10)
  }else{
    Ybreaks <- c(0,Ymaxval*0.25,Ymaxval*0.5,Ymaxval*0.75,Ymaxval)
    Ylabels <- Ybreaks
    Ycolor <- rep("black",length(Ybreaks))
    cutoff <- -log(0.05,10)
  }
  ## point size - psi cutoff
  # Legend limits/breaks for the PSI-cutoff size scale. NOTE(review):
  # `range` here shadows base::range, and the range==0 / range<0.05
  # branches are identical.
  maxsize <- max(df$cutmed)
  minsize <- min(df$cutmed)
  range <- maxsize-minsize
  if (range==0){
    sizelimits = c(minsize,maxsize)
    sizebreaks = c(minsize,maxsize)
  }else if (range<0.05){
    sizelimits = c(minsize,maxsize)
    sizebreaks = c(minsize,maxsize)
  }else{
    x = ceiling(-log(maxsize, 10) + 1)
    n = ceiling(-log(minsize, 10) + 1)
    maxval <- round(maxsize,x)
    minval <- round(minsize,n)
    sizelimits = c(min(minval,minsize),max(maxval,maxsize))
    if(minval==0){
      sizebreaks <- round(seq(minval,maxval,(maxval-minval)/4)[-1],2)
    }else{
      sizebreaks <- round(seq(minval,maxval,(maxval-minval)/4),2)
    }
  }
  ## color
  # Stable palette: colors assigned over the union of cancer types,
  # then subset for each panel so colors match across panels.
  dfcol <- data.frame(table(df$cancerType))
  if(nrow(dfcol)>0){
    colors <- hue_pal()(nrow(dfcol))
    colors1 <- colors[dfcol$Var1%in%dfm$cancerType]
    colors2 <- colors[dfcol$Var1%in%dff$cancerType]
  }
  # Draw a panel only when it has >= 3 cancer types; otherwise reduce
  # to one panel or a text placeholder PDF.
  nCancer=3
  if (nrow(dfm)>=nCancer & nrow(dff)>=nCancer){
    width=10
    p1 <- coxplot(dfm,"med",colors1)
    p2 <- coxplot(dff,"fit",colors2)
    pdf(file=outFilePath,width=width,height=4)
    suppressWarnings(grid.arrange(p1,p2, ncol=2))
    dev.off()
  }else if (nrow(dfm)>=nCancer & nrow(dff)<nCancer){
    width=5
    p1 <- coxplot(dfm,"med",colors1)
    pdf(file=outFilePath,width=width,height=4)
    suppressWarnings(print(p1))
    dev.off()
  }else if (nrow(dfm)<nCancer & nrow(dff)>=nCancer){
    width=5
    p2 <- coxplot(dff,"fit",colors2)
    pdf(file=outFilePath,width=width,height=4)
    suppressWarnings(print(p2))
    dev.off()
  }else if (nrow(dfm)<nCancer & nrow(dff)<nCancer){
    width=5
    pdf(file=outFilePath,width=width,height=4)
    temptext1 <- paste("! ",SpliceEvent,"\nNo data presented.\n","Try another one!",sep="")
    textplot(temptext1,valign="top", cex=1.3, halign= "center",col="grey")
    dev.off()
  }
}
PanNon <- function(SpliceEvent){
  # Emit a placeholder PDF for an event with no survival data at all.
  # Uses file-scope outDir/panx for the output path and gplots::textplot
  # for the message page.
  outFilePath <- paste(outDir,SpliceEvent,"-",panx,".pdf",sep = "")
  pdf(file=outFilePath,width=5,height=4)
  msg <- paste("! ",SpliceEvent,"\nNo data presented.\n","Try another one!",sep="")
  textplot(msg,valign="top", cex=1.3, halign= "center",col="grey")
  dev.off()
}
# Plot every event in this chunk; for overall survival, also emit
# placeholder PDFs for chunk events that have no survival rows at all.
sapply(ase,PanCox)
if (survType=="OS"){
  aseNon <- ase[!ase%in%infoAS$SpliceEvent]
  sapply(aseNon,PanNon)
}
| /v2/spladder_pancox.R | no_license | buzhizhang121/OncoSplicing | R | false | false | 7,709 | r | library(gridExtra)
library(ggrepel)
library(ggplot2)
library(scales)
library(ggtext)
library(gplots)
library(argparser, quietly=TRUE)
## Create a parser
p <- arg_parser("chunk size")
## Add command line arguments
p <- add_argument(p, "--st", help="survival type")
p <- add_argument(p, "--chunkN", help="chunk N")
p <- add_argument(p, "--chunkI", help="chunk I")
## Parse the command line arguments
argv <- parse_args(p)
## as.numeric transform
survType <- argv$st
chunkN <- as.numeric(argv$chunkN)
chunkI <- as.numeric(argv$chunkI)
fileDir <- "/home/webdata/spladder/"
outDir <- "/home/webdata/spladder/pancox/"
# file include each survival data of all cancer types
fileName <- paste(fileDir,"spladder_ase_all_cancertype_",survType,".rds",sep="")
infoAS <- readRDS(file=fileName)
aseAll <- read.csv(file="/home/u1357/webdata/spladder/newAdd/spladder_pancan_info.csv")
aseAll$idType <- paste(aseAll$Gene_Symbol,aseAll$Splice_Event,sep="_")
aseAll <- aseAll[,c("Splice_Event","idType")]
names(aseAll) <- c("SpliceEvent","idType")
aseAll <- aseAll[order(aseAll$SpliceEvent,decreasing = F),]
chunkSize <- nrow(aseAll)%/%chunkN
chunkLast <- chunkSize + nrow(aseAll)%%chunkN
aseAll$chunk <- c(rep(1:(chunkN-1),each=chunkSize),rep(chunkN,chunkLast))
aseAll <- aseAll[aseAll$chunk==chunkI,]
ase <- aseAll$SpliceEvent
infoAS <- infoAS[infoAS$SpliceEvent%in%ase,]
panx <- paste("Pan",survType,sep="")
# def function
PanCox <- function(SpliceEvent){
cat("\n",SpliceEvent)
infoX <- infoAS[infoAS$SpliceEvent==SpliceEvent,]
idType <- aseAll[aseAll$SpliceEvent==SpliceEvent,"idType"]
outFilePath <- paste(outDir,SpliceEvent,"-",panx,".pdf",sep = "")
## coxph
dfmcol <- c("SpliceEvent","cancerType","cutmed","pvalHRmed","bhHRmed","HRmed","nMinHRmed","nEventHRmed","idType")
dffcol <- c("SpliceEvent","cancerType","cutfit","pvalHRfit","bhHRfit","HRfit","nMinHRfit","nEventHRfit","idType")
dfm <- infoX[,dfmcol]
dfm <- na.omit(dfm)
dfm <- dfm[dfm$nMinHRmed>10 & dfm$nEventHRmed>5,]
dfm$logHRmed <- log2(dfm$HRmed)
dfm$logpvalHRmed <- ifelse(dfm$pvalHRmed==0,17,-log10(dfm$pvalHRmed))
dff <- infoX[,dffcol]
names(dff) <- dfmcol
dff <- na.omit(dff)
dff <- dff[dff$nMinHRmed>10 & dff$nEventHRmed>5,]
dff$logHRmed <- log2(dff$HRmed)
dff$logpvalHRmed <- ifelse(dff$pvalHRmed==0,17,-log10(dff$pvalHRmed))
## def plot
coxplot <- function(df,type,colors){
title <- idType
if (type=="med"){
Xtitle <- paste("log2(Hazard Ratio), ",survType,"\nMedian cutoff",sep="")
}else if(type=="fit"){
Xtitle <- paste("log2(Hazard Ratio), ",survType,"\nOptimal cutoff",sep="")
}
ggplot(df, aes(logHRmed,logpvalHRmed,fill=cancerType,label = cancerType)) +
geom_text_repel(size=3) +
geom_point(aes(size = cutmed),shape = 21, stroke=0.2, alpha=0.5, show.legend = T) +
scale_size_continuous(limits=sizelimits, breaks=sizebreaks)+
theme_bw(base_size = 9) +
guides(fill = F,size=guide_legend(title="Cutoff")) +
scale_fill_manual(values=colors) +
scale_y_continuous(limits=Ylimits,breaks=Ybreaks,labels=Ylabels,position="left") +
scale_x_continuous(limits=Xlimits,breaks=Xbreaks,labels=Xlabels,position="bottom") +
labs(x=Xtitle,y="-log10(P-value)", title = title) +
geom_hline(yintercept = cutoff, size = 0.4, color="#dd7d6a",linetype = "dashed") +
theme(title=element_text(size=9),
#axis.text.x = element_text(size=9,color=Xcolor),
#axis.text.y = element_text(size=9,color=Ycolor),
axis.text.x = element_markdown(size=9,color=Xcolor),
axis.text.y = element_markdown(size=9,color=Ycolor),
axis.title = element_text(size=9),
legend.title = element_text(size=9))
}
## X-axis
df <- rbind(dfm,dff)
df$logHRmed <- log2(df$HRmed)
maxhr <- max(abs(df$logHRmed))
n = floor(-log(maxhr, 10) + 1)
maxval <- round(maxhr,n)
Xlimits <- c(-max(maxhr,maxval),max(maxhr,maxval))
Xbreaks <- c(-maxval,-maxval/2,0,maxval/2,maxval)
Xquat <- maxval/2
# def function for x-axis transform
Xtransf <- function(data){
xCut <- 2
if(abs(data)<=xCut){
re <- data*Xquat/xCut
}else if(data>xCut){
re <- (1+(data-xCut)/(maxval-xCut))*Xquat
}else if(data< -xCut){
re <- (-1+(data-(-xCut))/(maxval-xCut))*Xquat
}
return(re)
}
Xtf <- length(dfm$logHRmed[dfm$logHRmed<=2])>1 | length(dff$logHRmed[dff$logHRmed<=2])>1
if (maxval>10 & Xtf){
dfm$logHRmed <- sapply(dfm$logHRmed,Xtransf)
dff$logHRmed <- sapply(dff$logHRmed,Xtransf)
Xlabels <- c(-maxval,-2,0,2,maxval)
Xcolor <- c("black","red","black","red","black")
}else{
Xlabels <- Xbreaks
Xcolor <- rep("black",length(Xbreaks))
}
## Y-axis
# min(p)<10-16
df$logpvalHRmed <- ifelse(df$pvalHRmed==0,16,-log10(df$pvalHRmed))
maxbh <- max(df$logpvalHRmed)
y = floor(-log(maxbh, 10) + 1)
Ymaxval <- ifelse(round(maxbh,y)<2,2,round(maxbh,y))
Ylimits <- c(0,max(maxbh,Ymaxval))
if (maxbh==16){
Ybreaks <- c(0,Ymaxval*0.25,Ymaxval*0.5,Ymaxval*0.75,16,Ymaxval)
Ylabels <- c(Ybreaks[1:5],"Inf")
Ycolor <- c("black","black","black","black","black","red")
cutoff <- -log(0.05,10)
}else{
Ybreaks <- c(0,Ymaxval*0.25,Ymaxval*0.5,Ymaxval*0.75,Ymaxval)
Ylabels <- Ybreaks
Ycolor <- rep("black",length(Ybreaks))
cutoff <- -log(0.05,10)
}
## point size - psi cutoff
maxsize <- max(df$cutmed)
minsize <- min(df$cutmed)
range <- maxsize-minsize
if (range==0){
sizelimits = c(minsize,maxsize)
sizebreaks = c(minsize,maxsize)
}else if (range<0.05){
sizelimits = c(minsize,maxsize)
sizebreaks = c(minsize,maxsize)
}else{
x = ceiling(-log(maxsize, 10) + 1)
n = ceiling(-log(minsize, 10) + 1)
maxval <- round(maxsize,x)
minval <- round(minsize,n)
sizelimits = c(min(minval,minsize),max(maxval,maxsize))
if(minval==0){
sizebreaks <- round(seq(minval,maxval,(maxval-minval)/4)[-1],2)
}else{
sizebreaks <- round(seq(minval,maxval,(maxval-minval)/4),2)
}
}
## color
dfcol <- data.frame(table(df$cancerType))
if(nrow(dfcol)>0){
colors <- hue_pal()(nrow(dfcol))
colors1 <- colors[dfcol$Var1%in%dfm$cancerType]
colors2 <- colors[dfcol$Var1%in%dff$cancerType]
}
nCancer=3
if (nrow(dfm)>=nCancer & nrow(dff)>=nCancer){
width=10
p1 <- coxplot(dfm,"med",colors1)
p2 <- coxplot(dff,"fit",colors2)
pdf(file=outFilePath,width=width,height=4)
suppressWarnings(grid.arrange(p1,p2, ncol=2))
dev.off()
}else if (nrow(dfm)>=nCancer & nrow(dff)<nCancer){
width=5
p1 <- coxplot(dfm,"med",colors1)
pdf(file=outFilePath,width=width,height=4)
suppressWarnings(print(p1))
dev.off()
}else if (nrow(dfm)<nCancer & nrow(dff)>=nCancer){
width=5
p2 <- coxplot(dff,"fit",colors2)
pdf(file=outFilePath,width=width,height=4)
suppressWarnings(print(p2))
dev.off()
}else if (nrow(dfm)<nCancer & nrow(dff)<nCancer){
width=5
pdf(file=outFilePath,width=width,height=4)
temptext1 <- paste("! ",SpliceEvent,"\nNo data presented.\n","Try another one!",sep="")
textplot(temptext1,valign="top", cex=1.3, halign= "center",col="grey")
dev.off()
}
}
PanNon <- function(SpliceEvent){
outFilePath <- paste(outDir,SpliceEvent,"-",panx,".pdf",sep = "")
width=5
pdf(file=outFilePath,width=width,height=4)
temptext1 <- paste("! ",SpliceEvent,"\nNo data presented.\n","Try another one!",sep="")
textplot(temptext1,valign="top", cex=1.3, halign= "center",col="grey")
dev.off()
}
sapply(ase,PanCox)
if (survType=="OS"){
aseNon <- ase[!ase%in%infoAS$SpliceEvent]
sapply(aseNon,PanNon)
}
|
#' Automatically detect variables from an Mplus model object
#'
#' This is a function to automatically detect the variables used in an
#' Mplus model object.
#'
#'
#' @param object An Mplus model object from \code{mplusObject}.#'
#' @return A vector of variables from the R dataset to use.
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @export
#' @importFrom stats na.omit
#' @seealso \code{\link{mplusModeler}}, \code{\link{mplusObject}}
#' @keywords interface
#' @examples
#'
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' rdata = mtcars, autov = FALSE)
#' example1$usevariables
#' MplusAutomation:::detectVariables(example1)
#'
#' example2 <- mplusObject(MODEL = "mpg ON wt;",
#' rdata = mtcars, autov = TRUE)
#' example2$usevariables
#' example3 <- update(example2,
#' MODEL = ~ . + "mpg ON qsec; wt WITH qsec;",
#' autov = TRUE)
#' example3$usevariables
#' rm(example1, example2, example3)
detectVariables <- function(object) {
if (!is.null(object$MONTECARLO)) {
stop("detectVariables() does not work with MONTECARLO models")
}
if (!is.null(object$rdata) && !is.null(object$MODEL)) {
if (object$imputed) {
v <- colnames(object$rdata[[1]])
} else {
v <- colnames(object$rdata)
}
tmpVARIABLE <- unlist(c(
tryCatch(unlist(strsplit(object$VARIABLE, split = ";")), error = function(e) ""),
tryCatch(unlist(strsplit(object$DEFINE, split = ";")), error = function(e) ""),
tryCatch(unlist(strsplit(object$MODEL, split = ";")), error = function(e) "")))
tmpVARIABLE <- na.omit(tmpVARIABLE)
tmpVARIABLE <- tmpVARIABLE[nzchar(tmpVARIABLE)]
tmpVARIABLE <- gsub("\\s", "", unique(unlist(lapply(tmpVARIABLE, function(x) {
x <- gsub("\n|\t|\r", "", gsub("^(.*)(=| ARE | are | IS | is )(.*)$", "\\3", x))
xalt <- unique(unlist(strsplit(x, split = "\\s")))
y <- separateHyphens(x)
if (is.list(y)) {
lapply(y, function(z) {
if (all(sapply(z, function(var) any(grepl(var, x = v, ignore.case=TRUE))))) {
i1 <- which(grepl(z[[1]], x = v, ignore.case = TRUE))
if (length(i1) > 1) {
test <- tolower(z[[1]]) == tolower(v)
if (any(test)) {
i1 <- which(test)[1]
}
}
i2 <- which(grepl(z[[2]], x = v, ignore.case = TRUE))
if (length(i2) > 1) {
test <- tolower(z[[2]]) == tolower(v)
if (any(test)) {
i2 <- which(test)[1]
}
}
if (length(i1) && length(i2)) {
c(v[i1:i2], xalt)
}
} else {
c(unlist(z), xalt)
}
})
} else {
xalt
}
}))))
message("R variables selected automatically as any variable name that\noccurs in the MODEL, VARIABLE, or DEFINE section.")
usevariables <- unique(v[sapply(v, function(var) any(grepl(var, x = tmpVARIABLE, ignore.case = TRUE)))])
if (!isTRUE(grepl("usevariables", object$VARIABLE, ignore.case=TRUE))) {
message(sprintf("If any issues, suggest explicitly specifying USEVARIABLES.\nA starting point may be:\nUSEVARIABLES = %s;",
paste(usevariables, collapse = " ")))
}
}
return(usevariables)
}
#' Create an Mplus model object
#'
#' This is a function to create an Mplus model object in \code{R}.
#' The object holds all the sections of an Mplus input file, plus some
#' extra \code{R} ones. Once created, the model can be run using other
#' functions such as \code{mplusModeler} or updated using methods defined
#' for the \code{update} function.
#'
#' Mplus model objects allow a base model to be defined,
#' and then flexibly update the data, change the precise model, etc. If a section
#' does not vary between models, you can leave it the same. For example, suppose
#' you are fitting a number of models, but in all cases, wish to use maximum likelihood
#' estimator, \dQuote{ANALYSIS: ESTIMATOR = ML;} and would like standardized output,
#' \dQuote{OUTPUT: STDYX;}. Rather than retype those in every model, they can be defined
#' in one Mplus model object, and then that can simply be updated with different models,
#' leaving the analysis and output sections untouched. This also means that if a reviewer
#' comes back and asks for all analyses to be re-run say using the robust maximum likelihood
#' estimator, all you have to do is change it in the model object once, and re run all your code.
#'
#' @param TITLE A character string of the title for Mplus.
#' @param DATA A charater string of the data section for Mplus (note, do not define
#' the filename as this is generated automatically)
#' @param VARIABLE A character string of the variable section for Mplus (note, do not
#' define the variable names from the dataset as this is generated automatically)
#' @param DEFINE A character string of the define section for Mplus (optional)
#' @param MONTECARLO A character string of the montecarlo section for Mplus (optional).
#' If used, \code{autov} is defaults to \code{FALSE} instead of the usual default,
#' \code{TRUE}, but may still be overwritten, if desired.
#' @param MODELPOPULATION A character string of the MODEL POPULATION section for Mplus (optional).
#' @param MODELMISSING A character string of the MODEL MISSING section for Mplus (optional).
#' @param ANALYSIS A character string of the analysis section for Mplus (optional)
#' @param MODEL A character string of the model section for Mplus (optional, although
#' typically you want to define a model)
#' @param MODELINDIRECT A character string of the MODEL INDIRECT section for Mplus (optional).
#' @param MODELCONSTRAINT A character string of the MODEL CONSTRAINT section for Mplus (optional).
#' @param MODELTEST A character string of the MODEL TEST section for Mplus (optional).
#' @param MODELPRIORS A character string of the MODEL PRIORS section for Mplus (optional).
#' @param OUTPUT A character string of the output section for Mplus (optional)
#' @param SAVEDATA A character string of the savedata section for Mplus (optional)
#' @param PLOT A character string of the plot section for Mplus (optional)
#' @param usevariables A character vector of the variables from the
#' \code{R} dataset to use in the model.
#' @param rdata An \code{R} dataset to be used for the model.
#' @param autov A logical (defaults to \code{TRUE}) argument indicating
#' whether R should attempt to guess the correct variables to use from
#' the R dataset, if \code{usevariables} is left \code{NULL}.
#' @param imputed A logical whether the data are multiply imputed (a list).
#' Defaults to \code{FALSE}.
#'
#' @return A list of class \code{mplusObject} with elements
#' \item{TITLE}{The title in Mplus (if defined)}
#' \item{DATA}{The data section in Mplus (if defined)}
#' \item{VARIABLE}{The variable section in Mplus (if defined)}
#' \item{DEFINE}{The define section in Mplus (if defined)}
#' \item{MONTECARLO}{The montecarlo section in Mplus (if defined)}
#' \item{MODELPOPULATION}{The modelpopulation section in Mplus (if defined)}
#' \item{MODELMISSING}{The modelmissing section in Mplus (if defined)}
#' \item{ANALYSIS}{The analysis section in Mplus (if defined)}
#' \item{MODEL}{The model section in Mplus (if defined)}
#' \item{MODELINDIRECT}{The modelindirect section in Mplus (if defined)}
#' \item{MODELCONSTRAINT}{The modelconstraint section in Mplus (if defined)}
#' \item{MODELTEST}{The modeltest section in Mplus (if defined)}
#' \item{MODELPRIORS}{The modelpriors section in Mplus (if defined)}
#' \item{OUTPUT}{The output section in Mplus (if defined)}
#' \item{SAVEDATA}{The savedata section in Mplus (if defined)}
#' \item{PLOT}{The plot section in Mplus (if defined)}
#' \item{results}{NULL by default, but can be later updated to include the results from the model run.}
#' \item{usevariables}{A character vector of the variables from the \code{R} data set to be used.}
#' \item{rdata}{The \code{R} data set to use for the model.}
#' \item{imputed}{A logical whether the data are multiply imputed.}
#' \item{autov}{A logical whether the data should have the usevariables detected automatically or not}
#'
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @export
#' @seealso \code{\link{mplusModeler}}
#' @keywords interface
#' @examples
#'
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' usevariables = c("mpg", "hp"), rdata = mtcars)
#' str(example1)
#' rm(example1)
#'
#' # R figures out the variables automagically, with a message
#' example2 <- mplusObject(MODEL = "mpg ON wt;",
#' rdata = mtcars, autov = TRUE)
#' str(example2)
#' rm(example2)
#'
#' # R can also try to figure out a list of variables when
#' # variable names are hyphenated first-last variable, all variables
#' # between the first and last one will be included
#' example3 <- mplusObject(MODEL = "mpg ON wt-vs;",
#' rdata = mtcars, autov = TRUE)
#' str(example3)
#' rm(example3)
#'
#' # R warns if the first 8 characters of a (used) variable name are not unique
#' # as they will be indistinguishable in the Mplus output
#' example4 <- mplusObject(MODEL = "basename_01 ON basename_02;",
#' rdata = data.frame(basename_01 = 1:5, basename_02 = 5:1),
#' autov = TRUE)
#' rm(example4)
mplusObject <- function(TITLE = NULL, DATA = NULL, VARIABLE = NULL, DEFINE = NULL,
                        MONTECARLO = NULL, MODELPOPULATION = NULL, MODELMISSING = NULL,
                        ANALYSIS = NULL, MODEL = NULL, MODELINDIRECT = NULL,
                        MODELCONSTRAINT = NULL, MODELTEST = NULL, MODELPRIORS = NULL,
                        OUTPUT = NULL, SAVEDATA = NULL, PLOT = NULL,
                        usevariables = NULL, rdata = NULL, autov = TRUE, imputed = FALSE) {
  ## Collect the Mplus syntax sections in their canonical order; unspecified
  ## sections stay NULL so later code can test for their presence.
  sections <- list(
    TITLE = TITLE,
    DATA = DATA,
    VARIABLE = VARIABLE,
    DEFINE = DEFINE,
    MONTECARLO = MONTECARLO,
    MODELPOPULATION = MODELPOPULATION,
    MODELMISSING = MODELMISSING,
    ANALYSIS = ANALYSIS,
    MODEL = MODEL,
    MODELINDIRECT = MODELINDIRECT,
    MODELCONSTRAINT = MODELCONSTRAINT,
    MODELTEST = MODELTEST,
    MODELPRIORS = MODELPRIORS,
    OUTPUT = OUTPUT,
    SAVEDATA = SAVEDATA,
    PLOT = PLOT)

  ## Every section must be either character syntax or absent (NULL).
  for (s in sections) {
    stopifnot(is.character(s) || is.null(s))
  }

  ## Append the R-side bookkeeping elements; `results` is filled in later
  ## (e.g., by mplusModeler) once the model has been estimated.
  out <- c(sections, list(
    results = NULL,
    usevariables = usevariables,
    rdata = rdata,
    imputed = imputed,
    autov = autov))
  class(out) <- c("mplusObject", "list")

  ## Monte Carlo models simulate their own data, so automatic variable
  ## detection is disabled unless the user explicitly requested it.
  if (!is.null(MONTECARLO) && missing(autov)) {
    out$autov <- autov <- FALSE
  }
  ## When asked (and possible), guess the usevariables from the MODEL syntax.
  if (autov && is.null(usevariables) && !is.null(rdata) && !is.null(MODEL)) {
    out$usevariables <- detectVariables(out)
  }
  ## Mplus output only distinguishes the first 8 characters of a variable
  ## name, so warn about names that collide within that prefix.
  clash <- duplicated(substr(out$usevariables, start = 1, stop = 8))
  if (any(clash)) {
    message(sprintf("The following variables are not unique in the first 8 characters:\n %s",
      paste(out$usevariables[clash], collapse = ", ")))
  }
  return(out)
}
#' Update an Mplus model object
#'
#' This is a method for updating an Mplus model object.
#' It takes an Mplus model object as the first argument, and
#' then optionally any sections to update. There are two ways
#' to update a section using a formula interface. \code{~ "new stuff"} will
#' replace a given section with the new text. Alternately, you can add
#' additional text using \code{~ + "additional stuff"}. Combined these let you
#' replace or add to a section.
#'
#' @param object An object of class mplusObject
#' @param \dots Additional arguments to pass on
#' @return An (updated) Mplus model object
#' @export
#' @method update mplusObject
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @keywords interface
#' @examples
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' usevariables = c("mpg", "hp"), rdata = mtcars)
#' x <- ~ "ESTIMATOR = ML;"
#' str(update(example1, rdata = iris))
#' str(update(example1, ANALYSIS = x))
#' str(update(example1, MODEL = ~ "wt ON hp;"))
#' str(update(example1, MODEL = ~ . + "wt ON hp;"))
#' str(update(example1, ANALYSIS = x, MODEL = ~ . + "wt ON hp;"))
#'
#' # test to make sure . in Mplus code does not cause problems
#' str(update(example1, ANALYSIS = x, MODEL = ~ . + "wt ON hp*.5;"))
#' rm(example1, x)
update.mplusObject <- function(object, ...) {
  ## Nothing to update: hand the object straight back.
  args <- list(...)
  if (!length(args)) return(object)

  ## Names that belong to the Mplus syntax sections (updated via formulas)
  ## versus the R-side elements (replaced directly).
  syntaxSections <- c("TITLE", "DATA", "VARIABLE", "DEFINE",
    "MONTECARLO", "MODELPOPULATION", "MODELMISSING", "ANALYSIS",
    "MODEL", "MODELINDIRECT", "MODELCONSTRAINT", "MODELTEST", "MODELPRIORS",
    "OUTPUT", "SAVEDATA", "PLOT")
  rSections <- c("results", "usevariables", "rdata")

  suppliedNames <- names(args)
  syntaxUpdates <- suppliedNames[suppliedNames %in% syntaxSections]
  rUpdates <- suppliedNames[suppliedNames %in% rSections]

  ## Syntax sections arrive as one-sided formulas:
  ##   ~ "new text"       replaces the section
  ##   ~ . + "new text"   appends to the existing section
  for (nm in syntaxUpdates) {
    rhs <- as.character(args[[nm]][[2]])
    if (any(grepl("^\\.$", rhs))) {
      existing <- paste0(object[[nm]], "\n")
    } else {
      existing <- ""
    }
    addition <- rhs[length(rhs)]
    object[[nm]] <- paste(existing, addition, collapse = "\n")
  }

  if (length(rUpdates)) {
    object[rUpdates] <- args[rUpdates]
  }

  ## If automatic variable detection is on, refresh the usevariables so
  ## they stay consistent with the (possibly new) MODEL and rdata.
  if (object$autov) {
    object$usevariables <- detectVariables(object)
  }
  return(object)
}
#' Create the Mplus input text for an mplusObject
#'
#' This function takes an object of class \code{mplusObject} and creates
#' the Mplus input text corresponding to it, including data link and
#' variable names.
#'
#' @param object An object of class mplusObject
#' @param filename The name of the data file as a character vector
#' @param check A logical indicating whether or not to run \code{parseMplus}
#' on the created input file. Checks for errors like lines that are too long,
#' or for missing semi-colons and gives notes.
#' @param add A logical passed on to \code{parseMplus} whether to add semi
#' colons to line ends. Defaults to \code{FALSE}.
#' @param imputed A logical whether the data are multiply imputed.
#' Defaults to \code{FALSE}.
#' @return A character string containing all the text for the Mplus
#' input file.
#' @keywords interface
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @seealso \code{\link{prepareMplusData}}, \code{\link{mplusModeler}}
#' @examples
#' # example mplusObject
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' usevariables = c("mpg", "hp"), rdata = mtcars)
#'
#' # create the Mplus input text
#' cat(createSyntax(example1, "example1.dat"), file=stdout(), fill=TRUE)
#'
#' # update the object, then create input text
#' cat(createSyntax(update(example1,
#' TITLE = ~ "This is my title;",
#' MODEL = ~ . + "\nmpg ON hp;",
#' usevariables = c("mpg", "hp", "wt")), "example1.dat"),
#' file=stdout(),
#' fill=TRUE)
#' rm(example1)
#' closeAllConnections()
createSyntax <- function(object, filename, check=TRUE, add=FALSE, imputed=FALSE) {
  stopifnot(inherits(object, "mplusObject"))
  ## Map the list element names onto the section labels Mplus expects
  ## (e.g., MODELCONSTRAINT is written out as "MODEL CONSTRAINT").
  mplusList <- data.frame(
    Names = c("TITLE", "DATA", "VARIABLE", "DEFINE",
      "MONTECARLO", "MODELPOPULATION", "MODELMISSING", "ANALYSIS",
      "MODEL", "MODELINDIRECT", "MODELCONSTRAINT", "MODELTEST", "MODELPRIORS",
      "OUTPUT", "SAVEDATA", "PLOT"),
    Labels = c("TITLE", "DATA", "VARIABLE", "DEFINE",
      "MONTECARLO", "MODEL POPULATION", "MODEL MISSING", "ANALYSIS",
      "MODEL", "MODEL INDIRECT", "MODEL CONSTRAINT", "MODEL TEST", "MODEL PRIORS",
      "OUTPUT", "SAVEDATA", "PLOT"),
    stringsAsFactors = FALSE)
  ## A MONTECARLO section means Mplus simulates its own data,
  ## so no data file is linked.
  simulation <- !is.null(object$MONTECARLO)
  if (!simulation && !missing(filename)) {
    dFile <- paste0("FILE = \"", filename, "\";\n")
    if (imputed) {
      ## Multiply imputed data: `filename` names a list of data files.
      dFile <- paste0(dFile, "TYPE = IMPUTATION;\n")
    }
    object$DATA <- paste(dFile, object$DATA, collapse = "\n")
  }
  ## Prepend the variable names (and missing code) derived from the R data.
  if (!is.null(object$rdata) && !is.null(object$usevariables)) {
    if (object$imputed) {
      ## For imputed data, derive names from the first imputed dataset.
      vNames <- createVarSyntax(object$rdata[[1]][, object$usevariables])
    } else {
      vNames <- createVarSyntax(object$rdata[, object$usevariables])
    }
    object$VARIABLE <- paste(vNames, "MISSING=.;\n", object$VARIABLE, collapse = "\n")
  }
  ## TRUE when a section holds at least one non-whitespace character.
  ## Using any() keeps the predicate scalar even if a user supplied a
  ## multi-element character vector for a section; vapply() (rather than
  ## sapply()) guarantees a logical vector suitable for indexing.
  hasContent <- function(x) {
    !is.null(x) && any(nzchar(gsub("\\s*", "", x, perl = TRUE)))
  }
  index <- vapply(object[mplusList$Names], hasContent, logical(1))
  sections <- mplusList$Names[index]
  ## Emit each non-empty section as "LABEL:" followed by its syntax.
  body <- unlist(lapply(sections, function(n) {
    c(paste0(mplusList$Labels[match(n, mplusList$Names)], ":"), object[[n]])
  }))
  body <- paste(body, collapse = "\n")
  if (check) {
    ## Optionally lint the syntax (line lengths, missing semicolons).
    body <- parseMplus(body, add = add)
  }
  return(body)
}
#' Create, run, and read Mplus models.
#'
#' This is a convenience wrapper to automate many of the
#' usual steps required to run an Mplus model. It relies in part
#' on functions from the MplusAutomation package.
#'
#' Combined with functions from the MplusAutomation package,
#' this function is designed to make it easy to fit Mplus models
#' from R and to ease many of the usual frustrations with Mplus.
#' For example, Mplus has very specific formats it accepts data in,
#' but also very little data management facilities. Using \R data
#' management is easy. This function is designed to make using data
#' from \R in Mplus models easy.
#' It is also common to want to fit many different models that are
#' slight variants. This can be tedious in Mplus, but using \R you can
#' create one basic set of input, store it in a vector, and then just
#' modify that (e.g., using regular expressions) and pass it to Mplus.
#' You can even use loops or the \code{*apply} constructs to fit the same
#' sort of model with little variants.
#'
#' The \code{writeData} argument is new and can be used to reduce overhead
#' from repeatedly writing the same data from R to the disk. When using the
#' \sQuote{always} option, \code{mplusModeler} behaves as before, always writing
#' data from R to the disk. This remains the default for the \code{prepareMplusData}
#' function to avoid confusion or breaking old code. However, for \code{mplusModeler},
#' the default has been set to \sQuote{ifmissing}. In this case, R generates an
#' md5 hash of the data prior to writing it out to the disk. The md5 hash is based on:
#' (1) the dimensions of the dataset, (2) the variable names,
#' (3) the class of every variable, and (4) the raw data from the first and last rows.
#' This combination ensures that under most all circumstances, if the data changes,
#' the hash will change. The hash is appended to the specified data file name
#' (which is controlled by the logical \code{hashfilename} argument). Next R
#' checks in the directory where the data would normally be written. If a data file
#' exists in that directory that matches the hash generated from the data, R will
#' use that existing data file instead of writing out the data again.
#' A final option is \sQuote{never}. If this option is used, R will not write
#' the data out even if no file matching the hash is found.
#'
#' @param object An object of class mplusObject
#' @param dataout the name of the file to output the data to for Mplus.
#' If missing, defaults to \code{modelout} changing .inp to .dat.
#' @param modelout the name of the output file for the model.
#' This is the file all the syntax is written to, which becomes the
#' Mplus input file. It should end in .inp. If missing, defaults to
#' \code{dataout} changing the extension to .inp.
#' @param run an integer indicating how many models should be run. Defaults to zero.
#' If zero, the data and model input files are all created, but the model is not run.
#' This can be useful for seeing how the function works and what setup is done. If one, a basic
#' model is run. If greater than one, the model is bootstrapped with \code{run} replications as
#' well as the basic model.
#' @param check logical whether the body of the Mplus syntax should be checked for missing
#' semicolons using the \code{\link{parseMplus}} function. Defaults to \code{FALSE}.
#' @param varwarnings A logical whether warnings about variable length should be left, the
#' default, or removed from the output file.
#' @param Mplus_command optional. N.B.: No need to pass this parameter for most users (has intelligent
#' defaults). Allows the user to specify the name/path of the Mplus executable to be used for
#' running models. This covers situations where Mplus is not in the system's path,
#' or where one wants to test different versions of the Mplus program.
#' @param writeData A character vector, one of \sQuote{ifmissing},
#' \sQuote{always}, \sQuote{never} indicating whether the data files
#' (*.dat) should be written to disk. This is passed on to \code{prepareMplusData}.
#' Note that previously, \code{mplusModeler} always (re)wrote the data to disk.
#' However, now the default is to write the data to disk only if it is missing
#' (i.e., \sQuote{ifmissing}). See details for further information.
#' @param hashfilename A logical whether or not to add a hash of the raw data to the
#' data file name. Defaults to \code{TRUE} in \code{mplusModeler}. Note that this
#' behavior is a change from previous versions and differs from \code{prepareMplusData}
#' which maintains the old behavior by default of \code{FALSE}.
#' @param \ldots additional arguments passed to the
#' \code{\link[MplusAutomation]{prepareMplusData}} function.
#' @return An Mplus model object, with results.
#' If \code{run = 1}, returns an invisible list of results from the run of
#' the Mplus model (see \code{\link[MplusAutomation]{readModels}} from the
#' MplusAutomation package). If \code{run = 0}, the function returns a list
#' with two elements, \sQuote{model} and \sQuote{boot} that are both \code{NULL}.
#' If \code{run >= 1}, returns a list with two elements, \sQuote{model} and \sQuote{boot}
#' containing the regular Mplus model output and the boot object, respectively.
#' In all cases, the Mplus data file and input files are created.
#' @seealso \code{\link{runModels}} and \code{\link{readModels}}
#' @import boot
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#' \dontrun{
#' # minimal example of a model using builtin data, allowing R
#' # to automatically guess the correct variables to use
#' test <- mplusObject(MODEL = "mpg ON wt hp;
#' wt WITH hp;", rdata = mtcars)
#'
#' # estimate the model in Mplus and read results back into R
#' res <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' # when forcing writeData = "always" data gets overwritten (with a warning)
#' resb <- mplusModeler(test, modelout = "model1.inp", run = 1L,
#' writeData = "always")
#'
#' # using writeData = "ifmissing", the default, no data re-written
#' resc <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' # using writeData = "ifmissing", the default, data ARE written
#' # if data changes
#' test <- mplusObject(MODEL = "mpg ON wt hp;
#' wt WITH hp;", rdata = mtcars[-10, ])
#' resd <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' # show summary
#' summary(resd)
#'
#' # show coefficients
#' coef(resd)
#'
#' # what if you wanted confidence intervals
#' # and standardized values?
#' # first update to tell Mplus you want them, re-run and print
#' test <- update(test, OUTPUT = ~ "CINTERVAL; STDYX;")
#' resd <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' coef(resd)
#' confint(resd)
#'
#' # now standardized
#' coef(resd, type = "stdyx")
#' confint(resd, type = "stdyx")
#'
#' # put together in one data frame if desired
#' merge(
#' coef(resd, type = "stdyx"),
#' confint(resd, type = "stdyx"),
#' by = "Label")
#'
#' # remove files
#' unlink(resc$results$input$data$file)
#' unlink(resd$results$input$data$file)
#' unlink("model1.inp")
#' unlink("model1.out")
#'
#' # simple example of a model using builtin data
#' # demonstrates use with a few more sections
#' test2 <- mplusObject(
#' TITLE = "test the MplusAutomation Package and mplusModeler wrapper;",
#' MODEL = "
#' mpg ON wt hp;
#' wt WITH hp;",
#' usevariables = c("mpg", "wt", "hp"),
#' rdata = mtcars)
#'
#' res2 <- mplusModeler(test2, modelout = "model2.inp", run = 1L)
#'
#' # remove files
#' unlink(res2$results$input$data$file)
#' unlink("model2.inp")
#' unlink("model2.out")
#'
#' # similar example using a robust estimator for standard errors
#' # and showing how an existing model can be easily updated and reused
#' test3 <- update(test2, ANALYSIS = ~ "ESTIMATOR = MLR;")
#'
#' res3 <- mplusModeler(test3, modelout = "model3.inp", run = 1L)
#' unlink(res3$results$input$data$file)
#' unlink("model3.inp")
#' unlink("model3.out")
#'
#' # now use the built in bootstrapping methods
#' # note that these work, even when Mplus will not bootstrap
#' # also note how categorical variables and weights are declared
#' # in particular, the usevariables for Mplus must be specified
#' # because more variables are included in the data than are in the
#' # model. Note the R usevariables includes all variables for both
#' # model and weights. The same is true for clustering.
#' test4 <- mplusObject(
#' TITLE = "test bootstrapping;",
#' VARIABLE = "
#' CATEGORICAL = cyl;
#' WEIGHT = wt;
#' USEVARIABLES = cyl mpg;",
#' ANALYSIS = "ESTIMATOR = MLR;",
#' MODEL = "
#' cyl ON mpg;",
#' usevariables = c("mpg", "wt", "cyl"),
#' rdata = mtcars)
#'
#' res4 <- mplusModeler(test4, "mtcars.dat", modelout = "model4.inp", run = 10L,
#' hashfilename = FALSE)
#' # see the results
#' res4$results$boot
#'
#' # remove files
#' unlink("mtcars.dat")
#' unlink("model4.inp")
#' unlink("model4.out")
#'
#' # Monte Carlo Simulation Example
#' montecarlo <- mplusObject(
#' TITLE = "Monte Carlo Example;",
#' MONTECARLO = "
#' NAMES ARE i1-i5;
#' NOBSERVATIONS = 100;
#' NREPS = 100;
#' SEED = 1234;",
#' MODELPOPULATION = "
#' f BY i1-i5*1;
#' f@1;
#' i1-i5*1;",
#' ANALYSIS = "
#' ESTIMATOR = BAYES;
#' PROC = 2;
#' fbiter = 100;",
#' MODEL = "
#' f BY i1-i5*.8 (l1-l5);
#' f@1;
#' i1-i5*1;",
#' MODELPRIORS = "
#' l1-l5 ~ N(.5 .1);",
#' OUTPUT = "TECH9;")
#'
#' fitMonteCarlo <- mplusModeler(montecarlo,
#' modelout = "montecarlo.inp",
#' run = 1L,
#' writeData = "always",
#' hashfilename = FALSE)
#'
#' unlink("montecarlo.inp")
#' unlink("montecarlo.out")
#'
#'
#' # Example including ID variable and extracting factor scores
#' dat <- mtcars
#' dat$UID <- 1:nrow(mtcars)
#'
#' testIDs <- mplusObject(
#' TITLE = "test the mplusModeler wrapper with IDs;",
#' VARIABLE = "IDVARIABLE = UID;",
#' MODEL = "
#' F BY mpg wt hp;",
#' SAVEDATA = "
#' FILE IS testid_fscores.dat;
#' SAVE IS fscores;
#' FORMAT IS free;",
#' usevariables = c("UID", "mpg", "wt", "hp"),
#' rdata = dat)
#'
#' resIDs <- mplusModeler(testIDs, modelout = "testid.inp", run = 1L)
#'
#' # view the saved data from Mplus, including factor scores
#' # the indicator variables, and the ID variable we specified
#' head(resIDs$results$savedata)
#'
#' # merge the factor scores with the rest of the original data
#' # merge together by the ID column
#' dat <- merge(dat, resIDs$results$savedata[, c("F", "UID")],
#' by = "UID")
#'
#' # correlate merged factor scores against some other new variable
#' with(dat, cor(F, qsec))
#'
#' # remove files
#' unlink(resIDs$results$input$data$file)
#' unlink("testid.inp")
#' unlink("testid.out")
#' unlink("testid_fscores.dat")
#' unlink("Mplus Run Models.log")
#' }
## Create the Mplus data and input files for an mplusObject, optionally run
## the model (and bootstrap it), and read the results back into R.
mplusModeler <- function(object, dataout, modelout, run = 0L,
                         check = FALSE, varwarnings = TRUE, Mplus_command="Mplus",
                         writeData = c("ifmissing", "always", "never"),
                         hashfilename = TRUE, ...) {
  ## `run` must be a single whole number:
  ## 0 = write files only, 1 = fit the model, > 1 = fit + bootstrap.
  stopifnot((run %% 1) == 0 && length(run) == 1)
  ## Point SHELL at COMSPEC while running -- NOTE(review): presumably so
  ## shell invocations on Windows go through cmd.exe; restored on exit.
  oldSHELL <- Sys.getenv("SHELL")
  Sys.setenv(SHELL = Sys.getenv("COMSPEC"))
  on.exit(Sys.setenv(SHELL = oldSHELL))
  writeData <- match.arg(writeData)
  ## A MONTECARLO section means Mplus simulates its own data; no data file
  ## is written or linked in that case.
  simulation <- !is.null(object$MONTECARLO)
  ## Derive whichever of dataout/modelout was not supplied from the other
  ## by swapping the file extension.
  if (missing(modelout) & missing(dataout)) {
    stop("You must specify either modelout or dataout")
  } else if (missing(dataout) && !simulation) {
    dataout <- gsub("(^.*)(\\.inp$)", "\\1.dat", modelout)
  } else if (missing(modelout)) {
    modelout <- gsub("(.*)(\\..+$)", "\\1.inp", dataout)
  }
  if (simulation) {
    ## Bootstrapping is not meaningful for a simulation study.
    if (run > 1) {
      run <- 1L
      message("run cannot be greater than 1 when using montecarlo simulation, setting run = 1")
    }
    dataout <- dataout2 <- NULL
  } else if (!simulation) {
    ## Imputed (multi-file) data cannot reuse a single hashed data file,
    ## so force a fresh write under a plain filename.
    if (object$imputed) {
      if (identical(writeData, "ifmissing")) {
        writeData <- "always"
        message("When imputed = TRUE, writeData cannot be 'ifmissing', setting to 'always'")
      }
      if (hashfilename) {
        hashfilename <- FALSE
        message("When imputed = TRUE, hashfilename cannot be TRUE, setting to FALSE")
      }
    }
    ## Bootstrapping rewrites resampled data on every replication, which is
    ## incompatible with hash-based reuse of an existing file.
    if (run > 1) {
      if (identical(writeData, "ifmissing")) {
        writeData <- "always"
        message("When run > 1, writeData cannot be 'ifmissing', setting to 'always'")
      }
      if (hashfilename) {
        hashfilename <- FALSE
        message("When run > 1, hashfilename cannot be TRUE, setting to FALSE")
      }
    }
    ## Without a hash in the filename there is no way to tell whether an
    ## existing file matches the current data, so "ifmissing" is unsafe.
    if (!hashfilename && identical(writeData, "ifmissing")) {
      writeData <- "always"
      message("When hashfilename = FALSE, writeData cannot be 'ifmissing', setting to 'always'")
    }
    ## Append an md5 hash of the (cleaned) data to the data filename; when
    ## writeData = "ifmissing", a matching existing file can then be reused.
    if (hashfilename) {
      md5 <- .cleanHashData(
        df = object$rdata,
        keepCols = object$usevariables,
        imputed = object$imputed)$md5
      tmp <- .hashifyFile(dataout, md5,
        useexisting = identical(writeData, "ifmissing"))
      dataout2 <- tmp$filename
    } else {
      dataout2 <- dataout
    }
  }
  ## Worker that (re)writes the data as needed, runs Mplus on `modelout`,
  ## and reads the output file back.  With boot = TRUE it serves as the
  ## statistic function for boot::boot() and returns a numeric vector of
  ## est/se pairs; otherwise it returns the full readModels() result.
  .run <- function(data, i, boot = TRUE, imputed = FALSE, ...) {
    if (!simulation) {
      if (imputed) {
        if(boot) stop("Cannot use imputed data and bootstrap")
        prepareMplusData(df = data,
          keepCols = object$usevariables,
          filename = dataout,
          inpfile = tempfile(),
          imputed = imputed,
          writeData = writeData,
          hashfilename = hashfilename,
          ...)
      } else {
        ## `i` is the (re)sample index vector supplied by boot().
        prepareMplusData(df = data[i, , drop = FALSE],
          keepCols = object$usevariables,
          filename = dataout, inpfile = tempfile(),
          writeData = ifelse(boot, "always", writeData),
          hashfilename = ifelse(boot, FALSE, hashfilename),
          ...)
      }
    }
    runModels(target = modelout, Mplus_command = Mplus_command, logFile=NULL)
    outfile <- gsub("(^.*)(\\.inp$)", "\\1.out", modelout)
    results <- readModels(target = outfile)
    if (!boot) {
      ## Optionally strip Mplus variable-name-length warnings from the
      ## output file before returning the results invisibly.
      if (!varwarnings) rmVarWarnings(outfile)
      return(invisible(results))
    } else {
      ## Flatten all non-confidence-interval parameter tables into one
      ## numeric vector of alternating estimates and standard errors.
      with(results, unlist(lapply(
        parameters[!grepl("^ci\\..+", names(parameters))],
        function(x) {
          x <- x[, c("est", "se")]
          x[] <- lapply(x, as.numeric)
          as.vector(t(na.omit(x)))
        }
      )))
    }
  }
  ## Write the Mplus input file, linking the (possibly hashed) data file.
  body <- createSyntax(object, dataout2, check=check, imputed = object$imputed)
  writeLines(body, con = modelout, sep = "\n")
  message("Wrote model to: ", modelout)
  if (!simulation) {
    ## Only announce a data write when a file will actually be (re)written,
    ## i.e. not when an existing hashed file is being reused.
    if (hashfilename && identical(writeData, "ifmissing")) {
      if (tmp$fileexists) {
        NULL
      } else {
        message("Wrote data to: ", dataout2)
      }
    } else {
      message("Wrote data to: ", dataout2)
    }
  }
  results <- bootres <- NULL
  finalres <- list(model = results, boot = bootres)
  ## Bootstrap first (when requested and not imputed), then the main fit.
  if (!simulation) {
    if (run > 1 & !object$imputed) {
      bootres <- boot(object$rdata, .run, R = run, sim = "ordinary")
      finalres$boot <- bootres
      class(finalres) <- c("boot.mplus.model", "list")
    }
  }
  if (run) {
    results <- .run(data = object$rdata, boot = FALSE, imputed = object$imputed, ...)
    finalres$model <- results
  } else if (!simulation) {
    ## run = 0: just write the data (per the writeData policy) and return
    ## the object without fitting anything.
    prepareMplusData(df = object$rdata,
      keepCols = object$usevariables,
      filename = dataout,
      inpfile = tempfile(),
      imputed = object$imputed,
      writeData = writeData,
      hashfilename = hashfilename,
      ...)
    return(object)
  }
  ## Attach results: the plain model output for a single run, or the
  ## model + bootstrap pair when run > 1.
  if (run == 1) {
    object$results <- finalres$model
  } else {
    object$results <- finalres
  }
  return(object)
}
#' Create Mplus code for various residual covariance structures.
#'
#' This function makes it easy to write the Mplus syntax for various
#' residual covariance structures.
#'
#'
#' The \strong{homogenous} residual covariance structure estimates one parameter:
#' the residual variance, \eqn{\sigma^{2}_{e}}{s^2}. The residual variance
#' is assumed to be identical for all variables and all covariances are
#' assumed to be zero. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab 0 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab 0 \tab 0 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab 0 \tab 0 \tab 0 \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#'
#' The \strong{heterogenous} residual covariance structure estimates
#' \bold{n} parameters, where \bold{n} is the number of variables.
#' A unique residual variance is estimated for every variable. All
#' covariances are assumed to be zero. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e1}}{s1^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab 0 \tab \eqn{\sigma^{2}_{e2}}{s2^2} \tab \tab \ldots \tab \cr
#' t3 \tab 0 \tab 0 \tab \eqn{\sigma^{2}_{e3}}{s3^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab 0 \tab 0 \tab 0 \tab \ldots \tab \eqn{\sigma^{2}_{en}}{sn^2} \cr
#' }
#'
#' The \strong{compound symmetric} residual covariance structure estimates
#' two parameters: one for the residual variance , \eqn{\sigma^{2}_{e}}{s^2},
#' and one for the covariance. The residual variance
#' is assumed to be identical for all variables and all covariances are
#' assumed to be identical. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho}{rho} \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho}{rho} \tab \eqn{\rho}{rho} \tab \eqn{\rho}{rho} \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#'
#' The \strong{toeplitz} residual covariance structure estimates
#' \bold{n} parameters, one for every band of the matrix.
#' The residual variance , \eqn{\sigma^{2}_{e}}{s^2}, is
#' assumed to be identical for all variables. The covariances one step removed
#' are all assumed identical. Likewise for all further bands.
#' The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho_{2}}{rho2} \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho_{n}}{rhon} \tab \eqn{\rho_{n - 1}}{rho(n - 1)} \tab \eqn{\rho_{n - 2}}{rho(n - 2)} \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#'
#' The \strong{autoregressive} residual covariance structure has two parameters:
#' the residual variance, \eqn{\sigma^{2}_{e}}{s^2} and
#' the correlation between adjacent time points, \eqn{\rho}{rho}. The variances
#' are constrained to be equal for all time points. A single correlation
#' parameter is estimated. The \eqn{\rho}{rho} is the correlation between adjacent
#' time points such as 1 and 2 or 2 and 3. More distant relationships are assumed
#' to have smaller correlations, decreasing exponentially. Thus between 1 and 3,
#' the estimate is \eqn{\rho^2}{rho^2}. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho^2}{rho^2} \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho^{n-1}}{rho^(n-1)} \tab \eqn{\rho^{n-2}}{rho^(n-2)} \tab \eqn{\rho^{n-3}}{rho^(n-3)} \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#' Because structural equation models generally model covariance structures,
#' the autoregressive residual structure must be parameterized in terms of
#' covariances. This is done in two parts. First, the function returns
#' syntax to estimate all the pairwise covariances, labelling the parameters
#' \eqn{\rho}{rho}, \eqn{\rho^2}{rho^2}, etc. so that they are constrained to be
#' equal. Next, it returns the syntax for the necessary model constraints to
#' constrain the different covariances, to decrease exponentially in their
#' correlations. This is done via:
#' \deqn{\rho^2 = (\frac{\rho}{\sigma^2_{e}})^{2}\sigma^2_{e}}{rho^2 = (rho/s^2)^2 * s^2}
#' and likewise for all later time points.
#'
#' The \strong{unstructured} residual covariance structure estimates
#' \eqn{\frac{n(n + 1)}{2}}{(n(n + 1))/2} parameters. It is unstructured
#' in that every variance and covariance is freely estimated with no
#' constraints. However, in most cases, this results in an overparameterized
#' model and is unestimable. The structure is represented in this table.
#'
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e1}}{s1^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho_{1}}{rho1} \tab \eqn{\sigma^{2}_{e2}}{s2^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho_{2}}{rho2} \tab \eqn{\rho_{3}}{rho3} \tab \eqn{\sigma^{2}_{e3}}{s3^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho_{5}}{rho5} \tab \eqn{\rho_{6}}{rho6} \tab \eqn{\rho_{7}}{rho7} \tab \ldots \tab \eqn{\sigma^{2}_{en}}{sn^2} \cr
#' }
#'
#' @param x input character vector of variable names, ordered by time
#' @param type A character string indicating the type of residual covariance
#' structure to be used. Defaults to \sQuote{homogenous}. Current options include
#' \sQuote{homogenous}, \sQuote{heterogenous}, \sQuote{cs} for compound symmetric,
#' \sQuote{toeplitz} for banded toeplitz, \sQuote{ar} for autoregressive, and
#' \sQuote{un} for unstructured.
#' @param r a character vector of the base label to name covariance parameters.
#' Defaults to \sQuote{rho}.
#' @param e a character vector of the error variance of the variable.
#' Used to create constraints on the covariance parameters. Defaults to \sQuote{e}.
#' @param collapse whether to collapse the covariance code using \sQuote{PWITH}. Note that
#' at the time of writing, Mplus does not allow more than 80 characters per row.
#' Defaults to \code{FALSE}.
#' @return A named character vector of class \sQuote{MplusRstructure} with four elements:
#' \item{all}{A character string collapsing all other sections.}
#' \item{Variances}{A character string containing all of the variances.}
#' \item{Covariances}{A character string containing all of the
#' covariances, properly labelled to allow constraints and the
#' autoregressive residual covariance structure.}
#' \item{Constraints}{A character string containing the \sQuote{MODEL CONSTRAINT}
#' section and code needed to parameterize the residual covariance structure
#' as autoregressive.}
#' @keywords interface
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#' # all five structures collapsing
#' mplusRcov(letters[1:4], "homogenous", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "heterogenous", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "cs", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "toeplitz", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "ar", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "un", "rho", "e", TRUE)
#'
#' # all five structures without collapsing
#' # useful for long names or many variables
#' # where a line may cross 80 characters
#' mplusRcov(letters[1:4], "homogenous", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "heterogenous", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "cs", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "toeplitz", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "ar", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "un", "rho", "e", FALSE)
mplusRcov <- function(x, type = c("homogenous", "heterogenous", "cs", "toeplitz", "ar", "un"),
  r = "rho", e = "e", collapse=FALSE) {
  type <- match.arg(type)
  ## Helper for the independence-type structures: fixes every pairwise
  ## residual covariance to zero via "a WITH b@0;" statements.  The same
  ## output with "@0" stripped yields the unstructured ("un") covariances.
  ## No MODEL CONSTRAINT code is needed, so Constraints is "".
  indCov <- function(x, r, e, collapse) {
    n <- length(x)
    if (collapse) {
      ## One "x[i] WITH <all later vars>@0;" statement per variable
      ## (fewer rows, but each row is long -- see 'collapse' docs).
      res <- lapply(1:(n - 1), function(i) {
        paste0(x[i], " WITH ", paste(paste0(x, "@0")[(i+1):n], collapse = " "), ";")
      })
    } else {
      ## One statement per variable pair, each on its own line.
      res <- lapply(1:(n - 1), function(i) {
        paste(paste0(x[i], " WITH ", paste0(x, "@0")[(i+1):n], ";"), collapse = "\n")
      })
    }
    res <- do.call("paste", list(unlist(res), collapse = "\n"))
    list(Covariances = res, Constraints = "")
  }
  ## Helper for the banded structures (toeplitz/cs/ar): every covariance
  ## is labelled by its lag so equality constraints can be imposed.
  toeplitzCov <- function(x, r, e, collapse, type = c("toeplitz", "cs", "ar")) {
    type <- match.arg(type)
    n <- length(x)
    k <- n - 1
    ## index[[j]] is a 2 x (n - j) matrix whose columns hold the
    ## (first, second) variable indices of all pairs at lag j.
    index <- lapply(1:k, function(j) {
      sapply(1:(n - j), function(i) c(i, i + j))
    })
    ## Parameter labels per lag: "rho", "rho2", ... for toeplitz/ar;
    ## one shared label for compound symmetry (cs).
    rho <- switch(type,
      toeplitz = paste0(r, c("", seq_along(index)[-1])),
      cs = rep(r, length(index)),
      ar = paste0(r, c("", seq_along(index)[-1]))
    )
    if (collapse) {
      ## "a b PWITH b c (rho);" pairs the two name lists positionally.
      res <- lapply(seq_along(index), function(i) {
        start <- paste(x[index[[i]][1, ]], collapse = " ")
        end <- paste(x[index[[i]][2, ]], collapse = " ")
        paste0(paste(c(start, end), collapse = " PWITH "), paste0(" (", rho[i], ");"))
      })
    } else {
      ## One labelled "a WITH b (rho);" statement per pair.
      res <- lapply(seq_along(index), function(i) {
        sapply(1:ncol(index[[i]]), function(j) {
          paste0(x[index[[i]][1, j]], " WITH ", x[index[[i]][2, j]], paste0(" (", rho[i], ");"))
        })
      })
    }
    res <- do.call("paste", list(unlist(res), collapse = "\n"))
    ## Only "ar" requires MODEL CONSTRAINT code: each higher-lag
    ## parameter is expressed as ((r/e)^lag) * e, i.e. a deterministic
    ## function of the lag-1 parameter and the residual variance.
    constraint <- switch(type,
      toeplitz = "",
      cs = "",
      ar = {
        cons <- lapply(seq_along(rho)[-1], function(i) {
          paste0("  ", rho[i], " = ((", r, "/", e, ")^", i, ") * ", e, ";")
        })
        cons <- do.call("paste", list(unlist(cons), collapse = "\n"))
        paste(c("MODEL CONSTRAINT: \n", cons, "\n"), collapse = "")
      }
    )
    list(Covariances = res, Constraints = constraint)
  }
  ## Variance code: a shared "(e)" label constrains variances equal
  ## (homogenous/cs/toeplitz/ar); "heterogenous" and "un" leave the
  ## variances freely estimated.
  V <- switch(type,
    homogenous = paste0(paste(x, collapse = " "), " (", e, ");"),
    heterogenous = paste0(paste(x, collapse = " "), ";"),
    cs = paste0(paste(x, collapse = " "), " (", e, ");"),
    toeplitz = paste0(paste(x, collapse = " "), " (", e, ");"),
    ar = paste0(paste(x, collapse = " "), " (", e, ");"),
    un = paste0(paste(x, collapse = " "), ";")
  )
  ## Covariance (and constraint) code for the requested structure.
  ## "un" reuses the zero-fixed code with all "@0" fixings removed,
  ## leaving every covariance freely estimated.
  Rcov <- switch(type,
    homogenous = indCov(x = x, r = r, e = e, collapse = collapse),
    heterogenous = indCov(x = x, r = r, e = e, collapse = collapse),
    cs = toeplitzCov(x = x, r = r, e = e, collapse = collapse, type = type),
    toeplitz = toeplitzCov(x = x, r = r, e = e, collapse = collapse, type = type),
    ar = toeplitzCov(x = x, r = r, e = e, collapse = collapse, type = type),
    un = lapply(indCov(x = x, r = r, e = e, collapse = collapse), function(x) {
      gsub("@0", "", x)})
  )
  ## Assemble the named result; "all" concatenates every section into
  ## one string, ready to drop into an Mplus MODEL section.
  Rstruc <- c(Variances = V, Rcov)
  allres <- do.call(paste, list(Rstruc, collapse = "\n"))
  Rstruc <- c(all = allres, Rstruc)
  class(Rstruc) <- "MplusRstructure"
  return(Rstruc)
}
#' Extract parameters from a data frame of Mplus estimates
#'
#' This is a simple convenience function designed to facilitate
#' looking at specific parameter types by easily return a subset
#' of a data frame with those types only. It is designed to follow up
#' the results returned from the \code{\link{readModels}} function.
#'
#' @param x A data frame (specifically the type returned by \code{readModels}) containing
#' parameters. Should be specific such as unstandardized and the data frame must have a
#' column called \sQuote{paramHeader}.
#' @param params A character string indicating the types of parameters to be returned.
#' Options currently include \sQuote{regression}, \sQuote{loading}, \sQuote{undirected},
#' \sQuote{expectation}, \sQuote{variability}, and \sQuote{new} for new/additional parameters.
#' Regressions include regression of one variable
#' \code{ON} another. \sQuote{loading} include indicator variables (which are assumed caused by the
#' underlying latent variable) and variables in latent growth models (\code{BY} or \code{|}).
#' Undirected paths currently only include covariances, indicated by the \code{WITH}
#' syntax in Mplus. Expectation paths are the unconditional or conditional expectations of
#' variables. In other words those parameters related to the first moments. For independent
#' variables, these are the means, \eqn{E(X)} and the conditional means or intercepts,
#' \eqn{E(X | f(\theta))}{E(X | f(theta))} where \eqn{f(\theta)}{f(theta)} is the model,
#' some function of the parameters, \eqn{\theta}{theta}. Finally \sQuote{variability}
#' refers to both variances and residual variances, corresponding to the second moments.
#' As with the expectations, variances are unconditional for variables that are not
#' predicted or conditioned on any other variable in the model whereas residual variances
#' are conditional on the model. Note that \R uses fuzzy matching so that each of these
#' can be called via shorthand, \sQuote{r}, \sQuote{l}, \sQuote{u}, \sQuote{e}, and \sQuote{v}.
#' @return A subset data frame with the parameters of interest.
#' @seealso \code{\link{readModels}}
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @keywords utils
#' @export
#' @examples
#' \dontrun{
#' test <- mplusObject(
#' TITLE = "test the MplusAutomation Package and my Wrapper;",
#' MODEL = "
#' mpg ON wt hp;
#' wt WITH hp;",
#' usevariables = c("mpg", "wt", "hp"),
#' rdata = mtcars)
#'
#' res <- mplusModeler(test, "mtcars.dat", modelout = "model1.inp", run = 1L)
#'
#' # store just the unstandardized parameters in 'd'
#' d <- res$results$parameters$unstandardized
#' # extract just regression parameters
#' paramExtract(d, "regression")
#' # extract other types of parameters using shorthand
#' paramExtract(d, "u")
#' paramExtract(d, "e")
#' paramExtract(d, "v")
#' }
paramExtract <- function(x, params = c("regression", "loading", "undirected", "expectation", "variability", "new")) {
  ## Expand (possibly abbreviated via fuzzy matching) parameter type.
  params <- match.arg(params)
  ## Mplus 'paramHeader' suffixes identifying each parameter type.
  ## These are used as regular expressions: "\\|" is an escaped pipe
  ## (growth-model notation) and the dots match any character.
  keys <- switch(params,
    regression = c("ON"),
    loading = c("BY", "\\|"),
    undirected = c("WITH"),
    expectation = c("Means", "Intercepts", "Thresholds"),
    variability = c("Variances", "Residual.Variances"),
    new = "New.Additional.Parameters")
  ## One logical vector (length nrow(x)) per key, TRUE where the
  ## paramHeader ends with that key; OR them together.  lapply() +
  ## Reduce() is type-stable, unlike the previous sapply() approach,
  ## which collapsed to a plain vector for zero- or one-row input and
  ## required a separate is.matrix() branch.
  hits <- lapply(keys, function(pattern) {
    grepl(paste0(".*", pattern, "$"), x[, "paramHeader"])
  })
  index <- which(Reduce(`|`, hits))
  # catch cases where there is nothing to extract
  if (!length(index)) return(NULL)
  ## drop = FALSE keeps a one-row result as a data frame.
  output <- x[index, , drop=FALSE]
  ## Record which parameter type was extracted on the result.
  attr(output, "type") <- params
  return(output)
}
#' Check Mplus code for missing semicolons or too long lines.
#'
#' The function parses a character string containing Mplus code
#' and checks that every non blank line ends in either a colon or
#' a semicolon. In addition, it checks that every line is less than 90
#' characters, because Mplus ignores everything after 90 characters on a line
#' which can be a source of enigmatic errors.
#'
#' The function is fairly basic at the moment. It works by simply
#' removing blank space (spaces, tabs, etc.) and then if a line does not
#' terminate in a colon or semicolon, it returns a note and the line
#' number. Optionally, it can add semicolons to any lines missing them
#' and return the input with added semicolons. To check for lines that are too long,
#' all trailing (but not before) white space is removed, and then the number of
#' characters is checked.
#'
#' @param x a character string containing Mplus code.
#' @param add logical indicating whether or not to add semicolons to
#' lines that do not have them. Defaults to \code{FALSE}.
#' @return a character vector containing the input text and
#' optionally added semicolons.
#' @seealso \code{\link{mplusModeler}}
#' @keywords utils
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#'
#' # sample input
#' test <- "
#' MODEL:
#' mpg ON wt hp;
#' wt WITH hp
#' "
#' # check and return
#' cat(parseMplus(test), file=stdout(), fill=TRUE)
#' # add missing semicolons and return
#' cat(parseMplus(test, TRUE), file=stdout(), fill=TRUE)
#' # line that is too long for Mplus
#' test <- "
#' MODEL:
#' mpg cyl disp hp drat wt qsec vs am gear PWITH cyl disp hp drat wt qsec vs am gear carb;
#' "
#' cat(parseMplus(test), file=stdout())
#' closeAllConnections()
parseMplus <- function(x, add = FALSE) {
  ## Break the input string into individual lines; the text connection
  ## must be closed explicitly once it has been read.
  con <- textConnection(x)
  txt <- readLines(con)
  close(con)
  ## A line passes the terminator check when, after stripping ALL
  ## whitespace, it is either empty or ends in ':' or ';'.
  stripped <- gsub("[[:space:]]", "", txt)
  terminated <- nchar(stripped) < 1 | grepl(".*[:;]$", stripped)
  if (any(!terminated)) {
    bad <- which(!terminated)
    message(paste(c("The following lines are not empty and do not end in a : or ;.",
                    paste(bad, txt[bad], sep = ": ")), collapse = "\n"))
    if (add) {
      ## Repair in place: append a semicolon to each offending line.
      txt[bad] <- paste0(txt[bad], ";")
      message("added semicolons ';' to all of the above lines")
    } else {
      message("Rerun with parseMplus(add = TRUE) to add semicolons to all lines")
    }
  }
  ## Length check (after any semicolons were added): trailing
  ## whitespace does not count toward the 90 character limit, but
  ## leading and internal whitespace does.
  trimmed <- gsub("[[:space:]]+$", "", txt)
  short_enough <- nchar(trimmed) <= 90
  if (any(!short_enough)) {
    too_long <- which(!short_enough)
    message(paste(c("The following lines are longer than 90 characters",
                    paste(too_long, txt[too_long], sep = ": ")), collapse = "\n"))
    message("Mplus will ignore everything after the 90th character on a line.\n",
            "Consider breaking the line(s)")
  }
  ## 'terminated' reflects the state BEFORE semicolons were added, so
  ## "All ok" is only reported when the input needed no repair at all.
  if (all(terminated & short_enough)) message("All ok")
  paste(txt, collapse = "\n")
}
#' Remove variable name length warnings from Mplus output file
#'
#' This function is designed to remove warnings in Mplus output files
#' about variable names being greater than 8 characters. It replaces them
#' with a note that the warnings were removed and a count of how many warnings
#' were removed.
#'
#' This is an internal function and not meant to be directly called by the
#' user under most circumstances. It is called by \code{\link{mplusModeler}}
#' when the \code{varwarnings = FALSE} argument is used.
#'
#' @param file A file name (including path if necessary) to an Mplus output
#' file. Note that you must have read and write privileges on the file
#' for this function to work properly.
#' @return Usually NULL. Called for the side effect of removing warnings in
#' Mplus output files. If \code{\link{file.access}} testing for write permissions
#' returns \code{FALSE}, a character string note that \code{rmVarWarnings}
#' could not run.
#' @seealso \code{\link{mplusModeler}}
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @keywords internal
#' @examples
#' # to do
rmVarWarnings <- function(file) {
  dat <- readLines(file)
  ## Locate the message line of every variable-name-length warning.
  init <- grep("Variable name contains more than 8 characters.", dat)
  ## Each warning is expected to span four lines: a line starting with
  ## "*" (the WARNING banner), the message itself, then lines beginning
  ## with "Only" and "Variable".  Abort if the output does not match.
  ## NOTE(review): if no warning is found, 'init' is empty and the
  ## 'dat[index[1]] <-' assignment below errors with an NA subscript;
  ## confirm callers only invoke this when warnings are present.
  stopifnot(all(
    grepl("^\\*", dat[init - 1]),
    grepl("^[[:space:]]*Only", dat[init + 1]),
    grepl("^[[:space:]]*Variable", dat[init + 2])))
  ## Line numbers of all four lines of every warning block
  ## (offsets -1, 0, 1, 2 around each matched message line).
  index <- sort(rep(init, each = 4) + (-1:2L))
  ## Overwrite the very first warning line with a one-line summary,
  ## then drop every remaining warning line.
  dat[index[1]] <- sprintf("%d Mplus warnings about variable name length removed",
    length(init))
  dat <- dat[-index[-1]]
  ## file.access(mode = 2) tests write permission; 0 means success.
  if (file.access(file, mode = 2)==0) {
    unlink(file)
    writeLines(dat, con = file)
  } else {
    return("Could not access file")
  }
}
#' Change directory
#'
#' The function takes a path and changes the current working directory
#' to the path. If the directory specified in the path does not
#' currently exist, it will be created.
#'
#' The function has been designed to be platform independent,
#' although it has had limited testing. Path creation is done using
#' \code{file.path}, the existence of the directory is checked using
#' \code{file.exists} and the directory created with \code{dir.create}.
#' Only the first argument, is required. The other optional arguments
#' are handy when one wants to create many similar directories with a common base.
#'
#' @param base a character string with the base path to the directory. This is required.
#' @param pre an optional character string with the prefix to add to
#' the base path. Non character strings will be coerced to character class.
#' @param num an optional character string, prefixed by \code{pre}.
#' Non character strings will be coerced to character class.
#' @return NULL, changes the current working directory
#' @keywords utilities
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#' \dontrun{
#' # an example just using the base
#' cd("~/testdir")
#'
#' # an example using the optional arguments
#' base <- "~/testdir"
#' pre <- "test_"
#'
#' cd(base, pre, 1)
#' cd(base, pre, 2)
#' }
cd <- function(base, pre, num) {
  ## 'base' is the only required argument and must be a path string.
  stopifnot(is.character(base))
  ## Both 'pre' and 'num' must be supplied to build a compound
  ## directory name; '&&' is the idiomatic scalar conjunction here
  ## (the previous '&' evaluated both sides unconditionally).
  if (!missing(pre) && !missing(num)) {
    pre <- as.character(pre)
    num <- as.character(num)
    newdir <- file.path(base, paste0(pre, num))
  } else {
    newdir <- file.path(base)
  }
  ## Create the directory if needed.  recursive = TRUE also creates
  ## any missing parent directories -- a backward-compatible
  ## generalization: nested non-existent paths previously made
  ## dir.create() warn and setwd() fail.
  if (!file.exists(newdir)) {
    dir.create(newdir, recursive = TRUE)
  }
  setwd(newdir)
  return(invisible(NULL))
}
#' Automatically detect variables from an Mplus model object
#'
#' This is a function to automatically detect the variables used in an
#' Mplus model object.
#'
#'
#' @param object An Mplus model object from \code{mplusObject}.
#' @return A vector of variables from the R dataset to use.
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @export
#' @importFrom stats na.omit
#' @seealso \code{\link{mplusModeler}}, \code{\link{mplusObject}}
#' @keywords interface
#' @examples
#'
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' rdata = mtcars, autov = FALSE)
#' example1$usevariables
#' MplusAutomation:::detectVariables(example1)
#'
#' example2 <- mplusObject(MODEL = "mpg ON wt;",
#' rdata = mtcars, autov = TRUE)
#' example2$usevariables
#' example3 <- update(example2,
#' MODEL = ~ . + "mpg ON qsec; wt WITH qsec;",
#' autov = TRUE)
#' example3$usevariables
#' rm(example1, example2, example3)
detectVariables <- function(object) {
  ## Monte Carlo models simulate their own data, so there is no R
  ## dataset from which to detect variables.
  if (!is.null(object$MONTECARLO)) {
    stop("detectVariables() does not work with MONTECARLO models")
  }
  ## NOTE(review): if this guard is FALSE, 'usevariables' is never
  ## assigned and the final return() errors ("object not found");
  ## callers appear to only invoke this with both rdata and MODEL
  ## set -- confirm.
  if (!is.null(object$rdata) && !is.null(object$MODEL)) {
    ## Candidate names are the dataset columns; for multiply imputed
    ## data, the columns of the first imputed dataset.
    if (object$imputed) {
      v <- colnames(object$rdata[[1]])
    } else {
      v <- colnames(object$rdata)
    }
    ## Split the VARIABLE, DEFINE, and MODEL sections into individual
    ## statements.  strsplit() errors on a NULL section, which
    ## tryCatch() maps to "".
    tmpVARIABLE <- unlist(c(
      tryCatch(unlist(strsplit(object$VARIABLE, split = ";")), error = function(e) ""),
      tryCatch(unlist(strsplit(object$DEFINE, split = ";")), error = function(e) ""),
      tryCatch(unlist(strsplit(object$MODEL, split = ";")), error = function(e) "")))
    tmpVARIABLE <- na.omit(tmpVARIABLE)
    tmpVARIABLE <- tmpVARIABLE[nzchar(tmpVARIABLE)]
    ## For each statement: keep only the right-hand side of
    ## assignments ("=", ARE, IS), tokenize on whitespace, and expand
    ## Mplus hyphenated ranges ("first-last") into the full run of
    ## dataset columns between the two endpoints.
    tmpVARIABLE <- gsub("\\s", "", unique(unlist(lapply(tmpVARIABLE, function(x) {
      x <- gsub("\n|\t|\r", "", gsub("^(.*)(=| ARE | are | IS | is )(.*)$", "\\3", x))
      xalt <- unique(unlist(strsplit(x, split = "\\s")))
      ## separateHyphens() (package-internal helper) is presumed to
      ## return a list of first/last endpoint pairs when hyphenated
      ## ranges are present -- TODO confirm against its definition.
      y <- separateHyphens(x)
      if (is.list(y)) {
        lapply(y, function(z) {
          ## Only expand when both endpoints match a dataset column.
          if (all(sapply(z, function(var) any(grepl(var, x = v, ignore.case=TRUE))))) {
            i1 <- which(grepl(z[[1]], x = v, ignore.case = TRUE))
            ## Prefer an exact (case-insensitive) match when the
            ## endpoint matches several columns as a substring.
            if (length(i1) > 1) {
              test <- tolower(z[[1]]) == tolower(v)
              if (any(test)) {
                i1 <- which(test)[1]
              }
            }
            i2 <- which(grepl(z[[2]], x = v, ignore.case = TRUE))
            if (length(i2) > 1) {
              test <- tolower(z[[2]]) == tolower(v)
              if (any(test)) {
                i2 <- which(test)[1]
              }
            }
            if (length(i1) && length(i2)) {
              c(v[i1:i2], xalt)
            }
          } else {
            c(unlist(z), xalt)
          }
        })
      } else {
        xalt
      }
    }))))
    message("R variables selected automatically as any variable name that\noccurs in the MODEL, VARIABLE, or DEFINE section.")
    ## Keep dataset columns whose name occurs anywhere in the
    ## extracted syntax tokens (case-insensitive substring match).
    usevariables <- unique(v[sapply(v, function(var) any(grepl(var, x = tmpVARIABLE, ignore.case = TRUE)))])
    ## Suggest an explicit USEVARIABLES statement unless the VARIABLE
    ## section already contains one.
    if (!isTRUE(grepl("usevariables", object$VARIABLE, ignore.case=TRUE))) {
      message(sprintf("If any issues, suggest explicitly specifying USEVARIABLES.\nA starting point may be:\nUSEVARIABLES = %s;",
                      paste(usevariables, collapse = " ")))
    }
  }
  return(usevariables)
}
#' Create an Mplus model object
#'
#' This is a function to create an Mplus model object in \code{R}.
#' The object holds all the sections of an Mplus input file, plus some
#' extra \code{R} ones. Once created, the model can be run using other
#' functions such as \code{mplusModeler} or updated using methods defined
#' for the \code{update} function.
#'
#' Mplus model objects allow a base model to be defined,
#' and then flexibly update the data, change the precise model, etc. If a section
#' does not vary between models, you can leave it the same. For example, suppose
#' you are fitting a number of models, but in all cases, wish to use maximum likelihood
#' estimator, \dQuote{ANALYSIS: ESTIMATOR = ML;} and would like standardized output,
#' \dQuote{OUTPUT: STDYX;}. Rather than retype those in every model, they can be defined
#' in one Mplus model object, and then that can simply be updated with different models,
#' leaving the analysis and output sections untouched. This also means that if a reviewer
#' comes back and asks for all analyses to be re-run say using the robust maximum likelihood
#' estimator, all you have to do is change it in the model object once, and re run all your code.
#'
#' @param TITLE A character string of the title for Mplus.
#' @param DATA A charater string of the data section for Mplus (note, do not define
#' the filename as this is generated automatically)
#' @param VARIABLE A character string of the variable section for Mplus (note, do not
#' define the variable names from the dataset as this is generated automatically)
#' @param DEFINE A character string of the define section for Mplus (optional)
#' @param MONTECARLO A character string of the montecarlo section for Mplus (optional).
#' If used, \code{autov} is defaults to \code{FALSE} instead of the usual default,
#' \code{TRUE}, but may still be overwritten, if desired.
#' @param MODELPOPULATION A character string of the MODEL POPULATION section for Mplus (optional).
#' @param MODELMISSING A character string of the MODEL MISSING section for Mplus (optional).
#' @param ANALYSIS A character string of the analysis section for Mplus (optional)
#' @param MODEL A character string of the model section for Mplus (optional, although
#' typically you want to define a model)
#' @param MODELINDIRECT A character string of the MODEL INDIRECT section for Mplus (optional).
#' @param MODELCONSTRAINT A character string of the MODEL CONSTRAINT section for Mplus (optional).
#' @param MODELTEST A character string of the MODEL TEST section for Mplus (optional).
#' @param MODELPRIORS A character string of the MODEL PRIORS section for Mplus (optional).
#' @param OUTPUT A character string of the output section for Mplus (optional)
#' @param SAVEDATA A character string of the savedata section for Mplus (optional)
#' @param PLOT A character string of the plot section for Mplus (optional)
#' @param usevariables A character vector of the variables from the
#' \code{R} dataset to use in the model.
#' @param rdata An \code{R} dataset to be used for the model.
#' @param autov A logical (defaults to \code{TRUE}) argument indicating
#' whether R should attempt to guess the correct variables to use from
#' the R dataset, if \code{usevariables} is left \code{NULL}.
#' @param imputed A logical whether the data are multiply imputed (a list).
#' Defaults to \code{FALSE}.
#'
#' @return A list of class \code{mplusObject} with elements
#' \item{TITLE}{The title in Mplus (if defined)}
#' \item{DATA}{The data section in Mplus (if defined)}
#' \item{VARIABLE}{The variable section in Mplus (if defined)}
#' \item{DEFINE}{The define section in Mplus (if defined)}
#' \item{MONTECARLO}{The montecarlo section in Mplus (if defined)}
#' \item{MODELPOPULATION}{The modelpopulation section in Mplus (if defined)}
#' \item{MODELMISSING}{The modelmissing section in Mplus (if defined)}
#' \item{ANALYSIS}{The analysis section in Mplus (if defined)}
#' \item{MODEL}{The model section in Mplus (if defined)}
#' \item{MODELINDIRECT}{The modelindirect section in Mplus (if defined)}
#' \item{MODELCONSTRAINT}{The modelconstraint section in Mplus (if defined)}
#' \item{MODELTEST}{The modeltest section in Mplus (if defined)}
#' \item{MODELPRIORS}{The modelpriors section in Mplus (if defined)}
#' \item{OUTPUT}{The output section in Mplus (if defined)}
#' \item{SAVEDATA}{The savedata section in Mplus (if defined)}
#' \item{PLOT}{The plot section in Mplus (if defined)}
#' \item{results}{NULL by default, but can be later updated to include the results from the model run.}
#' \item{usevariables}{A character vector of the variables from the \code{R} data set to be used.}
#' \item{rdata}{The \code{R} data set to use for the model.}
#' \item{imputed}{A logical whether the data are multiply imputed.}
#' \item{autov}{A logical whether the data should have the usevariables detected automatically or not}
#'
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @export
#' @seealso \code{\link{mplusModeler}}
#' @keywords interface
#' @examples
#'
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' usevariables = c("mpg", "hp"), rdata = mtcars)
#' str(example1)
#' rm(example1)
#'
#' # R figures out the variables automagically, with a message
#' example2 <- mplusObject(MODEL = "mpg ON wt;",
#' rdata = mtcars, autov = TRUE)
#' str(example2)
#' rm(example2)
#'
#' # R can also try to figure out a list of variables when
#' # variable names are hyphenated first-last variable, all variables
#' # between the first and last one will be included
#' example3 <- mplusObject(MODEL = "mpg ON wt-vs;",
#' rdata = mtcars, autov = TRUE)
#' str(example3)
#' rm(example3)
#'
#' # R warns if the first 8 characters of a (used) variable name are not unique
#' # as they will be indistinguishable in the Mplus output
#' example4 <- mplusObject(MODEL = "basename_01 ON basename_02;",
#' rdata = data.frame(basename_01 = 1:5, basename_02 = 5:1),
#' autov = TRUE)
#' rm(example4)
mplusObject <- function(TITLE = NULL, DATA = NULL, VARIABLE = NULL, DEFINE = NULL,
  MONTECARLO = NULL, MODELPOPULATION = NULL, MODELMISSING = NULL, ANALYSIS = NULL,
  MODEL = NULL, MODELINDIRECT = NULL, MODELCONSTRAINT = NULL, MODELTEST = NULL, MODELPRIORS = NULL,
  OUTPUT = NULL, SAVEDATA = NULL, PLOT = NULL,
  usevariables = NULL, rdata = NULL, autov = TRUE, imputed = FALSE) {
  ## Every Mplus section must be either a character string or NULL.
  charOrNull <- function(x) {is.character(x) || is.null(x)}
  stopifnot(charOrNull(TITLE), charOrNull(DATA), charOrNull(VARIABLE),
            charOrNull(DEFINE), charOrNull(MONTECARLO),
            charOrNull(MODELPOPULATION), charOrNull(MODELMISSING),
            charOrNull(ANALYSIS), charOrNull(MODEL),
            charOrNull(MODELINDIRECT), charOrNull(MODELCONSTRAINT),
            charOrNull(MODELTEST), charOrNull(MODELPRIORS),
            charOrNull(OUTPUT), charOrNull(SAVEDATA), charOrNull(PLOT))
  ## Collect the Mplus sections plus the R-side slots into a classed
  ## list; 'results' starts empty and is filled after the model runs.
  object <- structure(
    list(
      TITLE = TITLE,
      DATA = DATA,
      VARIABLE = VARIABLE,
      DEFINE = DEFINE,
      MONTECARLO = MONTECARLO,
      MODELPOPULATION = MODELPOPULATION,
      MODELMISSING = MODELMISSING,
      ANALYSIS = ANALYSIS,
      MODEL = MODEL,
      MODELINDIRECT = MODELINDIRECT,
      MODELCONSTRAINT = MODELCONSTRAINT,
      MODELTEST = MODELTEST,
      MODELPRIORS = MODELPRIORS,
      OUTPUT = OUTPUT,
      SAVEDATA = SAVEDATA,
      PLOT = PLOT,
      results = NULL,
      usevariables = usevariables,
      rdata = rdata,
      imputed = imputed,
      autov = autov),
    class = c("mplusObject", "list"))
  ## Monte Carlo models have no R data to scan, so unless the caller
  ## explicitly set 'autov', switch automatic detection off.
  if (!is.null(MONTECARLO) && missing(autov)) {
    object$autov <- autov <- FALSE
  }
  ## Guess usevariables from the model syntax when requested and not
  ## supplied explicitly.
  if (autov && is.null(usevariables) && !is.null(rdata) && !is.null(MODEL)) {
    object$usevariables <- detectVariables(object)
  }
  ## Mplus truncates variable names to 8 characters; warn when that
  ## would make two of the used variables indistinguishable.
  dup <- duplicated(substr(object$usevariables, start = 1, stop = 8))
  if (any(dup)) {
    message(sprintf("The following variables are not unique in the first 8 characters:\n %s",
                    paste(object$usevariables[dup], collapse = ", ")))
  }
  object
}
#' Update an Mplus model object
#'
#' This is a method for updating an Mplus model object.
#' It takes an Mplus model object as the first argument, and
#' then optionally any sections to update. There are two ways
#' to update a section using a formula interface. \code{~ "new stuff"} will
#' replace a given section with the new text. Alternately, you can add
#' additional text using \code{~ + "additional stuff"}. Combined these let you
#' replace or add to a section.
#'
#' @param object An object of class mplusObject
#' @param \dots Additional arguments to pass on
#' @return An (updated) Mplus model object
#' @export
#' @method update mplusObject
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @keywords interface
#' @examples
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' usevariables = c("mpg", "hp"), rdata = mtcars)
#' x <- ~ "ESTIMATOR = ML;"
#' str(update(example1, rdata = iris))
#' str(update(example1, ANALYSIS = x))
#' str(update(example1, MODEL = ~ "wt ON hp;"))
#' str(update(example1, MODEL = ~ . + "wt ON hp;"))
#' str(update(example1, ANALYSIS = x, MODEL = ~ . + "wt ON hp;"))
#'
#' # test to make sure . in Mplus code does not cause problems
#' str(update(example1, ANALYSIS = x, MODEL = ~ . + "wt ON hp*.5;"))
#' rm(example1, x)
update.mplusObject <- function(object, ...) {
  ## Capture the replacement/addition formulas supplied by the caller;
  ## with nothing to do, hand the object straight back.
  args <- list(...)
  if (!length(args)) return(object)
  supplied <- names(args)
  ## Argument names that are Mplus input sections (updated through the
  ## formula interface) versus plain R slots (replaced directly).
  sections <- c("TITLE", "DATA", "VARIABLE", "DEFINE",
                "MONTECARLO", "MODELPOPULATION", "MODELMISSING", "ANALYSIS",
                "MODEL", "MODELINDIRECT", "MODELCONSTRAINT", "MODELTEST", "MODELPRIORS",
                "OUTPUT", "SAVEDATA", "PLOT")
  rslots <- c("results", "usevariables", "rdata")
  sectionArgs <- supplied[supplied %in% sections]
  rArgs <- supplied[supplied %in% rslots]
  ## Each section argument is a one-sided formula:
  ##   ~ "text"       replaces the section entirely,
  ##   ~ . + "text"   appends "text" to the existing section.
  if (length(sectionArgs)) {
    object[sectionArgs] <- lapply(sectionArgs, function(nm) {
      rhs <- as.character(args[[nm]][[2]])
      ## A lone "." among the deparsed pieces signals "keep the old
      ## text"; the anchored pattern keeps Mplus constants such as
      ## .5 from triggering it.
      keep <- if (any(grepl("^\\.$", rhs))) paste0(object[[nm]], "\n") else ""
      paste(keep, rhs[length(rhs)], collapse = "\n")
    })
  }
  ## R slots are plain values, copied over verbatim.
  if (length(rArgs)) {
    object[rArgs] <- args[rArgs]
  }
  ## Re-detect usevariables when automatic detection is on.
  if (object$autov) {
    object$usevariables <- detectVariables(object)
  }
  object
}
#' Create the Mplus input text for an mplusObject
#'
#' This function takes an object of class \code{mplusObject} and creates
#' the Mplus input text corresponding to it, including data link and
#' variable names.
#'
#' @param object An object of class mplusObject
#' @param filename The name of the data file as a character vector
#' @param check A logical indicating whether or not to run \code{parseMplus}
#' on the created input file. Checks for errors like lines that are too long,
#' or for missing semi-colons and gives notes.
#' @param add A logical passed on to \code{parseMplus} whether to add semi
#' colons to line ends. Defaults to \code{FALSE}.
#' @param imputed A logical whether the data are multiply imputed.
#' Defaults to \code{FALSE}.
#' @return A character string containing all the text for the Mplus
#' input file.
#' @keywords interface
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @seealso \code{\link{prepareMplusData}}, \code{\link{mplusModeler}}
#' @examples
#' # example mplusObject
#' example1 <- mplusObject(MODEL = "mpg ON wt;",
#' usevariables = c("mpg", "hp"), rdata = mtcars)
#'
#' # create the Mplus input text
#' cat(createSyntax(example1, "example1.dat"), file=stdout(), fill=TRUE)
#'
#' # update the object, then create input text
#' cat(createSyntax(update(example1,
#' TITLE = ~ "This is my title;",
#' MODEL = ~ . + "\nmpg ON hp;",
#' usevariables = c("mpg", "hp", "wt")), "example1.dat"),
#' file=stdout(),
#' fill=TRUE)
#' rm(example1)
#' closeAllConnections()
createSyntax <- function(object, filename, check=TRUE, add=FALSE, imputed=FALSE) {
  stopifnot(inherits(object, "mplusObject"))
  ## Map between element names in the mplusObject ('Names') and the
  ## corresponding Mplus input file section headers ('Labels').
  mplusList <- data.frame(
    Names = c("TITLE", "DATA", "VARIABLE", "DEFINE",
      "MONTECARLO", "MODELPOPULATION", "MODELMISSING", "ANALYSIS",
      "MODEL", "MODELINDIRECT", "MODELCONSTRAINT", "MODELTEST", "MODELPRIORS",
      "OUTPUT", "SAVEDATA", "PLOT"),
    Labels = c("TITLE", "DATA", "VARIABLE", "DEFINE",
      "MONTECARLO", "MODEL POPULATION", "MODEL MISSING", "ANALYSIS",
      "MODEL", "MODEL INDIRECT", "MODEL CONSTRAINT", "MODEL TEST", "MODEL PRIORS",
      "OUTPUT", "SAVEDATA", "PLOT"),
    stringsAsFactors = FALSE)
  ## Monte Carlo models simulate their data, so no FILE statement is
  ## prepended for them.
  simulation <- !is.null(object$MONTECARLO)
  if (!simulation && !missing(filename)) {
    dFile <- paste0("FILE = \"", filename, "\";\n")
    if (imputed) {
      ## For multiply imputed data, 'filename' lists the imputed data
      ## files and Mplus must be told to treat it that way.
      dFile <- paste0(dFile, "TYPE = IMPUTATION;\n")
    }
    object$DATA <- paste(dFile, object$DATA, collapse = "\n")
  }
  ## Prepend NAMES (from createVarSyntax) and MISSING statements
  ## generated from the R dataset.
  if (!is.null(object$rdata) && !is.null(object$usevariables)) {
    if (object$imputed) {
      vNames <- createVarSyntax(object$rdata[[1]][, object$usevariables])
    } else {
      vNames <- createVarSyntax(object$rdata[, object$usevariables])
    }
    object$VARIABLE <- paste(vNames, "MISSING=.;\n", object$VARIABLE, collapse = "\n")
  }
  ## Keep only sections that are non-NULL and contain something other
  ## than whitespace.  vapply() guarantees a logical result even for
  ## degenerate input, unlike sapply().
  index <- vapply(object[mplusList$Names], function(x) {
    !(is.null(x) || !nzchar(gsub("\\s*", "", x, perl=TRUE)))
  }, logical(1))
  sections <- mplusList$Names[index]
  ## Emit "LABEL:" followed by the section body, in canonical order.
  body <- unlist(lapply(sections, function(n) {
    c(paste0(mplusList$Labels[match(n, mplusList$Names)], ":"), object[[n]])
  }))
  body <- paste(body, collapse = "\n")
  if (check) {
    ## Warn about missing semicolons / over-long lines, optionally
    ## adding the missing semicolons (add = TRUE).
    body <- parseMplus(body, add = add)
  }
  return(body)
}
#' Create, run, and read Mplus models.
#'
#' This is a convenience wrapper to automate many of the
#' usual steps required to run an Mplus model. It relies in part
#' on functions from the MplusAutomation package.
#'
#' Combined with functions from the MplusAutomation package,
#' this function is designed to make it easy to fit Mplus models
#' from R and to ease many of the usual frustrations with Mplus.
#' For example, Mplus has very specific formats it accepts data in,
#' but also very little data management facilities. Using \R data
#' management is easy. This function is designed to make using data
#' from \R in Mplus models easy.
#' It is also common to want to fit many different models that are
#' slight variants. This can be tedius in Mplus, but using \R you can
#' create one basic set of input, store it in a vector, and then just
#' modify that (e.g., using regular expressions) and pass it to Mplus.
#' You can even use loops or the \code{*apply} constructs to fit the same
#' sort of model with little variants.
#'
#' The \code{writeData} argument is new and can be used to reduce overhead
#' from repeatedly writing the same data from R to the disk. When using the
#' \sQuote{always} option, \code{mplusModeler} behaves as before, always writing
#' data from R to the disk. This remains the default for the \code{prepareMplusData}
#' function to avoid confusion or breaking old code. However, for \code{mplusModeler},
#' the default has been set to \sQuote{ifmissing}. In this case, R generates an
#' md5 hash of the data prior to writing it out to the disk. The md5 hash is based on:
#' (1) the dimensions of the dataset, (2) the variable names,
#' (3) the class of every variable, and (4) the raw data from the first and last rows.
#' This combination ensures that under most all circumstances, if the data changes,
#' the hash will change. The hash is appended to the specified data file name
#' (which is controlled by the logical \code{hashfilename} argument). Next R
#' checks in the directory where the data would normally be written. If a data file
#' exists in that directory that matches the hash generated from the data, R will
#' use that existing data file instead of writing out the data again.
#' A final option is \sQuote{never}. If this option is used, R will not write
#' the data out even if no file matching the hash is found.
#'
#' @param object An object of class mplusObject
#' @param dataout the name of the file to output the data to for Mplus.
#' If missing, defaults to \code{modelout} changing .inp to .dat.
#' @param modelout the name of the output file for the model.
#' This is the file all the syntax is written to, which becomes the
#' Mplus input file. It should end in .inp. If missing, defaults to
#' \code{dataout} changing the extension to .inp.
#' @param run an integer indicating how many models should be run. Defaults to zero.
#' If zero, the data and model input files are all created, but the model is not run.
#' This can be useful for seeing how the function works and what setup is done. If one, a basic
#' model is run. If greater than one, the model is bootstrapped with \code{run} replications as
#' well as the basic model.
#' @param check logical whether the body of the Mplus syntax should be checked for missing
#' semicolons using the \code{\link{parseMplus}} function. Defaults to \code{FALSE}.
#' @param varwarnings A logical whether warnings about variable length should be left, the
#' default, or removed from the output file.
#' @param Mplus_command optional. N.B.: No need to pass this parameter for most users (has intelligent
#' defaults). Allows the user to specify the name/path of the Mplus executable to be used for
#' running models. This covers situations where Mplus is not in the system's path,
#' or where one wants to test different versions of the Mplus program.
#' @param writeData A character vector, one of \sQuote{ifmissing},
#' \sQuote{always}, \sQuote{never} indicating whether the data files
#' (*.dat) should be written to disk. This is passed on to \code{prepareMplusData}.
#' Note that previously, \code{mplusModeler} always (re)wrote the data to disk.
#' However, now the default is to write the data to disk only if it is missing
#' (i.e., \sQuote{ifmissing}). See details for further information.
#' @param hashfilename A logical whether or not to add a hash of the raw data to the
#' data file name. Defaults to \code{TRUE} in \code{mplusModeler}. Note that this
#' behavior is a change from previous versions and differs from \code{prepareMplusData}
#' which maintains the old behavior by default of \code{FALSE}.
#' @param \ldots additional arguments passed to the
#' \code{\link[MplusAutomation]{prepareMplusData}} function.
#' @return An Mplus model object, with results.
#' If \code{run = 1}, returns an invisible list of results from the run of
#' the Mplus model (see \code{\link[MplusAutomation]{readModels}} from the
#' MplusAutomation package). If \code{run = 0}, the function returns a list
#' with two elements, \sQuote{model} and \sQuote{boot} that are both \code{NULL}.
#' If \code{run > 1}, returns a list with two elements, \sQuote{model} and \sQuote{boot}
#' containing the regular Mplus model output and the boot object, respectively.
#' In all cases, the Mplus data file and input files are created.
#' @seealso \code{\link{runModels}} and \code{\link{readModels}}
#' @import boot
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#' \dontrun{
#' # minimal example of a model using builtin data, allowing R
#' # to automatically guess the correct variables to use
#' test <- mplusObject(MODEL = "mpg ON wt hp;
#' wt WITH hp;", rdata = mtcars)
#'
#' # estimate the model in Mplus and read results back into R
#' res <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' # when forcing writeData = "always" data gets overwritten (with a warning)
#' resb <- mplusModeler(test, modelout = "model1.inp", run = 1L,
#' writeData = "always")
#'
#' # using writeData = "ifmissing", the default, no data re-written
#' resc <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' # using writeData = "ifmissing", the default, data ARE written
#' # if data changes
#' test <- mplusObject(MODEL = "mpg ON wt hp;
#' wt WITH hp;", rdata = mtcars[-10, ])
#' resd <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' # show summary
#' summary(resd)
#'
#' # show coefficients
#' coef(resd)
#'
#' # what if you wanted confidence intervals
#' # and standardized values?
#' # first update to tell Mplus you want them, re-run and print
#' test <- update(test, OUTPUT = ~ "CINTERVAL; STDYX;")
#' resd <- mplusModeler(test, modelout = "model1.inp", run = 1L)
#'
#' coef(resd)
#' confint(resd)
#'
#' # now standardized
#' coef(resd, type = "stdyx")
#' confint(resd, type = "stdyx")
#'
#' # put together in one data frame if desired
#' merge(
#' coef(resd, type = "stdyx"),
#' confint(resd, type = "stdyx"),
#' by = "Label")
#'
#' # remove files
#' unlink(resc$results$input$data$file)
#' unlink(resd$results$input$data$file)
#' unlink("model1.inp")
#' unlink("model1.out")
#'
#' # simple example of a model using builtin data
#' # demonstrates use with a few more sections
#' test2 <- mplusObject(
#' TITLE = "test the MplusAutomation Package and mplusModeler wrapper;",
#' MODEL = "
#' mpg ON wt hp;
#' wt WITH hp;",
#' usevariables = c("mpg", "wt", "hp"),
#' rdata = mtcars)
#'
#' res2 <- mplusModeler(test2, modelout = "model2.inp", run = 1L)
#'
#' # remove files
#' unlink(res2$results$input$data$file)
#' unlink("model2.inp")
#' unlink("model2.out")
#'
#' # similar example using a robust estimator for standard errors
#' # and showing how an existing model can be easily updated and reused
#' test3 <- update(test2, ANALYSIS = ~ "ESTIMATOR = MLR;")
#'
#' res3 <- mplusModeler(test3, modelout = "model3.inp", run = 1L)
#' unlink(res3$results$input$data$file)
#' unlink("model3.inp")
#' unlink("model3.out")
#'
#' # now use the built in bootstrapping methods
#' # note that these work, even when Mplus will not bootstrap
#' # also note how categorical variables and weights are declared
#' # in particular, the usevariables for Mplus must be specified
#' # because more variables are included in the data than are in the
#' # model. Note the R usevariables includes all variables for both
#' # model and weights. The same is true for clustering.
#' test4 <- mplusObject(
#' TITLE = "test bootstrapping;",
#' VARIABLE = "
#' CATEGORICAL = cyl;
#' WEIGHT = wt;
#' USEVARIABLES = cyl mpg;",
#' ANALYSIS = "ESTIMATOR = MLR;",
#' MODEL = "
#' cyl ON mpg;",
#' usevariables = c("mpg", "wt", "cyl"),
#' rdata = mtcars)
#'
#' res4 <- mplusModeler(test4, "mtcars.dat", modelout = "model4.inp", run = 10L,
#' hashfilename = FALSE)
#' # see the results
#' res4$results$boot
#'
#' # remove files
#' unlink("mtcars.dat")
#' unlink("model4.inp")
#' unlink("model4.out")
#'
#' # Monte Carlo Simulation Example
#' montecarlo <- mplusObject(
#' TITLE = "Monte Carlo Example;",
#' MONTECARLO = "
#' NAMES ARE i1-i5;
#' NOBSERVATIONS = 100;
#' NREPS = 100;
#' SEED = 1234;",
#' MODELPOPULATION = "
#' f BY i1-i5*1;
#' f@1;
#' i1-i5*1;",
#' ANALYSIS = "
#' ESTIMATOR = BAYES;
#' PROC = 2;
#' fbiter = 100;",
#' MODEL = "
#' f BY i1-i5*.8 (l1-l5);
#' f@1;
#' i1-i5*1;",
#' MODELPRIORS = "
#' l1-l5 ~ N(.5 .1);",
#' OUTPUT = "TECH9;")
#'
#' fitMonteCarlo <- mplusModeler(montecarlo,
#' modelout = "montecarlo.inp",
#' run = 1L,
#' writeData = "always",
#' hashfilename = FALSE)
#'
#' unlink("montecarlo.inp")
#' unlink("montecarlo.out")
#'
#'
#' # Example including ID variable and extracting factor scores
#' dat <- mtcars
#' dat$UID <- 1:nrow(mtcars)
#'
#' testIDs <- mplusObject(
#' TITLE = "test the mplusModeler wrapper with IDs;",
#' VARIABLE = "IDVARIABLE = UID;",
#' MODEL = "
#' F BY mpg wt hp;",
#' SAVEDATA = "
#' FILE IS testid_fscores.dat;
#' SAVE IS fscores;
#' FORMAT IS free;",
#' usevariables = c("UID", "mpg", "wt", "hp"),
#' rdata = dat)
#'
#' resIDs <- mplusModeler(testIDs, modelout = "testid.inp", run = 1L)
#'
#' # view the saved data from Mplus, including factor scores
#' # the indicator variables, and the ID variable we specified
#' head(resIDs$results$savedata)
#'
#' # merge the factor scores with the rest of the original data
#' # merge together by the ID column
#' dat <- merge(dat, resIDs$results$savedata[, c("F", "UID")],
#' by = "UID")
#'
#' # correlate merged factor scores against some other new variable
#' with(dat, cor(F, qsec))
#'
#' # remove files
#' unlink(resIDs$results$input$data$file)
#' unlink("testid.inp")
#' unlink("testid.out")
#' unlink("testid_fscores.dat")
#' unlink("Mplus Run Models.log")
#' }
mplusModeler <- function(object, dataout, modelout, run = 0L,
check = FALSE, varwarnings = TRUE, Mplus_command="Mplus",
writeData = c("ifmissing", "always", "never"),
hashfilename = TRUE, ...) {
## 'run' must be a single whole number: 0 = only write the data/input files,
## 1 = fit the model once, > 1 = fit and bootstrap with 'run' replications
stopifnot((run %% 1) == 0 && length(run) == 1)
## temporarily point SHELL at COMSPEC (a Windows environment variable) so the
## external Mplus call uses the expected shell; the old value is restored on exit
oldSHELL <- Sys.getenv("SHELL")
Sys.setenv(SHELL = Sys.getenv("COMSPEC"))
on.exit(Sys.setenv(SHELL = oldSHELL))
writeData <- match.arg(writeData)
## Monte Carlo models carry a MONTECARLO section and generate their own data,
## so no raw data file is written for them
simulation <- !is.null(object$MONTECARLO)
## derive whichever of dataout/modelout is missing from the other:
## .inp -> .dat for the data file, any extension -> .inp for the input file
if (missing(modelout) & missing(dataout)) {
stop("You must specify either modelout or dataout")
} else if (missing(dataout) && !simulation) {
dataout <- gsub("(^.*)(\\.inp$)", "\\1.dat", modelout)
} else if (missing(modelout)) {
modelout <- gsub("(.*)(\\..+$)", "\\1.inp", dataout)
}
if (simulation) {
## bootstrapping is not supported for simulations; force a single run
if (run > 1) {
run <- 1L
message("run cannot be greater than 1 when using montecarlo simulation, setting run = 1")
}
dataout <- dataout2 <- NULL
} else if (!simulation) {
if (object$imputed) {
## imputed data cannot use hash-based reuse; always rewrite the files
if (identical(writeData, "ifmissing")) {
writeData <- "always"
message("When imputed = TRUE, writeData cannot be 'ifmissing', setting to 'always'")
}
if (hashfilename) {
hashfilename <- FALSE
message("When imputed = TRUE, hashfilename cannot be TRUE, setting to FALSE")
}
}
if (run > 1) {
## bootstrap replications resample the data, so each replication must
## rewrite the data file and hashed file names are disabled
if (identical(writeData, "ifmissing")) {
writeData <- "always"
message("When run > 1, writeData cannot be 'ifmissing', setting to 'always'")
}
if (hashfilename) {
hashfilename <- FALSE
message("When run > 1, hashfilename cannot be TRUE, setting to FALSE")
}
}
## 'ifmissing' detects an existing matching file via the hash embedded in
## the file name, so it requires hashfilename = TRUE
if (!hashfilename && identical(writeData, "ifmissing")) {
writeData <- "always"
message("When hashfilename = FALSE, writeData cannot be 'ifmissing', setting to 'always'")
}
if (hashfilename) {
## append an md5 hash of the (cleaned) data to the file name; dataout2 is
## the hashed name actually referenced in the generated Mplus input file
md5 <- .cleanHashData(
df = object$rdata,
keepCols = object$usevariables,
imputed = object$imputed)$md5
tmp <- .hashifyFile(dataout, md5,
useexisting = identical(writeData, "ifmissing"))
dataout2 <- tmp$filename
} else {
dataout2 <- dataout
}
}
## worker: write the data (unless a simulation), run Mplus, read results.
## With boot = TRUE it returns a numeric vector of est/se values suitable as
## the statistic function for boot::boot(); otherwise the full results object.
.run <- function(data, i, boot = TRUE, imputed = FALSE, ...) {
if (!simulation) {
if (imputed) {
if(boot) stop("Cannot use imputed data and bootstrap")
prepareMplusData(df = data,
keepCols = object$usevariables,
filename = dataout,
inpfile = tempfile(),
imputed = imputed,
writeData = writeData,
hashfilename = hashfilename,
...)
} else {
## 'i' is the resampling index supplied by boot(); resampled data are
## always rewritten and never hashed
prepareMplusData(df = data[i, , drop = FALSE],
keepCols = object$usevariables,
filename = dataout, inpfile = tempfile(),
writeData = ifelse(boot, "always", writeData),
hashfilename = ifelse(boot, FALSE, hashfilename),
...)
}
}
runModels(target = modelout, Mplus_command = Mplus_command, logFile=NULL)
outfile <- gsub("(^.*)(\\.inp$)", "\\1.out", modelout)
results <- readModels(target = outfile)
if (!boot) {
## optionally strip variable-length warnings from the .out file
if (!varwarnings) rmVarWarnings(outfile)
return(invisible(results))
} else {
## flatten all non-confidence-interval parameter tables into a numeric
## vector of est/se values for boot()
with(results, unlist(lapply(
parameters[!grepl("^ci\\..+", names(parameters))],
function(x) {
x <- x[, c("est", "se")]
x[] <- lapply(x, as.numeric)
as.vector(t(na.omit(x)))
}
)))
}
}
## build and write the Mplus input syntax, referencing the (possibly hashed)
## data file name
body <- createSyntax(object, dataout2, check=check, imputed = object$imputed)
writeLines(body, con = modelout, sep = "\n")
message("Wrote model to: ", modelout)
if (!simulation) {
if (hashfilename && identical(writeData, "ifmissing")) {
## only announce the data file when it was actually (re)written; if a
## file matching the hash already existed, it is reused silently
if (tmp$fileexists) {
NULL
} else {
message("Wrote data to: ", dataout2)
}
} else {
message("Wrote data to: ", dataout2)
}
}
results <- bootres <- NULL
finalres <- list(model = results, boot = bootres)
if (!simulation) {
## note: bootstrap replications are run before the basic model fit
if (run > 1 & !object$imputed) {
bootres <- boot(object$rdata, .run, R = run, sim = "ordinary")
finalres$boot <- bootres
class(finalres) <- c("boot.mplus.model", "list")
}
}
if (run) {
## fit the model once on the full data
results <- .run(data = object$rdata, boot = FALSE, imputed = object$imputed, ...)
finalres$model <- results
} else if (!simulation) {
## run = 0: write the data file only and return the object without results
prepareMplusData(df = object$rdata,
keepCols = object$usevariables,
filename = dataout,
inpfile = tempfile(),
imputed = object$imputed,
writeData = writeData,
hashfilename = hashfilename,
...)
return(object)
}
## run == 1 stores just the model results; run > 1 stores model + boot
if (run == 1) {
object$results <- finalres$model
} else {
object$results <- finalres
}
return(object)
}
#' Create Mplus code for various residual covariance structures.
#'
#' This function makes it easy to write the Mplus syntax for various
#' residual covariance structure.
#'
#'
#' The \strong{homogenous} residual covariance structure estimates one parameter:
#' the residual variance, \eqn{\sigma^{2}_{e}}{s^2}. The residual variance
#' is assumed to be identical for all variables and all covariances are
#' assumed to be zero. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab 0 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab 0 \tab 0 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab 0 \tab 0 \tab 0 \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#'
#' The \strong{heterogenous} residual covariance structure estimates
#' \bold{n} parameters, where \bold{n} is the number of variables.
#' A unique residual variance is estimated for every variable. All
#' covariances are assumed to be zero. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e1}}{s1^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab 0 \tab \eqn{\sigma^{2}_{e2}}{s2^2} \tab \tab \ldots \tab \cr
#' t3 \tab 0 \tab 0 \tab \eqn{\sigma^{2}_{e3}}{s3^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab 0 \tab 0 \tab 0 \tab \ldots \tab \eqn{\sigma^{2}_{en}}{sn^2} \cr
#' }
#'
#' The \strong{compound symmetric} residual covariance structure estimates
#' two parameters: one for the residual variance, \eqn{\sigma^{2}_{e}}{s^2},
#' and one for the covariance. The residual variance
#' is assumed to be identical for all variables and all covariances are
#' assumed to be identical. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho}{rho} \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho}{rho} \tab \eqn{\rho}{rho} \tab \eqn{\rho}{rho} \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#'
#' The \strong{toeplitz} residual covariance structure estimates
#' \bold{n} parameters, one for every band of the matrix.
#' The residual variance, \eqn{\sigma^{2}_{e}}{s^2}, is
#' assumed to be identical for all variables. The covariances one step removed
#' are all assumed identical. Likewise for all further bands.
#' The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho_{2}}{rho2} \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho_{n}}{rhon} \tab \eqn{\rho_{n - 1}}{rho(n - 1)} \tab \eqn{\rho_{n - 2}}{rho(n - 2)} \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#'
#' The \strong{autoregressive} residual covariance structure has two parameters:
#' the residual variance, \eqn{\sigma^{2}_{e}}{s^2} and
#' the correlation between adjacent time points, \eqn{\rho}{rho}. The variances
#' are constrained to be equal for all time points. A single correlation
#' parameter is estimated. The \eqn{\rho}{rho} is the correlation between adjacent
#' time points such as 1 and 2 or 2 and 3. More distant relationships are assumed
#' to have smaller correlations, decreasing exponentially. Thus between 1 and 3,
#' the estimate is \eqn{\rho^2}{rho^2}. The structure is represented in this table.
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho^2}{rho^2} \tab \eqn{\rho}{rho} \tab \eqn{\sigma^{2}_{e}}{s^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho^{n-1}}{rho^(n-1)} \tab \eqn{\rho^{n-2}}{rho^(n-2)} \tab \eqn{\rho^{n-3}}{rho^(n-3)} \tab \ldots \tab \eqn{\sigma^{2}_{e}}{s^2} \cr
#' }
#' Because structural equation models generally model covariance structures,
#' the autoregressive residual structure must be parameterized in terms of
#' covariances. This is done in two parts. First, the function returns
#' syntax to estimate all the pairwise covariances, labelling the parameters
#' \eqn{\rho}{rho}, \eqn{\rho^2}{rho^2}, etc. so that they are constrained to be
#' equal. Next, it returns the syntax for the necessary model constraints to
#' constrain the different covariances, to decrease exponentially in their
#' correlations. This is done via:
#' \deqn{\rho^2 = (\frac{\rho}{\sigma^2_{e}})^{2}\sigma^2_{e}}{rho^2 = (rho/s^2)^2 * s^2}
#' and likewise for all later time points.
#'
#' The \strong{unstructured} residual covariance structure estimates
#' \eqn{\frac{n(n + 1)}{2}}{(n(n + 1))/2} parameters. It is unstructured
#' in that every variance and covariance is freely estimated with no
#' constraints. However, in most cases, this results in an overparameterized
#' model and is unestimable. The structure is represented in this table.
#'
#' \tabular{llllll}{
#' \tab t1 \tab t2 \tab t3 \tab \ldots \tab tn \cr
#' t1 \tab \eqn{\sigma^{2}_{e1}}{s1^2} \tab \tab \tab \ldots \tab \cr
#' t2 \tab \eqn{\rho_{1}}{rho1} \tab \eqn{\sigma^{2}_{e2}}{s2^2} \tab \tab \ldots \tab \cr
#' t3 \tab \eqn{\rho_{2}}{rho2} \tab \eqn{\rho_{3}}{rho3} \tab \eqn{\sigma^{2}_{e3}}{s3^2} \tab \ldots \tab \cr
#' \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \tab \ldots \cr
#' tn \tab \eqn{\rho_{5}}{rho5} \tab \eqn{\rho_{6}}{rho6} \tab \eqn{\rho_{7}}{rho7} \tab \ldots \tab \eqn{\sigma^{2}_{en}}{sn^2} \cr
#' }
#'
#' @param x input character vector of variable names, ordered by time
#' @param type A character string indicating the type of residual covariance
#' structure to be used. Defaults to \sQuote{homogenous}. Current options include
#' \sQuote{homogenous}, \sQuote{heterogenous}, \sQuote{cs} for compound symmetric,
#' \sQuote{toeplitz} for banded toeplitz, \sQuote{ar} for autoregressive, and
#' \sQuote{un} for unstructured.
#' @param r a character vector of the base label to name covariance parameters.
#' Defaults to \sQuote{rho}.
#' @param e a character vector of the error variance of the variable.
#' Used to create constraints on the covariance parameters. Defaults to \sQuote{e}.
#' @param collapse whether to collapse the covariance code using \sQuote{PWITH}. Note that
#' at the time of writing, Mplus does not allow more than 80 characters per row.
#' Defaults to \code{FALSE}.
#' @return A named character vector of class \sQuote{MplusRstructure} with four elements:
#' \item{all}{A character string collapsing all other sections.}
#' \item{Variances}{A character string containing all of the variances.}
#' \item{Covariances}{A character string containing all of the
#' covariances, properly labelled to allow constraints and the
#' autoregressive residual covariance structure.}
#' \item{Constraints}{A character string containing the \sQuote{MODEL CONSTRAINT}
#' section and code needed to parameterize the residual covariance structure
#' as autoregressive.}
#' @keywords interface
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#' # all five structures collapsing
#' mplusRcov(letters[1:4], "homogenous", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "heterogenous", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "cs", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "toeplitz", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "ar", "rho", "e", TRUE)
#' mplusRcov(letters[1:4], "un", "rho", "e", TRUE)
#'
#' # all five structures without collapsing
#' # useful for long names or many variables
#' # where a line may cross 80 characters
#' mplusRcov(letters[1:4], "homogenous", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "heterogenous", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "cs", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "toeplitz", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "ar", "rho", "e", FALSE)
#' mplusRcov(letters[1:4], "un", "rho", "e", FALSE)
mplusRcov <- function(x, type = c("homogenous", "heterogenous", "cs", "toeplitz", "ar", "un"),
                      r = "rho", e = "e", collapse = FALSE) {
  type <- match.arg(type)

  ## Helper: pairwise covariances all fixed at zero ("v1 WITH v2@0;").
  ## Serves the homogenous/heterogenous structures directly and, with the
  ## "@0" stripped afterwards, the unstructured case.
  zeroCov <- function(vars, collapse) {
    n <- length(vars)
    fixed <- paste0(vars, "@0")
    pieces <- vapply(seq_len(n - 1), function(i) {
      later <- fixed[(i + 1):n]
      if (collapse) {
        paste0(vars[i], " WITH ", paste(later, collapse = " "), ";")
      } else {
        paste(paste0(vars[i], " WITH ", later, ";"), collapse = "\n")
      }
    }, character(1))
    list(Covariances = paste(pieces, collapse = "\n"), Constraints = "")
  }

  ## Helper: banded covariances where all pairs the same distance apart share
  ## one parameter label; covers toeplitz, compound symmetric, and ar.
  bandCov <- function(vars, r, e, collapse, type) {
    n <- length(vars)
    ## bands[[d]] is a 2-row matrix of (row, column) index pairs at lag d
    bands <- lapply(seq_len(n - 1), function(d) {
      vapply(seq_len(n - d), function(i) c(i, i + d), integer(2))
    })
    nb <- length(bands)
    ## cs reuses a single label; toeplitz/ar label bands r, r2, r3, ...
    labs <- if (type == "cs") {
      rep(r, nb)
    } else {
      paste0(r, c("", seq_len(nb)[-1]))
    }
    pieces <- vapply(seq_len(nb), function(d) {
      m <- bands[[d]]
      if (collapse) {
        lhs <- paste(vars[m[1, ]], collapse = " ")
        rhs <- paste(vars[m[2, ]], collapse = " ")
        paste0(paste(c(lhs, rhs), collapse = " PWITH "), paste0(" (", labs[d], ");"))
      } else {
        paste(vapply(seq_len(ncol(m)), function(j) {
          paste0(vars[m[1, j]], " WITH ", vars[m[2, j]], paste0(" (", labs[d], ");"))
        }, character(1)), collapse = "\n")
      }
    }, character(1))
    ## ar additionally needs MODEL CONSTRAINT code forcing the covariances to
    ## decay exponentially: rho_k = ((rho / e)^k) * e
    constraint <- ""
    if (type == "ar") {
      eqs <- vapply(seq_len(nb)[-1], function(k) {
        paste0(" ", labs[k], " = ((", r, "/", e, ")^", k, ") * ", e, ";")
      }, character(1))
      constraint <- paste(c("MODEL CONSTRAINT: \n",
                            paste(eqs, collapse = "\n"), "\n"), collapse = "")
    }
    list(Covariances = paste(pieces, collapse = "\n"), Constraints = constraint)
  }

  ## Variance line: a single shared label for the equal-variance structures,
  ## freely estimated for heterogenous and unstructured.
  varlist <- paste(x, collapse = " ")
  V <- if (type %in% c("heterogenous", "un")) {
    paste0(varlist, ";")
  } else {
    paste0(varlist, " (", e, ");")
  }

  Rcov <- if (type %in% c("homogenous", "heterogenous")) {
    zeroCov(x, collapse)
  } else if (type == "un") {
    ## unstructured: same pair listing as zeroCov but freely estimated
    lapply(zeroCov(x, collapse), function(s) gsub("@0", "", s))
  } else {
    bandCov(x, r, e, collapse, type)
  }

  out <- c(Variances = V, Rcov)
  out <- c(all = paste(out, collapse = "\n"), out)
  class(out) <- "MplusRstructure"
  out
}
#' Extract parameters from a data frame of Mplus estimates
#'
#' This is a simple convenience function designed to facilitate
#' looking at specific parameter types by easily return a subset
#' of a data frame with those types only. It is designed to follow up
#' the results returned from the \code{\link{readModels}} function.
#'
#' @param x A data frame (specifically the type returned by \code{readModels}) containing
#' parameters. Should be specific such as unstandardized and the data frame must have a
#' column called \sQuote{paramHeader}.
#' @param params A character string indicating the types of parameters to be returned.
#' Options currently include \sQuote{regression}, \sQuote{loading}, \sQuote{undirected},
#' \sQuote{expectation}, \sQuote{variability}, and \sQuote{new} for new/additional parameters.
#' Regressions include regression of one variable
#' \code{ON} another. \sQuote{loading} include indicator variables (which are assumed caused by the
#' underlying latent variable) and variables in latent growth models (\code{BY} or \code{|}).
#' Undirected paths currently only include covariances, indicated by the \code{WITH}
#' syntax in Mplus. Expectation paths are the unconditional or conditional expectations of
#' variables. In other words those parameters related to the first moments. For independent
#' variables, these are the means, \eqn{E(X)} and the conditional means or intercepts,
#' \eqn{E(X | f(\theta))}{E(X | f(theta))} where \eqn{f(\theta)}{f(theta)} is the model,
#' some function of the parameters, \eqn{\theta}{theta}. Finally \sQuote{variability}
#' refers to both variances and residual variances, corresponding to the second moments.
#' As with the expectations, variances are unconditional for variables that are not
#' predicted or conditioned on any other variable in the model whereas residual variances
#' are conditional on the model. Note that \R uses fuzzy matching so that each of these
#' can be called via shorthand, \sQuote{r}, \sQuote{l}, \sQuote{u}, \sQuote{e}, and \sQuote{v}.
#' @return A subset data frame with the parameters of interest.
#' @seealso \code{\link{readModels}}
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @keywords utils
#' @export
#' @examples
#' \dontrun{
#' test <- mplusObject(
#' TITLE = "test the MplusAutomation Package and my Wrapper;",
#' MODEL = "
#' mpg ON wt hp;
#' wt WITH hp;",
#' usevariables = c("mpg", "wt", "hp"),
#' rdata = mtcars)
#'
#' res <- mplusModeler(test, "mtcars.dat", modelout = "model1.inp", run = 1L)
#'
#' # store just the unstandardized parameters in 'd'
#' d <- res$results$parameters$unstandardized
#' # extract just regression parameters
#' paramExtract(d, "regression")
#' # extract other types of parameters using shorthand
#' paramExtract(d, "u")
#' paramExtract(d, "e")
#' paramExtract(d, "v")
#' }
paramExtract <- function(x, params = c("regression", "loading", "undirected", "expectation", "variability", "new")) {
  ## Subset a readModels() parameter data frame to one type of parameter.
  ##
  ## x      : data frame with a 'paramHeader' column (e.g.
  ##          res$results$parameters$unstandardized)
  ## params : parameter type; R's partial matching allows shorthand
  ##          ("r", "l", "u", "e", "v")
  ## returns: matching rows with a "type" attribute, or NULL if none match
  params <- match.arg(params)
  ## paramHeader suffixes that identify each parameter type
  keys <- switch(params,
                 regression = c("ON"),
                 loading = c("BY", "\\|"),
                 undirected = c("WITH"),
                 expectation = c("Means", "Intercepts", "Thresholds"),
                 variability = c("Variances", "Residual.Variances"),
                 new = "New.Additional.Parameters")
  ## Accumulate one logical per row across all key patterns.  This replaces
  ## the earlier sapply() approach, whose result type changed with the input:
  ## a matrix for >1 row, a plain vector for exactly 1 row, and a *list* for
  ## a zero-row data frame, which made sum() error out.
  hits <- logical(nrow(x))
  for (pattern in keys) {
    hits <- hits | grepl(paste0(".*", pattern, "$"), x[, "paramHeader"])
  }
  index <- which(hits)
  ## catch cases where there is nothing to extract
  if (!length(index)) return(NULL)
  output <- x[index, , drop = FALSE]
  attr(output, "type") <- params
  return(output)
}
#' Check Mplus code for missing semicolons or too long lines.
#'
#' The function parses a character string containing Mplus code
#' and checks that every non blank line ends in either a colon or
#' a semicolon. In addition, it checks that every line is less than 90
#' characters, because Mplus ignores everything after 90 characters on a line
#' which can be a source of enigmatic errors.
#'
#' The function is fairly basic at the moment. It works by simply
#' removing blank space (spaces, tabs, etc.) and then if a line does not
#' terminate in a colon or semicolon, it returns a note and the line
#' number. Optionally, it can add semicolons to any lines missing them
#' and return the input with added semicolons. To check for lines that are too long,
#' all trailing (but not before) white space is removed, and then the number of
#' characters is checked.
#'
#' @param x a character string containing Mplus code.
#' @param add logical indicating whether or not to add semicolons to
#' lines that do not have them. Defaults to \code{FALSE}.
#' @return a character vector containing the input text and
#' optionally added semicolons.
#' @seealso \code{\link{mplusModeler}}
#' @keywords utils
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#'
#' # sample input
#' test <- "
#' MODEL:
#' mpg ON wt hp;
#' wt WITH hp
#' "
#' # check and return
#' cat(parseMplus(test), file=stdout(), fill=TRUE)
#' # add missing semicolons and return
#' cat(parseMplus(test, TRUE), file=stdout(), fill=TRUE)
#' # line that is too long for Mplus
#' test <- "
#' MODEL:
#' mpg cyl disp hp drat wt qsec vs am gear PWITH cyl disp hp drat wt qsec vs am gear carb;
#' "
#' cat(parseMplus(test), file=stdout())
#' closeAllConnections()
parseMplus <- function(x, add = FALSE) {
  ## Break the input string into individual lines of Mplus syntax; the
  ## connection is closed automatically when the function exits.
  con <- textConnection(x)
  on.exit(close(con), add = TRUE)
  lines <- readLines(con)

  ## A line passes the terminator check when, with all whitespace removed,
  ## it is either empty or ends in ':' or ';'.
  stripped <- gsub("[[:space:]]", "", lines)
  terminated <- nchar(stripped) < 1 | grepl(".*[:;]$", stripped)

  if (any(!terminated)) {
    bad <- which(!terminated)
    message(paste(c("The following lines are not empty and do not end in a : or ;.",
                    paste(bad, lines[bad], sep = ": ")), collapse = "\n"))
    if (add) {
      lines[bad] <- paste0(lines[bad], ";")
      message("added semicolons ';' to all of the above lines")
    } else {
      message("Rerun with parseMplus(add = TRUE) to add semicolons to all lines")
    }
  }

  ## Mplus ignores everything past column 90, so warn about long lines.
  ## Trailing (but not leading/embedded) whitespace does not count.
  trimmed <- gsub("[[:space:]]+$", "", lines)
  short_enough <- nchar(trimmed) <= 90
  if (any(!short_enough)) {
    long <- which(!short_enough)
    message(paste(c("The following lines are longer than 90 characters",
                    paste(long, lines[long], sep = ": ")), collapse = "\n"))
    message("Mplus will ignore everything after the 90th character on a line.\n",
            "Consider breaking the line(s)")
  }
  if (all(terminated & short_enough)) message("All ok")
  paste(lines, collapse = "\n")
}
#' Remove variable name length warnings from Mplus output file
#'
#' This function is designed to remove warnings in Mplus output files
#' about variable names being greater than 8 characters. It replaces them
#' with a note that the warnings were removed and a count of how many warnings
#' were removed.
#'
#' This is an internal function and not meant to be directly called by the
#' user under most circumstances. It is called by \code{\link{mplusModeler}}
#' when the \code{varwarnings = FALSE} argument is used.
#'
#' @param file A file name (including path if necessary) to an Mplus output
#' file. Note that you must have read and write privileges on the file
#' for this function to work properly.
#' @return Usually NULL. Called for the side effect of removing warnings in
#' Mplus output files. If \code{\link{file.access}} testing for write permissions
#' returns \code{FALSE}, a character string note that \code{rmVarWarnings}
#' could not run.
#' @seealso \code{\link{mplusModeler}}
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @keywords internal
#' @examples
#' # to do
rmVarWarnings <- function(file) {
  ## Read the whole Mplus output file into memory.
  dat <- readLines(file)
  ## Locate every occurrence of the variable-name-length warning text.
  init <- grep("Variable name contains more than 8 characters.", dat)
  ## Each warning is assumed to span a fixed 4-line layout: a "*" header
  ## line above the match, then an "Only ..." line and a "Variable ..."
  ## line below it.  Abort rather than delete lines if the layout differs.
  stopifnot(all(
    grepl("^\\*", dat[init - 1]),
    grepl("^[[:space:]]*Only", dat[init + 1]),
    grepl("^[[:space:]]*Variable", dat[init + 2])))
  ## All line numbers occupied by warnings: one line before through two
  ## lines after each match (4 lines per warning).
  index <- sort(rep(init, each = 4) + (-1:2L))
  ## Overwrite the very first warning line with a one-line summary count,
  ## then drop every remaining warning line.
  dat[index[1]] <- sprintf("%d Mplus warnings about variable name length removed",
    length(init))
  dat <- dat[-index[-1]]
  ## mode = 2 tests write permission; 0 means the file is writable.
  ## Remove the old file and write the cleaned content in its place.
  if (file.access(file, mode = 2)==0) {
    unlink(file)
    writeLines(dat, con = file)
  } else {
    ## No write access: return a note instead of failing (see @return).
    return("Could not access file")
  }
}
#' Change directory
#'
#' The function takes a path and changes the current working directory
#' to the path. If the directory specified in the path does not
#' currently exist, it will be created.
#'
#' The function has been designed to be platform independent,
#' although it has had limited testing. Path creation is done using
#' \code{file.path}, the existence of the directory is checked using
#' \code{file.exists} and the directory created with \code{dir.create}.
#' Only the first argument, is required. The other optional arguments
#' are handy when one wants to create many similar directories with a common base.
#'
#' @param base a character string with the base path to the directory. This is required.
#' @param pre an optional character string with the prefix to add to
#' the base path. Non character strings will be coerced to character class.
#' @param num an optional character string, prefixed by \code{pre}.
#' Non character strings will be coerced to character class.
#' @return NULL, changes the current working directory
#' @keywords utilities
#' @export
#' @author Joshua F. Wiley <jwiley.psych@@gmail.com>
#' @examples
#' \dontrun{
#' # an example just using the base
#' cd("~/testdir")
#'
#' # an example using the optional arguments
#' base <- "~/testdir"
#' pre <- "test_"
#'
#' cd(base, pre, 1)
#' cd(base, pre, 2)
#' }
cd <- function(base, pre, num) {
  ## 'base' must be a character path; 'pre' and 'num' are optional and are
  ## coerced to character when both are supplied.
  stopifnot(is.character(base))
  ## Use the scalar, short-circuiting && (not the elementwise &) for the
  ## scalar missing() checks -- the original '&' worked only by accident.
  if (!missing(pre) && !missing(num)) {
    pre <- as.character(pre)
    num <- as.character(num)
    newdir <- file.path(base, paste0(pre, num))
  } else {
    newdir <- file.path(base)
  }
  ## Create the target directory if it does not exist yet.  Note that
  ## dir.create() is not recursive here, so 'base' must already exist when
  ## the pre/num form is used.
  if (!file.exists(newdir)) {
    dir.create(newdir)
  }
  setwd(newdir)
  return(invisible(NULL))
}
|
## Standard testthat entry point (tests/testthat.R): attach the test
## framework and the package under test, then run every test file under
## tests/testthat/.
library(testthat)
library(mongopipe)
test_check("mongopipe")
| /tests/testthat.R | no_license | cran/mongopipe | R | false | false | 63 | r | library(testthat)
library(mongopipe)
test_check("mongopipe")
|
################################################################################
### R script to compare several conditions with the SARTools and DESeq2 packages
### Hugo Varet
### March 20th, 2018
### designed to be executed with SARTools 1.6.6
################################################################################
################################################################################
### parameters: to be modified by the user ###
################################################################################
rm(list=ls())                         # remove all the objects from the R session
## FIX: setwd() returns the *previous* working directory (invisibly), so the
## original 'workDir <- setwd(path)' stored the old directory and the later
## setwd(workDir) / writeReport.DESeq2(workDir = ...) calls pointed at the
## wrong place.  Store the path itself; the directory is entered below.
workDir <- "~/Documents/RNASeqProject/SARTools.DESeq2.genes.batch"  # working directory for the R session
projectName <- "SARTools.DESeq2.genes"  # name of the project
author <- "Julie Berhane"               # author of the statistical analysis/report
targetFile <- "./genes.target.txt"      # path to the design/target file
rawDir <- "./"                          # path to the directory containing raw counts files
featuresToRemove <- c("NULL")           # names of the features to be removed
                                        # (specific HTSeq-count information and rRNA for example)
                                        # NULL if no feature to remove
varInt <- "Treatment"                   # factor of interest
condRef <- "Untreated"                  # reference biological condition
batch <- "batch"                        # blocking factor: NULL (default) or "batch" for example
idColumn = 1                            # column with feature Ids (usually 1)
countColumn = 5                         # column with counts (2 for htseq-count, 7 for featurecounts, 5 for RSEM/Salmon, 4 for kallisto)
rowSkip = 0                             # rows to skip (not including header)
fitType <- "parametric"                 # mean-variance relationship: "parametric" (default), "local" or "mean"
cooksCutoff <- TRUE                     # TRUE/FALSE to perform the outliers detection (default is TRUE)
independentFiltering <- TRUE            # TRUE/FALSE to perform independent filtering (default is TRUE)
alpha <- 0.05                           # threshold of statistical significance
pAdjustMethod <- "BH"                   # p-value adjustment method: "BH" (default) or "BY"
typeTrans <- "VST"                      # transformation for PCA/clustering: "VST" or "rlog"
locfunc <- "median"                     # "median" (default) or "shorth" to estimate the size factors
colors <- c("dodgerblue","firebrick1",  # vector of colors of each biological condition on the plots
            "MediumVioletRed","SpringGreen")
forceCairoGraph <- FALSE
################################################################################
### running script ###
################################################################################
setwd(workDir)
if (!require("BiocManager")) install.packages("BiocManager"); library(BiocManager)
if (!require("DESeq2")) BiocManager::install("DESeq2"); library(DESeq2)
if (!require("edgeR")) BiocManager::install("edgeR"); library(edgeR)
if (!require("genefilter")) BiocManager::install("genefilter"); library(genefilter)
# PC Users only, install Rtools https://cran.r-project.org/bin/windows/Rtools/
if (!require("devtools")) install.packages("devtools"); library(devtools)
if (!require("SARTools")) install_github("KField-Bucknell/SARTools", build_vignettes=TRUE, force=TRUE); library(SARTools)
if (forceCairoGraph) options(bitmapType="cairo")
# checking parameters
checkParameters.DESeq2(projectName=projectName,author=author,targetFile=targetFile,
                       rawDir=rawDir,featuresToRemove=featuresToRemove,varInt=varInt,
                       condRef=condRef,batch=batch,fitType=fitType,cooksCutoff=cooksCutoff,
                       independentFiltering=independentFiltering,alpha=alpha,pAdjustMethod=pAdjustMethod,
                       typeTrans=typeTrans,locfunc=locfunc,colors=colors)
# loading target file
target <- loadTargetFile(targetFile=targetFile, varInt=varInt, condRef=condRef, batch=batch)
# loading counts
counts <- loadCountData(target=target, rawDir=rawDir, featuresToRemove=featuresToRemove,
                        skip=rowSkip, idColumn=idColumn, countColumn=countColumn)
# description plots
majSequences <- descriptionPlots(counts=counts, group=target[,varInt], col=colors)
# analysis with DESeq2
out.DESeq2 <- run.DESeq2(counts=counts, target=target, varInt=varInt, batch=batch,
                         locfunc=locfunc, fitType=fitType, pAdjustMethod=pAdjustMethod,
                         cooksCutoff=cooksCutoff, independentFiltering=independentFiltering, alpha=alpha)
# PCA + clustering
exploreCounts(object=out.DESeq2$dds, group=target[,varInt], typeTrans=typeTrans, col=colors)
# summary of the analysis (boxplots, dispersions, diag size factors, export table, nDiffTotal, histograms, MA plot)
summaryResults <- summarizeResults.DESeq2(out.DESeq2, group=target[,varInt], col=colors,
                                          independentFiltering=independentFiltering,
                                          cooksCutoff=cooksCutoff, alpha=alpha)
# save image of the R session
save.image(file=paste0(projectName, ".RData"))
# generating HTML report
writeReport.DESeq2(target=target, counts=counts, out.DESeq2=out.DESeq2, summaryResults=summaryResults,
                   majSequences=majSequences, workDir=workDir, projectName=projectName, author=author,
                   targetFile=targetFile, rawDir=rawDir, featuresToRemove=featuresToRemove, varInt=varInt,
                   condRef=condRef, batch=batch, fitType=fitType, cooksCutoff=cooksCutoff,
                   independentFiltering=independentFiltering, alpha=alpha, pAdjustMethod=pAdjustMethod,
                   typeTrans=typeTrans, locfunc=locfunc, colors=colors)
| /SARTools.DESeq2.transcripts.batch/Julie_genes_SARTools_DESeq2.batch.r | permissive | jfberhane/RNASeqProject | R | false | false | 6,207 | r | ################################################################################
### R script to compare several conditions with the SARTools and DESeq2 packages
### Hugo Varet
### March 20th, 2018
### designed to be executed with SARTools 1.6.6
################################################################################
################################################################################
### parameters: to be modified by the user ###
################################################################################
rm(list=ls()) # remove all the objects from the R session
workDir <- setwd("~/Documents/RNASeqProject/SARTools.DESeq2.genes.batch") # working directory for the R session
projectName <- "SARTools.DESeq2.genes" # name of the project
author <- "Julie Berhane" # author of the statistical analysis/report
targetFile <- "./genes.target.txt" # path to the design/target file
rawDir <- "./" # path to the directory containing raw counts files
featuresToRemove <- c("NULL") # names of the features to be removed
# (specific HTSeq-count information and rRNA for example)
# NULL if no feature to remove
varInt <- "Treatment" # factor of interest
condRef <- "Untreated" # reference biological condition
batch <- "batch" # blocking factor: NULL (default) or "batch" for example
idColumn = 1 # column with feature Ids (usually 1)
countColumn = 5 # column with counts (2 for htseq-count, 7 for featurecounts, 5 for RSEM/Salmon, 4 for kallisto)
rowSkip = 0 # rows to skip (not including header)
fitType <- "parametric" # mean-variance relationship: "parametric" (default), "local" or "mean"
cooksCutoff <- TRUE # TRUE/FALSE to perform the outliers detection (default is TRUE)
independentFiltering <- TRUE # TRUE/FALSE to perform independent filtering (default is TRUE)
alpha <- 0.05 # threshold of statistical significance
pAdjustMethod <- "BH" # p-value adjustment method: "BH" (default) or "BY"
typeTrans <- "VST" # transformation for PCA/clustering: "VST" or "rlog"
locfunc <- "median" # "median" (default) or "shorth" to estimate the size factors
colors <- c("dodgerblue","firebrick1", # vector of colors of each biological condition on the plots
"MediumVioletRed","SpringGreen")
forceCairoGraph <- FALSE
################################################################################
### running script ###
################################################################################
setwd(workDir)
if (!require("BiocManager")) install.packages("BiocManager"); library(BiocManager)
if (!require("DESeq2")) BiocManager::install("DESeq2"); library(DESeq2)
if (!require("edgeR")) BiocManager::install("edgeR"); library(edgeR)
if (!require("genefilter")) BiocManager::install("genefilter"); library(genefilter)
# PC Users only, install Rtools https://cran.r-project.org/bin/windows/Rtools/
if (!require("devtools")) install.packages("devtools"); library(devtools)
if (!require("SARTools")) install_github("KField-Bucknell/SARTools", build_vignettes=TRUE, force=TRUE); library(SARTools)
if (forceCairoGraph) options(bitmapType="cairo")
# checking parameters
checkParameters.DESeq2(projectName=projectName,author=author,targetFile=targetFile,
rawDir=rawDir,featuresToRemove=featuresToRemove,varInt=varInt,
condRef=condRef,batch=batch,fitType=fitType,cooksCutoff=cooksCutoff,
independentFiltering=independentFiltering,alpha=alpha,pAdjustMethod=pAdjustMethod,
typeTrans=typeTrans,locfunc=locfunc,colors=colors)
# loading target file
target <- loadTargetFile(targetFile=targetFile, varInt=varInt, condRef=condRef, batch=batch)
# loading counts
counts <- loadCountData(target=target, rawDir=rawDir, featuresToRemove=featuresToRemove,
skip=rowSkip, idColumn=idColumn, countColumn=countColumn)
# description plots
majSequences <- descriptionPlots(counts=counts, group=target[,varInt], col=colors)
# analysis with DESeq2
out.DESeq2 <- run.DESeq2(counts=counts, target=target, varInt=varInt, batch=batch,
locfunc=locfunc, fitType=fitType, pAdjustMethod=pAdjustMethod,
cooksCutoff=cooksCutoff, independentFiltering=independentFiltering, alpha=alpha)
# PCA + clustering
exploreCounts(object=out.DESeq2$dds, group=target[,varInt], typeTrans=typeTrans, col=colors)
# summary of the analysis (boxplots, dispersions, diag size factors, export table, nDiffTotal, histograms, MA plot)
summaryResults <- summarizeResults.DESeq2(out.DESeq2, group=target[,varInt], col=colors,
independentFiltering=independentFiltering,
cooksCutoff=cooksCutoff, alpha=alpha)
# save image of the R session
save.image(file=paste0(projectName, ".RData"))
# generating HTML report
writeReport.DESeq2(target=target, counts=counts, out.DESeq2=out.DESeq2, summaryResults=summaryResults,
majSequences=majSequences, workDir=workDir, projectName=projectName, author=author,
targetFile=targetFile, rawDir=rawDir, featuresToRemove=featuresToRemove, varInt=varInt,
condRef=condRef, batch=batch, fitType=fitType, cooksCutoff=cooksCutoff,
independentFiltering=independentFiltering, alpha=alpha, pAdjustMethod=pAdjustMethod,
typeTrans=typeTrans, locfunc=locfunc, colors=colors)
|
## The functions below are used to compute and cache the inverse of a matrix
## makeCacheMatrix creates a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the matrix inverse
## 4. get the value of the matrix inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse; NULL means "not computed yet" or "invalidated".
  matrix_inv <- NULL
  set <- function(y) {
    x <<- y
    ## FIX: invalidate the cache held in 'matrix_inv'.  The original
    ## assigned 'inverse <<- NULL', a variable that does not exist in any
    ## enclosing frame -- it leaked into the global environment and left a
    ## stale cached inverse after the matrix was replaced.
    matrix_inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) matrix_inv <<- inverse
  getinverse <- function() matrix_inv
  ## Expose the four accessors as a named list.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The following function returns the inverse of the matrix that was
## created with the function above. However, it first checks to see
## if the inverse of the matrix has already been calculated. If so, it gets the
## inverse from the cache and skips the computation. Otherwise, it calculates
## the inverse matrix and sets this to be the value of the inverse in the cache
## using the setinverse function.
## Return the inverse of the special "cache matrix" object created by
## makeCacheMatrix.  A previously computed inverse is reused from the
## cache (with a message); otherwise the inverse is computed with
## solve(), stored via setinverse(), and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | Eamonnol1981/ProgrammingAssignment2 | R | false | false | 1,437 | r | ## The functions below are used to compute and cache the inverse of a matrix
## makeCacheMatrix creates a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the matrix inverse
## 4. get the value of the matrix inverse
makeCacheMatrix <- function(x = matrix()) {
matrix_inv <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(inverse) matrix_inv <<- inverse
getinverse <- function() matrix_inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The following function returns the inverse of the matrix that was
## created with the function above. However, it first checks to see
## if the inverse of the matrix has already been calculated. If so, it gets the
## inverse from the cache and skips the computation. Otherwise, it calculates
## the inverse matrix and sets this to be the value of the inverse in the cache
## using the setinverse function.
cacheSolve <- function(x, ...) {
matrix_inv<- x$getinverse()
if(!is.null(matrix_inv)) {
message("getting cached data")
return(matrix_inv)
}
data <- x$get()
matrix_inv <- solve(data, ...)
x$setinverse(matrix_inv)
matrix_inv
}
|
# factor numeric --------------------------------------------------------------
## Convert a factor whose levels are numeric strings back to their numeric
## values (indexing levels() by the integer codes avoids the classic
## as.numeric(factor) pitfall of returning the codes themselves).
## NOTE(review): the dotted name also looks like an S3 method signature for
## factors -- presumably intentional shorthand here; confirm.
as.numeric.factor <- function(x) as.numeric(levels(x))[x]
# Panels with correlation -------------------------------------------------
## Custom variant of psych-style pairs.panels: a scatterplot matrix with
## histograms (plus optional density/rug) on the diagonal, correlation
## coefficients with significance stars and a 95% CI in the upper triangle,
## and smoothers / lm fits / confidence ellipses in the lower triangle.
## NOTE(review): panel.cor calls rcorr() and CIrho(), which are not defined
## in this file -- presumably Hmisc::rcorr and DescTools::CIrho; confirm the
## packages are attached before use.
pairs.panels.new = function (x, smooth = TRUE, scale = FALSE, density = TRUE, ellipses = TRUE,
                             digits = 2, method = "pearson", pch = 20, lm = FALSE, cor = TRUE,
                             jiggle = FALSE, factor = 2, hist.col = "grey", show.points = TRUE,
                             rug = TRUE, breaks = "Sturges", cex.cor = 1, ...)
{
  ## Diagonal panel: histogram rescaled to [0, 1], optional density curve
  ## and rug of the raw values.
  "panel.hist.density" <- function(x, ...) {
    usr <- par("usr")
    on.exit(par(usr))
    par(usr = c(usr[1:2], 0, 1.5))
    h <- hist(x, breaks = breaks, plot = FALSE)
    breaks <- h$breaks
    nB <- length(breaks)
    y <- h$counts
    y <- y/max(y)
    rect(breaks[-nB], 0, breaks[-1], y, col = hist.col)
    if (density) {
      ## density() can fail on degenerate input; silently skip the curve.
      tryd <- try(d <- density(x, na.rm = TRUE, bw = "nrd",
                               adjust = 1.2), silent = TRUE)
      if (class(tryd) != "try-error") {
        d$y <- d$y/max(d$y)
        lines(d)
      }
    }
    if (rug)
      rug(x)
  }
  ## Upper panel: correlation coefficient, significance stars (from the
  ## rcorr p-value) and a 95% confidence interval for r.
  "panel.cor" <- function(x, y, digits = 2, prefix = "", ...) {
    usr <- par("usr")
    on.exit(par(usr))
    par(usr = c(0, 1, 0, 1))
    r <- cor(x, y, use = "pairwise", method = method)
    txt <- format(c(round(r, digits), 0.123456789), digits = digits)[1]
    txt <- paste(prefix, txt, sep = "")
    cex <- cex.cor * 0.8/strwidth(txt)
    test <- rcorr(x,y, type = method)
    CI <- CIrho(test$r[2], test$n[2], level = 0.95)
    Signif <- symnum(test$P[2], corr = FALSE, na = FALSE,
                     cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
                     symbols = c("***", "**", "*", ".", " "))
    if (scale) {
      ## Scale text size with |r| (floored at 0.25).
      cex1 <- cex * abs(r)
      if (cex1 < 0.25)
        cex1 <- 0.25
      text(0.5, 0.5, txt, cex = 1)
    }
    else {
      text(.8, .8, Signif, cex=1.2, col=2)
      text(0.5, 0.6, bquote("r"~.(paste0(' = ',txt))), cex = 1.2)
      text(.5,.3,paste("[",round(CI[2],2),", ",round(CI[3],2),"]",sep=""), cex=1.2)
    }
  }
  ## Lower panel: points + lowess smoother + correlation ellipse.
  "panel.smoother" <- function(x, y, pch = par("pch"), col.smooth = "red",
                               span = 2/3, iter = 3, ...) {
    xm <- mean(x, na.rm = TRUE)
    ym <- mean(y, na.rm = TRUE)
    xs <- sd(x, na.rm = TRUE)
    ys <- sd(y, na.rm = TRUE)
    r = cor(x, y, use = "pairwise", method = method)
    if (jiggle) {
      x <- jitter(x, factor = factor)
      y <- jitter(y, factor = factor)
    }
    if (show.points)
      points(x, y, pch = pch, ...)
    ok <- is.finite(x) & is.finite(y)
    if (any(ok))
      lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
            col = col.smooth, ...)
    panel.ellipse1(xm, ym, xs, ys, r, col.smooth = col.smooth,
                   ...)
  }
  ## Same as panel.smoother but without the correlation ellipse.
  "panel.smoother.no.noellipse" <- function(x, y, pch = par("pch"),
                                            col.smooth = "red", span = 2/3, iter = 3, ...) {
    xm <- mean(x, na.rm = TRUE)
    ym <- mean(y, na.rm = TRUE)
    xs <- sd(x, na.rm = TRUE)
    ys <- sd(y, na.rm = TRUE)
    r = cor(x, y, use = "pairwise", method = method)
    if (jiggle) {
      x <- jitter(x, factor = factor)
      y <- jitter(y, factor = factor)
    }
    if (show.points)
      points(x, y, pch = pch, ...)
    ok <- is.finite(x) & is.finite(y)
    if (any(ok))
      lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
            col = col.smooth, ...)
  }
  ## Panel: points + least-squares regression line.
  "panel.lm" <- function(x, y, pch = par("pch"), col.lm = "red",
                         ...) {
    ymin <- min(y)
    ymax <- max(y)
    xmin <- min(x)
    xmax <- max(x)
    ylim <- c(min(ymin, xmin), max(ymax, xmax))
    xlim <- ylim
    if (jiggle) {
      x <- jitter(x, factor = factor)
      y <- jitter(y, factor = factor)
    }
    points(x, y, pch = pch, ylim = ylim, xlim = xlim, ...)
    ok <- is.finite(x) & is.finite(y)
    if (any(ok))
      abline(lm(y[ok] ~ x[ok]), col = col.lm, ...)
  }
  ## Panel: regression line plus correlation ellipse.
  "panel.lm.ellipse" <- function(x, y, pch = par("pch"), col.lm = "red",
                                 ...) {
    ymin <- min(y)
    ymax <- max(y)
    xmin <- min(x)
    xmax <- max(x)
    ylim <- c(min(ymin, xmin), max(ymax, xmax))
    xlim <- ylim
    if (jiggle) {
      x <- jitter(x, factor = factor)
      y <- jitter(y, factor = factor)
    }
    points(x, y, pch = pch, ylim = ylim, xlim = xlim, ...)
    ok <- is.finite(x) & is.finite(y)
    if (any(ok))
      abline(lm(y[ok] ~ x[ok]), col = col.lm, ...)
    xm <- mean(x, na.rm = TRUE)
    ym <- mean(y, na.rm = TRUE)
    xs <- sd(x, na.rm = TRUE)
    ys <- sd(y, na.rm = TRUE)
    r = cor(x, y, use = "pairwise", method = method)
    panel.ellipse1(xm, ym, xs, ys, r, col.smooth = col.lm,
                   ...)
  }
  ## Helper: draw a correlation ellipse centred at (x, y) with sds xs/ys
  ## and correlation r, approximated by 'segments' line segments.
  "panel.ellipse1" <- function(x = 0, y = 0, xs = 1, ys = 1,
                               r = 0, col.smooth, add = TRUE, segments = 51, ...) {
    angles <- (0:segments) * 2 * pi/segments
    unit.circle <- cbind(cos(angles), sin(angles))
    if (!is.na(r)) {
      if (abs(r) > 0)
        theta <- sign(r)/sqrt(2)
      else theta = 1/sqrt(2)
      ## Shear/scale the unit circle into the correlation ellipse.
      shape <- diag(c(sqrt(1 + r), sqrt(1 - r))) %*% matrix(c(theta,
        theta, -theta, theta), ncol = 2, byrow = TRUE)
      ellipse <- unit.circle %*% shape
      ellipse[, 1] <- ellipse[, 1] * xs + x
      ellipse[, 2] <- ellipse[, 2] * ys + y
      points(x, y, pch = 19, col = col.smooth, cex = 1.5)
      lines(ellipse, ...)
    }
  }
  ## Panel: points plus correlation ellipse (no smoother).
  "panel.ellipse" <- function(x, y, pch = par("pch"), col.smooth = "red",
                              ...) {
    segments = 51
    xm <- mean(x, na.rm = TRUE)
    ym <- mean(y, na.rm = TRUE)
    xs <- sd(x, na.rm = TRUE)
    ys <- sd(y, na.rm = TRUE)
    r = cor(x, y, use = "pairwise", method = method)
    if (jiggle) {
      x <- jitter(x, factor = factor)
      y <- jitter(y, factor = factor)
    }
    if (show.points)
      points(x, y, pch = pch, ...)
    angles <- (0:segments) * 2 * pi/segments
    unit.circle <- cbind(cos(angles), sin(angles))
    if (!is.na(r)) {
      if (abs(r) > 0)
        theta <- sign(r)/sqrt(2)
      else theta = 1/sqrt(2)
      shape <- diag(c(sqrt(1 + r), sqrt(1 - r))) %*% matrix(c(theta,
        theta, -theta, theta), ncol = 2, byrow = TRUE)
      ellipse <- unit.circle %*% shape
      ellipse[, 1] <- ellipse[, 1] * xs + xm
      ellipse[, 2] <- ellipse[, 2] * ys + ym
      points(xm, ym, pch = 19, col = col.smooth, cex = 1.5)
      if (ellipses)
        lines(ellipse, ...)
    }
  }
  ## Preserve and restore the caller's graphics parameters.
  old.par <- par(no.readonly = TRUE)
  on.exit(par(old.par))
  if (missing(cex.cor))
    cex.cor <- 1
  ## Coerce character columns to numeric codes and flag them with a "*"
  ## suffix on the column name.
  for (i in 1:ncol(x)) {
    if (is.character(x[[i]])) {
      x[[i]] <- as.numeric(as.factor(x[[i]]))
      colnames(x)[i] <- paste(colnames(x)[i], "*", sep = "")
    }
  }
  ## Dispatch to pairs() with the panel combination selected by the
  ## lm / smooth / ellipses / cor flags.
  if (!lm) {
    if (smooth) {
      if (ellipses) {
        pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
              lower.panel = panel.smoother, pch = pch, ...)
      }
      else {
        pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
              lower.panel = panel.smoother.no.noellipse,
              pch = pch, ...)
      }
    }
    else {
      pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
            lower.panel = panel.ellipse, pch = pch, ...)
    }
  }
  else {
    if (!cor) {
      if (ellipses) {
        pairs(x, diag.panel = panel.hist.density, upper.panel = panel.lm.ellipse,
              lower.panel = panel.lm.ellipse, pch = pch,
              ...)
      }
      else {
        pairs(x, diag.panel = panel.hist.density, upper.panel = panel.lm,
              lower.panel = panel.lm, pch = pch, ...)
      }
    }
    else {
      if (ellipses) {
        pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
              lower.panel = panel.lm.ellipse, pch = pch,
              ...)
      }
      else {
        pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
              lower.panel = panel.lm, pch = pch, ...)
      }
    }
  }
}
# x is a matrix containing the data -------------------------------------------------------------
# method : correlation method. "pearson"" or "spearman"" is supported
# removeTriangle : remove upper or lower triangle
# results : if "html" or "latex"
# the results will be displayed in html or latex format
corstars <- function(x, method=c("pearson", "spearman"), removeTriangle=c("upper", "lower"),
                     result=c("none", "html", "latex")){
  ## Compute the correlation matrix; rcorr() comes from Hmisc.
  require(Hmisc)
  x <- as.matrix(x)
  correlation_matrix <- rcorr(x, type=method[1])
  R <- correlation_matrix$r  # Matrix of correlation coefficients
  p <- correlation_matrix$P  # Matrix of p-values
  ## Define notions for significance levels; spacing is important so that
  ## every cell has the same width.
  mystars <- ifelse(p < .0001, "****", ifelse(p < .001, "*** ", ifelse(p < .01, "** ", ifelse(p < .05, "* ", " "))))
  ## Truncate the correlation matrix to two decimals (the padding column of
  ## -1.11 forces a uniform "-x.xx" width, and is dropped right away).
  R <- format(round(cbind(rep(-1.11, ncol(x)), R), 2))[,-1]
  ## Build a new matrix that pairs each correlation with its stars.
  Rnew <- matrix(paste(R, mystars, sep=""), ncol=ncol(x))
  diag(Rnew) <- paste(diag(R), " ", sep="")
  rownames(Rnew) <- colnames(x)
  colnames(Rnew) <- paste(colnames(x), "", sep="")
  ## Blank out the requested triangle (including the diagonal).
  if(removeTriangle[1]=="upper"){
    Rnew <- as.matrix(Rnew)
    Rnew[upper.tri(Rnew, diag = TRUE)] <- ""
    Rnew <- as.data.frame(Rnew)
  }
  else if(removeTriangle[1]=="lower"){
    Rnew <- as.matrix(Rnew)
    Rnew[lower.tri(Rnew, diag = TRUE)] <- ""
    Rnew <- as.data.frame(Rnew)
  }
  ## FIX: drop the last (now empty) column explicitly.  The original
  ## 'Rnew[1:length(Rnew)-1]' only worked by accident: operator precedence
  ## makes it (1:length(Rnew)) - 1, i.e. 0:(n-1), and the 0 index is
  ## silently ignored by R's subsetting.
  Rnew <- cbind(Rnew[seq_len(length(Rnew) - 1)])
  if (result[1]=="none") return(Rnew)
  else{
    ## NOTE(review): xtable() is called without loading the xtable package
    ## anywhere in this file -- confirm it is attached by the caller.
    if(result[1]=="html") print(xtable(Rnew), type="html")
    else print(xtable(Rnew), type="latex")
  }
}
| /scripts/functions/fun.panel.R | no_license | AnneOkk/psyCorona-Analysis | R | false | false | 10,161 | r | # factor numeric --------------------------------------------------------------
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
# Panels with correlation -------------------------------------------------
pairs.panels.new = function (x, smooth = TRUE, scale = FALSE, density = TRUE, ellipses = TRUE,
digits = 2, method = "pearson", pch = 20, lm = FALSE, cor = TRUE,
jiggle = FALSE, factor = 2, hist.col = "grey", show.points = TRUE,
rug = TRUE, breaks = "Sturges", cex.cor = 1, ...)
{
"panel.hist.density" <- function(x, ...) {
usr <- par("usr")
on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5))
h <- hist(x, breaks = breaks, plot = FALSE)
breaks <- h$breaks
nB <- length(breaks)
y <- h$counts
y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col = hist.col)
if (density) {
tryd <- try(d <- density(x, na.rm = TRUE, bw = "nrd",
adjust = 1.2), silent = TRUE)
if (class(tryd) != "try-error") {
d$y <- d$y/max(d$y)
lines(d)
}
}
if (rug)
rug(x)
}
"panel.cor" <- function(x, y, digits = 2, prefix = "", ...) {
usr <- par("usr")
on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- cor(x, y, use = "pairwise", method = method)
txt <- format(c(round(r, digits), 0.123456789), digits = digits)[1]
txt <- paste(prefix, txt, sep = "")
cex <- cex.cor * 0.8/strwidth(txt)
test <- rcorr(x,y, type = method)
CI <- CIrho(test$r[2], test$n[2], level = 0.95)
Signif <- symnum(test$P[2], corr = FALSE, na = FALSE,
cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
symbols = c("***", "**", "*", ".", " "))
if (scale) {
cex1 <- cex * abs(r)
if (cex1 < 0.25)
cex1 <- 0.25
text(0.5, 0.5, txt, cex = 1)
}
else {
text(.8, .8, Signif, cex=1.2, col=2)
text(0.5, 0.6, bquote("r"~.(paste0(' = ',txt))), cex = 1.2)
text(.5,.3,paste("[",round(CI[2],2),", ",round(CI[3],2),"]",sep=""), cex=1.2)
}
}
"panel.smoother" <- function(x, y, pch = par("pch"), col.smooth = "red",
span = 2/3, iter = 3, ...) {
xm <- mean(x, na.rm = TRUE)
ym <- mean(y, na.rm = TRUE)
xs <- sd(x, na.rm = TRUE)
ys <- sd(y, na.rm = TRUE)
r = cor(x, y, use = "pairwise", method = method)
if (jiggle) {
x <- jitter(x, factor = factor)
y <- jitter(y, factor = factor)
}
if (show.points)
points(x, y, pch = pch, ...)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
col = col.smooth, ...)
panel.ellipse1(xm, ym, xs, ys, r, col.smooth = col.smooth,
...)
}
"panel.smoother.no.noellipse" <- function(x, y, pch = par("pch"),
col.smooth = "red", span = 2/3, iter = 3, ...) {
xm <- mean(x, na.rm = TRUE)
ym <- mean(y, na.rm = TRUE)
xs <- sd(x, na.rm = TRUE)
ys <- sd(y, na.rm = TRUE)
r = cor(x, y, use = "pairwise", method = method)
if (jiggle) {
x <- jitter(x, factor = factor)
y <- jitter(y, factor = factor)
}
if (show.points)
points(x, y, pch = pch, ...)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
lines(stats::lowess(x[ok], y[ok], f = span, iter = iter),
col = col.smooth, ...)
}
"panel.lm" <- function(x, y, pch = par("pch"), col.lm = "red",
...) {
ymin <- min(y)
ymax <- max(y)
xmin <- min(x)
xmax <- max(x)
ylim <- c(min(ymin, xmin), max(ymax, xmax))
xlim <- ylim
if (jiggle) {
x <- jitter(x, factor = factor)
y <- jitter(y, factor = factor)
}
points(x, y, pch = pch, ylim = ylim, xlim = xlim, ...)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
abline(lm(y[ok] ~ x[ok]), col = col.lm, ...)
}
"panel.lm.ellipse" <- function(x, y, pch = par("pch"), col.lm = "red",
...) {
ymin <- min(y)
ymax <- max(y)
xmin <- min(x)
xmax <- max(x)
ylim <- c(min(ymin, xmin), max(ymax, xmax))
xlim <- ylim
if (jiggle) {
x <- jitter(x, factor = factor)
y <- jitter(y, factor = factor)
}
points(x, y, pch = pch, ylim = ylim, xlim = xlim, ...)
ok <- is.finite(x) & is.finite(y)
if (any(ok))
abline(lm(y[ok] ~ x[ok]), col = col.lm, ...)
xm <- mean(x, na.rm = TRUE)
ym <- mean(y, na.rm = TRUE)
xs <- sd(x, na.rm = TRUE)
ys <- sd(y, na.rm = TRUE)
r = cor(x, y, use = "pairwise", method = method)
panel.ellipse1(xm, ym, xs, ys, r, col.smooth = col.lm,
...)
}
"panel.ellipse1" <- function(x = 0, y = 0, xs = 1, ys = 1,
r = 0, col.smooth, add = TRUE, segments = 51, ...) {
angles <- (0:segments) * 2 * pi/segments
unit.circle <- cbind(cos(angles), sin(angles))
if (!is.na(r)) {
if (abs(r) > 0)
theta <- sign(r)/sqrt(2)
else theta = 1/sqrt(2)
shape <- diag(c(sqrt(1 + r), sqrt(1 - r))) %*% matrix(c(theta,
theta, -theta, theta), ncol = 2, byrow = TRUE)
ellipse <- unit.circle %*% shape
ellipse[, 1] <- ellipse[, 1] * xs + x
ellipse[, 2] <- ellipse[, 2] * ys + y
points(x, y, pch = 19, col = col.smooth, cex = 1.5)
lines(ellipse, ...)
}
}
"panel.ellipse" <- function(x, y, pch = par("pch"), col.smooth = "red",
...) {
segments = 51
xm <- mean(x, na.rm = TRUE)
ym <- mean(y, na.rm = TRUE)
xs <- sd(x, na.rm = TRUE)
ys <- sd(y, na.rm = TRUE)
r = cor(x, y, use = "pairwise", method = method)
if (jiggle) {
x <- jitter(x, factor = factor)
y <- jitter(y, factor = factor)
}
if (show.points)
points(x, y, pch = pch, ...)
angles <- (0:segments) * 2 * pi/segments
unit.circle <- cbind(cos(angles), sin(angles))
if (!is.na(r)) {
if (abs(r) > 0)
theta <- sign(r)/sqrt(2)
else theta = 1/sqrt(2)
shape <- diag(c(sqrt(1 + r), sqrt(1 - r))) %*% matrix(c(theta,
theta, -theta, theta), ncol = 2, byrow = TRUE)
ellipse <- unit.circle %*% shape
ellipse[, 1] <- ellipse[, 1] * xs + xm
ellipse[, 2] <- ellipse[, 2] * ys + ym
points(xm, ym, pch = 19, col = col.smooth, cex = 1.5)
if (ellipses)
lines(ellipse, ...)
}
}
old.par <- par(no.readonly = TRUE)
on.exit(par(old.par))
if (missing(cex.cor))
cex.cor <- 1
for (i in 1:ncol(x)) {
if (is.character(x[[i]])) {
x[[i]] <- as.numeric(as.factor(x[[i]]))
colnames(x)[i] <- paste(colnames(x)[i], "*", sep = "")
}
}
if (!lm) {
if (smooth) {
if (ellipses) {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
lower.panel = panel.smoother, pch = pch, ...)
}
else {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
lower.panel = panel.smoother.no.noellipse,
pch = pch, ...)
}
}
else {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
lower.panel = panel.ellipse, pch = pch, ...)
}
}
else {
if (!cor) {
if (ellipses) {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.lm.ellipse,
lower.panel = panel.lm.ellipse, pch = pch,
...)
}
else {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.lm,
lower.panel = panel.lm, pch = pch, ...)
}
}
else {
if (ellipses) {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
lower.panel = panel.lm.ellipse, pch = pch,
...)
}
else {
pairs(x, diag.panel = panel.hist.density, upper.panel = panel.cor,
lower.panel = panel.lm, pch = pch, ...)
}
}
}
}
# x is a matrix containing the data -------------------------------------------------------------
# method : correlation method. "pearson"" or "spearman"" is supported
# removeTriangle : remove upper or lower triangle
# results : if "html" or "latex"
# the results will be displayed in html or latex format
corstars <-function(x, method=c("pearson", "spearman"), removeTriangle=c("upper", "lower"),
result=c("none", "html", "latex")){
#Compute correlation matrix
require(Hmisc)
x <- as.matrix(x)
correlation_matrix<-rcorr(x, type=method[1])
R <- correlation_matrix$r # Matrix of correlation coeficients
p <- correlation_matrix$P # Matrix of p-value
## Define notions for significance levels; spacing is important.
mystars <- ifelse(p < .0001, "****", ifelse(p < .001, "*** ", ifelse(p < .01, "** ", ifelse(p < .05, "* ", " "))))
## trunctuate the correlation matrix to two decimal
R <- format(round(cbind(rep(-1.11, ncol(x)), R), 2))[,-1]
## build a new matrix that includes the correlations with their apropriate stars
Rnew <- matrix(paste(R, mystars, sep=""), ncol=ncol(x))
diag(Rnew) <- paste(diag(R), " ", sep="")
rownames(Rnew) <- colnames(x)
colnames(Rnew) <- paste(colnames(x), "", sep="")
## remove upper triangle of correlation matrix
if(removeTriangle[1]=="upper"){
Rnew <- as.matrix(Rnew)
Rnew[upper.tri(Rnew, diag = TRUE)] <- ""
Rnew <- as.data.frame(Rnew)
}
## remove lower triangle of correlation matrix
else if(removeTriangle[1]=="lower"){
Rnew <- as.matrix(Rnew)
Rnew[lower.tri(Rnew, diag = TRUE)] <- ""
Rnew <- as.data.frame(Rnew)
}
## remove last column and return the correlation matrix
Rnew <- cbind(Rnew[1:length(Rnew)-1])
if (result[1]=="none") return(Rnew)
else{
if(result[1]=="html") print(xtable(Rnew), type="html")
else print(xtable(Rnew), type="latex")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.