content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#--------------------------------------------------#
# fonction repartition des especes en deux groupes #
# les plus detectable et les moins detectable #
# script applicable que pour les D5 #
# pour les PCoA #
# lionel.bonsacquet #
#--------------------------------------------------# # executer dans le make_PCoA
# 11 especes moins detectable donc 11*1000 coordonnees a stocker au plus dans
# une colonne de chaque matrice
# Split species into two groups (most vs. least detectable) for the PCoA
# analyses. Applicable to the D5 files only; executed from make_PCoA.
#
# distance: distance-metric label used to build the input/output file names
#           (default "chao").
# Side effects: sources "R/Noms-Fichiers.R", loads the regrouped PCoA
# coordinates and the per-file ACP simulation .Rdata files, and saves the four
# grouped coordinate matrices to Outcome/out-regroupement/PCoA/.
# NOTE(review): the file header mentions 11 species, but the code stores
# 10 rows per simulation (10 x 1000 = 10000 rows) -- confirm which is intended.
Fct_Grp_detectD5_pcoa <- function(distance = "chao") {
  #-- Result containers: 10 species rows per simulation x 1000 simulations.
  source(file.path("R", "Noms-Fichiers.R"))
  Nbr_Fichier <- length(c(V_Fichier_D5_GrpDetect))
  M_Grp_Detect_Axe1_pcoa <- matrix(NA, nrow = 10000, ncol = Nbr_Fichier)
  colnames(M_Grp_Detect_Axe1_pcoa) <- as.character(V_Fichier_D5_GrpDetect)
  M_Grp_Detect_Axe2_pcoa <- matrix(NA, nrow = 10000, ncol = Nbr_Fichier)
  colnames(M_Grp_Detect_Axe2_pcoa) <- as.character(V_Fichier_D5_GrpDetect)
  M_Grp_Detect_Axe1_pcoa_Naive <- matrix(NA, nrow = 10000, ncol = Nbr_Fichier)
  colnames(M_Grp_Detect_Axe1_pcoa_Naive) <- as.character(V_Fichier_D5_GrpDetect)
  M_Grp_Detect_Axe2_pcoa_Naive <- matrix(NA, nrow = 10000, ncol = Nbr_Fichier)
  colnames(M_Grp_Detect_Axe2_pcoa_Naive) <- as.character(V_Fichier_D5_GrpDetect)
  #-- Load the species-coordinate matrices (M_Resultat_Coord_Sp_pcoa_* objects).
  load(file.path("Outcome", "out-regroupement", "PCoA",
                 paste("PCoA_Regroup_Coord_Sp_", distance, ".Rdata", sep = "")))
  #-- Loop over the files; species detectability is re-derived per simulation.
  for (n in c(V_Fichier_D5)) {
    print(n)
    #-- Detection data for the 1000 simulations of this file; load() creates
    #-- M_MemDetectSp in the function environment. (The original code also
    #-- captured the return of load() and self-assigned M_MemDetectSp, both
    #-- no-ops that have been removed.)
    load(file.path("Outcome", "out-simul", "ACP",
                   paste("ACP_Simul_", n, ".Rdata", sep = "")))
    for (z in 1:1000) {
      #-- Species ranked from least to most detectable; +20*(z-1) offsets into
      #-- the 20 coordinate-matrix rows belonging to simulation z.
      Sp_les_Moins_Detect <- order(M_MemDetectSp[z, ])[1:10] + 20 * (z - 1)   # ten least detectable
      Sp_les_Plus_Detect  <- order(M_MemDetectSp[z, ])[11:20] + 20 * (z - 1)  # ten most detectable
      #-- Destination rows for simulation z (10 rows per simulation).
      lignes <- (10 * (z - 1) + 1):(10 * (z - 1) + 10)
      #-- Sign flip `a` orients each axis consistently across simulations:
      #-- a = -1 when the median coordinate of the least-detectable group is
      #-- negative. ifelse() (rather than if/else) keeps an NA median
      #-- propagating as NA instead of raising an error, matching the
      #-- original side-effect-style ifelse calls.
      a <- ifelse(median(M_Resultat_Coord_Sp_pcoa_axe1[Sp_les_Moins_Detect, n]) < 0, -1, 1)
      M_Grp_Detect_Axe1_pcoa[lignes, paste(n, "_PlusD", sep = "")]  <- M_Resultat_Coord_Sp_pcoa_axe1[Sp_les_Plus_Detect, n] * a
      M_Grp_Detect_Axe1_pcoa[lignes, paste(n, "_MoinsD", sep = "")] <- M_Resultat_Coord_Sp_pcoa_axe1[Sp_les_Moins_Detect, n] * a
      a <- ifelse(median(M_Resultat_Coord_Sp_pcoa_Naive_axe1[Sp_les_Moins_Detect, n]) < 0, -1, 1)
      M_Grp_Detect_Axe1_pcoa_Naive[lignes, paste(n, "_PlusD", sep = "")]  <- M_Resultat_Coord_Sp_pcoa_Naive_axe1[Sp_les_Plus_Detect, n] * a
      M_Grp_Detect_Axe1_pcoa_Naive[lignes, paste(n, "_MoinsD", sep = "")] <- M_Resultat_Coord_Sp_pcoa_Naive_axe1[Sp_les_Moins_Detect, n] * a
      a <- ifelse(median(M_Resultat_Coord_Sp_pcoa_axe2[Sp_les_Moins_Detect, n]) < 0, -1, 1)
      M_Grp_Detect_Axe2_pcoa[lignes, paste(n, "_PlusD", sep = "")]  <- M_Resultat_Coord_Sp_pcoa_axe2[Sp_les_Plus_Detect, n] * a
      M_Grp_Detect_Axe2_pcoa[lignes, paste(n, "_MoinsD", sep = "")] <- M_Resultat_Coord_Sp_pcoa_axe2[Sp_les_Moins_Detect, n] * a
      a <- ifelse(median(M_Resultat_Coord_Sp_pcoa_Naive_axe2[Sp_les_Moins_Detect, n]) < 0, -1, 1)
      M_Grp_Detect_Axe2_pcoa_Naive[lignes, paste(n, "_PlusD", sep = "")]  <- M_Resultat_Coord_Sp_pcoa_Naive_axe2[Sp_les_Plus_Detect, n] * a
      M_Grp_Detect_Axe2_pcoa_Naive[lignes, paste(n, "_MoinsD", sep = "")] <- M_Resultat_Coord_Sp_pcoa_Naive_axe2[Sp_les_Moins_Detect, n] * a
    }
  }
  #-- Save via `list =` only. The original passed the objects both
  #-- positionally and through `list =`, which makes save() write each
  #-- object twice (the two name vectors are concatenated).
  saveData <- file.path("Outcome", "out-regroupement", "PCoA",
                        paste("PCoA_", distance, "_Grp_Detect_D5.Rdata", sep = ""))
  save(list = c("M_Grp_Detect_Axe1_pcoa", "M_Grp_Detect_Axe2_pcoa",
                "M_Grp_Detect_Axe1_pcoa_Naive", "M_Grp_Detect_Axe2_pcoa_Naive"),
       file = saveData)
}
################################################################################
################################################################################
|
/R/Grp_Plus_Moins_detectable_D5_pcoa.R
|
permissive
|
bonsacquet-l/sim-com
|
R
| false
| false
| 4,278
|
r
|
#--------------------------------------------------#
# fonction repartition des especes en deux groupes #
# les plus detectable et les moins detectable #
# script applicable que pour les D5 #
# pour les PCoA #
# lionel.bonsacquet #
#--------------------------------------------------# # executer dans le make_PCoA
# 11 especes moins detectable donc 11*1000 coordonnees a stocker au plus dans
# une colonne de chaque matrice
# Split species into two groups (most vs. least detectable) for the PCoA
# analyses; D5 files only. distance: label used in input/output file names.
# Side effects only: reads simulation .Rdata files, writes four matrices.
# NOTE(review): the file header mentions 11 species, but the code stores
# 10 rows per simulation (10 x 1000 = 10000 rows) -- confirm which is intended.
Fct_Grp_detectD5_pcoa<-function(distance="chao") {
#-- Containers for the results (10 rows per simulation x 1000 simulations)
source(file.path("R","Noms-Fichiers.R"))
Nbr_Fichier<-length(c(V_Fichier_D5_GrpDetect))
M_Grp_Detect_Axe1_pcoa<-matrix(NA,nrow = 10000,ncol = Nbr_Fichier)
colnames(M_Grp_Detect_Axe1_pcoa)<-as.character(V_Fichier_D5_GrpDetect)
M_Grp_Detect_Axe2_pcoa<-matrix(NA,nrow = 10000,ncol = Nbr_Fichier)
colnames(M_Grp_Detect_Axe2_pcoa)<-as.character(V_Fichier_D5_GrpDetect)
M_Grp_Detect_Axe1_pcoa_Naive<-matrix(NA,nrow = 10000,ncol = Nbr_Fichier)
colnames(M_Grp_Detect_Axe1_pcoa_Naive)<-as.character(V_Fichier_D5_GrpDetect)
M_Grp_Detect_Axe2_pcoa_Naive<-matrix(NA,nrow = 10000,ncol = Nbr_Fichier)
colnames(M_Grp_Detect_Axe2_pcoa_Naive)<-as.character(V_Fichier_D5_GrpDetect)
#-- Load the species-coordinate matrices (M_Resultat_Coord_Sp_pcoa_* objects)
load(file.path("Outcome","out-regroupement","PCoA",paste("PCoA_Regroup_Coord_Sp_",distance,".Rdata",sep = "")))
#-- Loop over the files; species detection is re-derived for each simulation
for (n in c(V_Fichier_D5)) {
print(n)
#-- Detection data (the 1000 simulations of this file); load() creates
#-- M_MemDetectSp in this environment
Sim<-load(file.path("Outcome","out-simul","ACP",paste("ACP_Simul_",n,".Rdata",sep="")))
# NOTE(review): self-assignment below is a no-op; M_MemDetectSp already
# exists here after load()
M_MemDetectSp<-M_MemDetectSp
for (z in 1:1000) {
#-- Rank species from least to most detectable; the +20*(z-1) offset
#-- addresses the 20 coordinate-matrix rows belonging to simulation z
Sp_les_Moins_Detect<-order(M_MemDetectSp[z,])[1:10]+20*(z-1) # the ten least detectable
Sp_les_Plus_Detect<-order(M_MemDetectSp[z,])[11:20]+20*(z-1) # the ten most detectable
# destination rows to fill (10 per simulation)
lignes<-(c((10*(z-1)+1):(10*(z-1)+10)))
#-- Store the coordinates of the two groups. Each ifelse() below is used
#-- for its side effect of setting the sign flip `a` (a = -1 when the
#-- median coordinate of the least-detectable group is negative), which
#-- orients the axis consistently across simulations; `a` stays NA when
#-- the median is NA.
a<-NA
ifelse(median(M_Resultat_Coord_Sp_pcoa_axe1[Sp_les_Moins_Detect,n])<0,a<-(-1),a<-1)
M_Grp_Detect_Axe1_pcoa[(lignes),paste(n,"_PlusD",sep="")]<-M_Resultat_Coord_Sp_pcoa_axe1[Sp_les_Plus_Detect,n]*a
M_Grp_Detect_Axe1_pcoa[(lignes),paste(n,"_MoinsD",sep="")]<-M_Resultat_Coord_Sp_pcoa_axe1[Sp_les_Moins_Detect,n]*a
a<-NA
ifelse(median(M_Resultat_Coord_Sp_pcoa_Naive_axe1[Sp_les_Moins_Detect,n])<0,a<-(-1),a<-1)
M_Grp_Detect_Axe1_pcoa_Naive[(lignes),paste(n,"_PlusD",sep="")]<-M_Resultat_Coord_Sp_pcoa_Naive_axe1[Sp_les_Plus_Detect,n]*a
M_Grp_Detect_Axe1_pcoa_Naive[(lignes),paste(n,"_MoinsD",sep="")]<-M_Resultat_Coord_Sp_pcoa_Naive_axe1[Sp_les_Moins_Detect,n]*a
a<-NA
ifelse(median(M_Resultat_Coord_Sp_pcoa_axe2[Sp_les_Moins_Detect,n])<0,a<-(-1),a<-1)
M_Grp_Detect_Axe2_pcoa[(lignes),paste(n,"_PlusD",sep="")]<-M_Resultat_Coord_Sp_pcoa_axe2[Sp_les_Plus_Detect,n]*a
M_Grp_Detect_Axe2_pcoa[(lignes),paste(n,"_MoinsD",sep="")]<-M_Resultat_Coord_Sp_pcoa_axe2[Sp_les_Moins_Detect,n]*a
a<-NA
ifelse(median(M_Resultat_Coord_Sp_pcoa_Naive_axe2[Sp_les_Moins_Detect,n])<0,a<-(-1),a<-1)
M_Grp_Detect_Axe2_pcoa_Naive[lignes,paste(n,"_PlusD",sep="")]<-M_Resultat_Coord_Sp_pcoa_Naive_axe2[Sp_les_Plus_Detect,n]*a
M_Grp_Detect_Axe2_pcoa_Naive[lignes,paste(n,"_MoinsD",sep="")]<-M_Resultat_Coord_Sp_pcoa_Naive_axe2[Sp_les_Moins_Detect,n]*a
}
}
#-- Save the four matrices. NOTE(review): the objects are passed both
#-- positionally and via `list =`; save() concatenates the two name vectors,
#-- so each object is written twice -- `list =` alone would suffice.
saveData<-file.path("Outcome","out-regroupement","PCoA",paste("PCoA_",distance,"_Grp_Detect_D5.Rdata",sep = ""))
save(M_Grp_Detect_Axe1_pcoa,M_Grp_Detect_Axe2_pcoa,
M_Grp_Detect_Axe1_pcoa_Naive,M_Grp_Detect_Axe2_pcoa_Naive,
list = c("M_Grp_Detect_Axe1_pcoa","M_Grp_Detect_Axe2_pcoa",
"M_Grp_Detect_Axe1_pcoa_Naive","M_Grp_Detect_Axe2_pcoa_Naive"),
file = saveData)
}
################################################################################
################################################################################
|
# Auto-generated libFuzzer/valgrind regression case for the internal
# mcga:::ByteCodeMutation routine: calls it with an empty byte vector and an
# extreme (out-of-range) mutation probability, then prints the structure of
# the result. Exercises crash/robustness behavior only; no assertions.
testlist <- list(bytes1 = integer(0), pmutation = -41255400998276)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802421-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 130
|
r
|
# Auto-generated libFuzzer/valgrind regression case for the internal
# mcga:::ByteCodeMutation routine: empty byte vector plus an extreme
# (out-of-range) mutation probability; prints the result's structure.
# Exercises crash/robustness behavior only; no assertions.
testlist <- list(bytes1 = integer(0), pmutation = -41255400998276)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
#' @include gbLocation-class.R
NULL
#' Class \code{"gbFeature"}
#'
#' \dQuote{gbFeature} is an S4 class that provides a container
#' for GenBank feature tables.
#'
#' @slot .seqinfo An \code{\linkS4class{seqinfo}} object containing the
#' full-length sequence of the GenBank record that the feature is part
#' of as an \code{\linkS4class{XStringSet}} object, and sequence metadata
#' as a \code{\linkS4class{gbHeader}} object.
#' @slot .id Identifier (index) of the feature in the GenBank record
#' the feature is part of.
#' @slot key The feature key.
#' @slot location A \code{\linkS4class{gbLocation}} object.
#' @slot qualifiers A named character vector. Name attributes
#' correspond to GenBank qualifier tags.
#'
#' @section Accessor functions:
#' \code{\link{getHeader}}, \code{\link{getSequence}},
#' \code{\link{ranges}}, \code{\link{key}}, \code{\link{index}},
#' \code{\link{qualif}}
#'
#' @seealso
#' \code{\linkS4class{gbFeatureTable}}, \code{\linkS4class{gbRecord}}
#'
#' @export
setClass(
"gbFeature",
slots = list(
.seqinfo = "seqinfo",
.id = "integer",
key = "character",
location = "gbLocation",
qualifiers = "character"
)
)
# Placeholder validity method: every gbFeature is unconditionally valid.
# NOTE(review): S4Vectors::setValidity2 is used instead of methods::setValidity;
# per the S4Vectors documentation it additionally allows validity checking to
# be temporarily disabled -- confirm this is the intended behavior here.
S4Vectors::setValidity2("gbFeature", function(object) {
TRUE
})
# show -------------------------------------------------------------------
# Render a gbFeature as GenBank-style feature-table text: the feature key
# (padded to 16 columns via the sprintf formats below) followed by its
# location, then one "/qualifier = value" line per qualifier. Wrapping is
# delegated to the package-internal helpers linebreak(), dup() and
# all_empty() (not visible in this file). With write_to_file = TRUE the text
# is 80 columns wide with 5 extra leading spaces and nothing is printed;
# otherwise it is cat()ed to the console, optionally followed by the record
# seqinfo. Returns the formatted string invisibly.
show_gbFeature <- function(object, showInfo = TRUE, write_to_file = FALSE) {
# use plain ASCII quotes around qualifier values; option restored on exit
op <- options("useFancyQuotes")
options(useFancyQuotes = FALSE)
on.exit(options(op))
if (write_to_file) {
ws <- 5 ## added whitespace if we write to file
width <- 80
} else {
ws <- 0
width <- getOption("width") - 4
cat("Feature: Location/Qualifiers:\n")
}
loc_fmt <- paste0("%s%-16s%s")
qua_fmt <- paste0("%-16s%s%s = %s")
# wrap the location string; continuation lines indent under the key column
loc <- linebreak(as(location(object), "character"), width = width,
offset = 17 + ws, indent = 0, split = ",", FORCE = FALSE)
loc_line <- sprintf(loc_fmt, dup(' ', ws), key(object), loc)
if (all_empty(object@qualifiers)) {
qua_line <- ""
} else {
qua <- names(object@qualifiers)
# negative indent: continuation lines hang under the "/tag = " prefix
indent <- -(nchar(qua) + 17 + ws + 2)
val <- unlist(.mapply(linebreak,
list(s = dQuote(object@qualifiers), indent = indent),
list(width = width, offset = 16 + ws, FORCE = TRUE)))
qua_line <- sprintf(qua_fmt, "", paste0(dup(' ', ws), "/"), qua, val)
}
ft <- paste0(loc_line, "\n", paste0(qua_line, collapse = "\n"))
if (!write_to_file) {
cat(ft, sep = "\n")
if (showInfo) {
show(.seqinfo(object))
}
}
invisible(ft)
}
# Default console display for gbFeature objects.
setMethod("show", "gbFeature", function(object) {
show_gbFeature(object, showInfo = TRUE, write_to_file = FALSE)
})
# summary ----------------------------------------------------------------
# One-line tabular summary of a feature: header row values (Id, Feature,
# Location, GeneId, Product, Note) are prepended to the feature's values,
# columns are padded to their widest entry, and the result is printed
# ellipsized to the console width. geneID(), product(), note(), collapse()
# and ellipsize() are package helpers not visible in this file.
#' @rdname summary-methods
setMethod("summary", "gbFeature", function(object, ...) {
idx <- c("Id", index(object))
key <- c("Feature", key(object))
loc <- c("Location", as(location(object), "character"))
gene <- c("GeneId", geneID(object))
prod <- c("Product", product(object))
note <- c("Note", collapse(as.list(note(object)), '; '))
# width of each column = widest of header and value
max_idx_len <- max(nchar(idx))
max_key_len <- max(nchar(key))
max_loc_len <- max(nchar(loc))
max_geneid_len <- max(nchar(gene))
max_prod_len <- max(nchar(prod))
fmt <- paste0('%+', max_idx_len + 1, 's %-', max_key_len + 1, 's%-',
max_loc_len + 1, 's%-', max_geneid_len + 1, 's%-',
max_prod_len + 1, 's%s')
showme <- ellipsize(sprintf(fmt, idx, key, loc, gene, prod, note),
width = getOption("width") - 3)
cat(showme, sep = "\n")
return(invisible(NULL))
})
# Internal getters ----------------------------------------------------------
# Internal accessors: each delegates to the feature's embedded seqinfo
# object (record-level data), except .dbSource/.defline which combine
# several record fields.
setMethod('.seqinfo', 'gbFeature', function(x) {
x@.seqinfo
})
setMethod('.locus', 'gbFeature', function(x) {
.locus(.seqinfo(x))
})
setMethod('.header', 'gbFeature', function(x) {
.header(.seqinfo(x))
})
setMethod('.sequence', 'gbFeature', function(x) {
.sequence(.seqinfo(x))
})
setMethod('.dbSource', 'gbFeature', function(x) {
parse_dbsource(getDBSource(x))
})
# FASTA-style definition line built from key, index, db source, accession
# and record definition, prefixed with "lcl|".
setMethod(".defline", "gbFeature", function(x) {
paste0("lcl|", key(x), '.', index(x), .dbSource(x), getAccession(x), ' ',
getDefinition(x))
})
# getters ----------------------------------------------------------------
# Record-level metadata accessors: every method below delegates to the
# feature's embedded seqinfo, so the values describe the parent GenBank
# record rather than the individual feature.
#' @rdname accessors
setMethod("getLocus", "gbFeature", function(x) getLocus(.seqinfo(x)) )
#' @rdname accessors
setMethod("getLength", "gbFeature", function(x) getLength(.seqinfo(x)) )
#' @rdname accessors
setMethod("getMoltype", "gbFeature", function(x) getMoltype(.seqinfo(x)) )
#' @rdname accessors
setMethod("getTopology", "gbFeature", function(x) getTopology(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDivision", "gbFeature", function(x) getDivision(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDate", "gbFeature", function(x) getDate(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDefinition", "gbFeature", function(x) getDefinition(.seqinfo(x)) )
#' @rdname accessors
setMethod("getAccession", "gbFeature", function(x) getAccession(.seqinfo(x)) )
#' @rdname accessors
setMethod("getVersion", "gbFeature", function(x) getVersion(.seqinfo(x)) )
#' @param db Which database identifier (default: 'gi')
#' @rdname accessors
setMethod("getGeneID", "gbFeature", function(x, db = 'gi') getGeneID(.seqinfo(x), db = db) )
#' @rdname accessors
setMethod("getDBLink", "gbFeature", function(x) getDBLink(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDBSource", "gbFeature", function(x) getDBSource(.seqinfo(x)) )
#' @rdname accessors
setMethod("getSource", "gbFeature", function(x) getSource(.seqinfo(x)) )
#' @rdname accessors
setMethod("getOrganism", "gbFeature", function(x) getOrganism(.seqinfo(x)) )
#' @rdname accessors
setMethod("getTaxonomy", "gbFeature", function(x) getTaxonomy(.seqinfo(x)) )
#' @rdname accessors
setMethod("getReference", "gbFeature", function(x) getReference(.seqinfo(x)) )
#' @rdname accessors
setMethod("getKeywords", "gbFeature", function(x) getKeywords(.seqinfo(x)) )
#' @rdname accessors
setMethod("getComment", "gbFeature", function(x) getComment(.seqinfo(x)) )
# header and sequence access; getSequence uses the internal .seq_access()
#' @rdname getHeader-methods
setMethod("header", "gbFeature", function(x) .header(.seqinfo(x)))
#' @rdname getHeader-methods
setMethod("getHeader", "gbFeature", function(x) .header(.seqinfo(x)))
#' @rdname getSequence-methods
setMethod("getSequence", "gbFeature", function(x) .seq_access(x))
# convert the feature's location into a GRanges-like object via .GRanges()
#' @rdname ranges
setMethod("ranges", "gbFeature", function(x, include = "none", exclude = "", join = FALSE) {
.GRanges(x, include = include, exclude = exclude, join = join)
})
# Positional getters/setters: all of them delegate to the gbLocation object
# in x@location; the replacement helpers re-validate the whole feature
# unless check = FALSE is passed through ... .
#' @rdname start
setMethod("start", "gbFeature", function(x, join = FALSE) {
start(x@location, join = join)
})
# Replace the start position; validates the modified feature when check = TRUE.
.gbFeature_replace_start <- function(x, check = TRUE, value) {
start(x@location, check = check) <- value
if (check) {
validObject(x)
}
x
}
#' @rdname start
setReplaceMethod("start", "gbFeature", function(x, ..., value)
.gbFeature_replace_start(x, ..., value = value)
)
#' @rdname end
setMethod("end", "gbFeature", function(x, join = FALSE) {
end(x@location, join = join)
})
# Replace the end position; validates the modified feature when check = TRUE.
.gbFeature_replace_end <- function(x, check = TRUE, value) {
end(x@location, check = check) <- value
if (check) {
validObject(x)
}
x
}
#' @rdname end
setReplaceMethod("end", "gbFeature", function(x, ..., value)
.gbFeature_replace_end(x, ..., value = value)
)
#' @rdname strand
setMethod("strand", "gbFeature", function(x, join = FALSE) {
strand(x@location, join = join)
})
# NOTE(review): unlike start<-/end<-, strand<- does not call validObject().
#' @rdname strand
setReplaceMethod("strand", "gbFeature", function(x, ..., value) {
strand(x@location, ...) <- value
x
})
#' @rdname span
setMethod("span", "gbFeature", function(x, join = FALSE) {
span(x@location, join = join)
})
#' @rdname span
setMethod("joint_range", "gbFeature", function(x) {
joint_range(x@location)
})
# db_xref qualifier lookup; a db name narrows the search to "db_xref.<db>".
#' @rdname dbxref-methods
setMethod("dbxref", "gbFeature", function(x, db = NULL, ...) {
dbx <- "db_xref"
if (!is.null(db)) {
dbx <- paste0(dbx, ".", db)
}
.qual_access(x, which = dbx, ...)
})
# Direct slot accessors and their replacement methods.
#' @rdname location-methods
setMethod("location", "gbFeature", function(x) x@location)
#' @rdname fuzzy
setMethod("fuzzy", "gbFeature", function(x) fuzzy(x@location))
#' @rdname index-methods
setMethod("index", "gbFeature", function(x) x@.id)
# key() strips any names attribute from the stored key string
#' @rdname key-methods
setMethod("key", "gbFeature", function(x) structure(x@key, names = NULL) )
#' @rdname key-methods
setReplaceMethod("key", "gbFeature", function(x, check = TRUE, value) {
x <- initialize(x, key = value)
if (check)
validObject(x)
x
})
# qualif(): with no `which`, return the full qualifier vector; otherwise
# delegate the lookup to the internal .qual_access() helper.
#' @rdname qualif-methods
setMethod("qualif", "gbFeature", function(x, which, fixed = FALSE, use.names = TRUE) {
if (missing(which)) {
x@qualifiers
} else {
.qual_access(x, which, fixed, use.names)
}
})
#' @rdname qualif-methods
setReplaceMethod("qualif", "gbFeature", function(x, which, check = TRUE, value) {
assertthat::assert_that(!missing(which))
x@qualifiers[which] <- value
if (check)
validObject(x)
x
})
# listers ----------------------------------------------------------------
# List the qualifier tags present on this feature.
#' @rdname qualifList-methods
setMethod("qualifList", "gbFeature", function(x) {
names(x@qualifiers)
})
# charmatch() allows unambiguous partial matches of the key/qualifier name;
# an ambiguous partial match yields 0 (not NA) and therefore counts as TRUE.
#' @rdname hasKey-methods
setMethod("hasKey", "gbFeature", function(x, key) {
!is.na(charmatch(key, x@key))
})
#' @rdname hasQualif-methods
setMethod("hasQualif", "gbFeature", function(x, qualifier) {
!is.na(charmatch(qualifier, names(x@qualifiers)))
})
# Shift the feature's location by `shift` positions (delegates to gbLocation).
#' @rdname shift
setMethod("shift", "gbFeature", function(x, shift = 0L, ...) {
x@location <- shift(x@location, shift)
x
})
# [[ and $ expose the key/location/.id slots by name; any other name is
# looked up in the qualifier vector (an absent name yields an NA element).
#' @rdname extract-methods
setMethod("[[", c("gbFeature", "character", "missing"), function(x, i, j) {
if (i %in% c("key", "location", ".id")) {
slot(x, i)
} else {
x@qualifiers[i]
}
})
#' @param name The name of the element to extract.
#' @rdname extract-methods
setMethod("$", "gbFeature", function(x, name) {
if (name %in% c("key", "location", ".id")) {
slot(x, name)
} else {
x@qualifiers[name]
}
})
|
/R/gbFeature-class.R
|
no_license
|
awenocur/biofiles
|
R
| false
| false
| 10,347
|
r
|
# NOTE(review): this span is a byte-for-byte duplicate of the gbFeature class
# definition that appears earlier in this dump -- the dataset stores the file
# content twice. The code is left untouched.
#' @include gbLocation-class.R
NULL
#' Class \code{"gbFeature"}
#'
#' \dQuote{gbFeature} is an S4 class that provides a container
#' for GenBank feature tables.
#'
#' @slot .seqinfo An \code{\linkS4class{seqinfo}} object containing the
#' full-length sequence of the GenBank record that the feature is part
#' of as an \code{\linkS4class{XStringSet}} object, and sequence metadata
#' as a \code{\linkS4class{gbHeader}} object.
#' @slot .id Identifier (index) of the feature in the GenBank record
#' the feature is part of.
#' @slot key The feature key.
#' @slot location A \code{\linkS4class{gbLocation}} object.
#' @slot qualifiers A named character vector. Name attributes
#' correspond to GenBank qualifier tags.
#'
#' @section Accessor functions:
#' \code{\link{getHeader}}, \code{\link{getSequence}},
#' \code{\link{ranges}}, \code{\link{key}}, \code{\link{index}},
#' \code{\link{qualif}}
#'
#' @seealso
#' \code{\linkS4class{gbFeatureTable}}, \code{\linkS4class{gbRecord}}
#'
#' @export
setClass(
"gbFeature",
slots = list(
.seqinfo = "seqinfo",
.id = "integer",
key = "character",
location = "gbLocation",
qualifiers = "character"
)
)
# Placeholder validity method: every gbFeature is unconditionally valid.
S4Vectors::setValidity2("gbFeature", function(object) {
TRUE
})
# show -------------------------------------------------------------------
show_gbFeature <- function(object, showInfo = TRUE, write_to_file = FALSE) {
op <- options("useFancyQuotes")
options(useFancyQuotes = FALSE)
on.exit(options(op))
if (write_to_file) {
ws <- 5 ## added whitespace if we write to file
width <- 80
} else {
ws <- 0
width <- getOption("width") - 4
cat("Feature: Location/Qualifiers:\n")
}
loc_fmt <- paste0("%s%-16s%s")
qua_fmt <- paste0("%-16s%s%s = %s")
loc <- linebreak(as(location(object), "character"), width = width,
offset = 17 + ws, indent = 0, split = ",", FORCE = FALSE)
loc_line <- sprintf(loc_fmt, dup(' ', ws), key(object), loc)
if (all_empty(object@qualifiers)) {
qua_line <- ""
} else {
qua <- names(object@qualifiers)
indent <- -(nchar(qua) + 17 + ws + 2)
val <- unlist(.mapply(linebreak,
list(s = dQuote(object@qualifiers), indent = indent),
list(width = width, offset = 16 + ws, FORCE = TRUE)))
qua_line <- sprintf(qua_fmt, "", paste0(dup(' ', ws), "/"), qua, val)
}
ft <- paste0(loc_line, "\n", paste0(qua_line, collapse = "\n"))
if (!write_to_file) {
cat(ft, sep = "\n")
if (showInfo) {
show(.seqinfo(object))
}
}
invisible(ft)
}
setMethod("show", "gbFeature", function(object) {
show_gbFeature(object, showInfo = TRUE, write_to_file = FALSE)
})
# summary ----------------------------------------------------------------
#' @rdname summary-methods
setMethod("summary", "gbFeature", function(object, ...) {
idx <- c("Id", index(object))
key <- c("Feature", key(object))
loc <- c("Location", as(location(object), "character"))
gene <- c("GeneId", geneID(object))
prod <- c("Product", product(object))
note <- c("Note", collapse(as.list(note(object)), '; '))
max_idx_len <- max(nchar(idx))
max_key_len <- max(nchar(key))
max_loc_len <- max(nchar(loc))
max_geneid_len <- max(nchar(gene))
max_prod_len <- max(nchar(prod))
fmt <- paste0('%+', max_idx_len + 1, 's %-', max_key_len + 1, 's%-',
max_loc_len + 1, 's%-', max_geneid_len + 1, 's%-',
max_prod_len + 1, 's%s')
showme <- ellipsize(sprintf(fmt, idx, key, loc, gene, prod, note),
width = getOption("width") - 3)
cat(showme, sep = "\n")
return(invisible(NULL))
})
# Internal getters ----------------------------------------------------------
setMethod('.seqinfo', 'gbFeature', function(x) {
x@.seqinfo
})
setMethod('.locus', 'gbFeature', function(x) {
.locus(.seqinfo(x))
})
setMethod('.header', 'gbFeature', function(x) {
.header(.seqinfo(x))
})
setMethod('.sequence', 'gbFeature', function(x) {
.sequence(.seqinfo(x))
})
setMethod('.dbSource', 'gbFeature', function(x) {
parse_dbsource(getDBSource(x))
})
setMethod(".defline", "gbFeature", function(x) {
paste0("lcl|", key(x), '.', index(x), .dbSource(x), getAccession(x), ' ',
getDefinition(x))
})
# getters ----------------------------------------------------------------
#' @rdname accessors
setMethod("getLocus", "gbFeature", function(x) getLocus(.seqinfo(x)) )
#' @rdname accessors
setMethod("getLength", "gbFeature", function(x) getLength(.seqinfo(x)) )
#' @rdname accessors
setMethod("getMoltype", "gbFeature", function(x) getMoltype(.seqinfo(x)) )
#' @rdname accessors
setMethod("getTopology", "gbFeature", function(x) getTopology(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDivision", "gbFeature", function(x) getDivision(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDate", "gbFeature", function(x) getDate(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDefinition", "gbFeature", function(x) getDefinition(.seqinfo(x)) )
#' @rdname accessors
setMethod("getAccession", "gbFeature", function(x) getAccession(.seqinfo(x)) )
#' @rdname accessors
setMethod("getVersion", "gbFeature", function(x) getVersion(.seqinfo(x)) )
#' @param db Which database identifier (default: 'gi')
#' @rdname accessors
setMethod("getGeneID", "gbFeature", function(x, db = 'gi') getGeneID(.seqinfo(x), db = db) )
#' @rdname accessors
setMethod("getDBLink", "gbFeature", function(x) getDBLink(.seqinfo(x)) )
#' @rdname accessors
setMethod("getDBSource", "gbFeature", function(x) getDBSource(.seqinfo(x)) )
#' @rdname accessors
setMethod("getSource", "gbFeature", function(x) getSource(.seqinfo(x)) )
#' @rdname accessors
setMethod("getOrganism", "gbFeature", function(x) getOrganism(.seqinfo(x)) )
#' @rdname accessors
setMethod("getTaxonomy", "gbFeature", function(x) getTaxonomy(.seqinfo(x)) )
#' @rdname accessors
setMethod("getReference", "gbFeature", function(x) getReference(.seqinfo(x)) )
#' @rdname accessors
setMethod("getKeywords", "gbFeature", function(x) getKeywords(.seqinfo(x)) )
#' @rdname accessors
setMethod("getComment", "gbFeature", function(x) getComment(.seqinfo(x)) )
#' @rdname getHeader-methods
setMethod("header", "gbFeature", function(x) .header(.seqinfo(x)))
#' @rdname getHeader-methods
setMethod("getHeader", "gbFeature", function(x) .header(.seqinfo(x)))
#' @rdname getSequence-methods
setMethod("getSequence", "gbFeature", function(x) .seq_access(x))
#' @rdname ranges
setMethod("ranges", "gbFeature", function(x, include = "none", exclude = "", join = FALSE) {
.GRanges(x, include = include, exclude = exclude, join = join)
})
#' @rdname start
setMethod("start", "gbFeature", function(x, join = FALSE) {
start(x@location, join = join)
})
.gbFeature_replace_start <- function(x, check = TRUE, value) {
start(x@location, check = check) <- value
if (check) {
validObject(x)
}
x
}
#' @rdname start
setReplaceMethod("start", "gbFeature", function(x, ..., value)
.gbFeature_replace_start(x, ..., value = value)
)
#' @rdname end
setMethod("end", "gbFeature", function(x, join = FALSE) {
end(x@location, join = join)
})
.gbFeature_replace_end <- function(x, check = TRUE, value) {
end(x@location, check = check) <- value
if (check) {
validObject(x)
}
x
}
#' @rdname end
setReplaceMethod("end", "gbFeature", function(x, ..., value)
.gbFeature_replace_end(x, ..., value = value)
)
#' @rdname strand
setMethod("strand", "gbFeature", function(x, join = FALSE) {
strand(x@location, join = join)
})
#' @rdname strand
setReplaceMethod("strand", "gbFeature", function(x, ..., value) {
strand(x@location, ...) <- value
x
})
#' @rdname span
setMethod("span", "gbFeature", function(x, join = FALSE) {
span(x@location, join = join)
})
#' @rdname span
setMethod("joint_range", "gbFeature", function(x) {
joint_range(x@location)
})
#' @rdname dbxref-methods
setMethod("dbxref", "gbFeature", function(x, db = NULL, ...) {
dbx <- "db_xref"
if (!is.null(db)) {
dbx <- paste0(dbx, ".", db)
}
.qual_access(x, which = dbx, ...)
})
#' @rdname location-methods
setMethod("location", "gbFeature", function(x) x@location)
#' @rdname fuzzy
setMethod("fuzzy", "gbFeature", function(x) fuzzy(x@location))
#' @rdname index-methods
setMethod("index", "gbFeature", function(x) x@.id)
#' @rdname key-methods
setMethod("key", "gbFeature", function(x) structure(x@key, names = NULL) )
#' @rdname key-methods
setReplaceMethod("key", "gbFeature", function(x, check = TRUE, value) {
x <- initialize(x, key = value)
if (check)
validObject(x)
x
})
#' @rdname qualif-methods
setMethod("qualif", "gbFeature", function(x, which, fixed = FALSE, use.names = TRUE) {
if (missing(which)) {
x@qualifiers
} else {
.qual_access(x, which, fixed, use.names)
}
})
#' @rdname qualif-methods
setReplaceMethod("qualif", "gbFeature", function(x, which, check = TRUE, value) {
assertthat::assert_that(!missing(which))
x@qualifiers[which] <- value
if (check)
validObject(x)
x
})
# listers ----------------------------------------------------------------
#' @rdname qualifList-methods
setMethod("qualifList", "gbFeature", function(x) {
names(x@qualifiers)
})
# testers ----------------------------------------------------------------
#' @rdname hasKey-methods
setMethod("hasKey", "gbFeature", function(x, key) {
!is.na(charmatch(key, x@key))
})
#' @rdname hasQualif-methods
setMethod("hasQualif", "gbFeature", function(x, qualifier) {
!is.na(charmatch(qualifier, names(x@qualifiers)))
})
# shift ---------------------------------------------------------------
#' @rdname shift
setMethod("shift", "gbFeature", function(x, shift = 0L, ...) {
x@location <- shift(x@location, shift)
x
})
# subsetting ----------------------------------------------------------
#' @rdname extract-methods
setMethod("[[", c("gbFeature", "character", "missing"), function(x, i, j) {
if (i %in% c("key", "location", ".id")) {
slot(x, i)
} else {
x@qualifiers[i]
}
})
#' @param name The name of the element to extract.
#' @rdname extract-methods
setMethod("$", "gbFeature", function(x, name) {
if (name %in% c("key", "location", ".id")) {
slot(x, name)
} else {
x@qualifiers[name]
}
})
|
# --- Setup: libraries, output location, project helper functions ---
library(survival) # for survival analysis
library(glmnet) # for LASSO regularization
#library(ROCR) # for ROC analysis
#library(gpclib) # for plotting confidence intervals for x and y by calculating intersection of two polygons
library(Hmisc) # c-index
library(plyr) # data manipulation
output.folder = "./outtemp/" # name of the folder where plots and tables will be saved (use "" for current folder, or use e.g., "./foldername/")
source("R_myfunctions.R") # functions
############
# DATA
############
# log ratios (stim to costim) of relative frequencies
#dat = read.csv("CMVdata_44subjects_log2ratio.csv", check.names = FALSE)
dat = read.csv("CMVdata_44subjects_log2ratio_UPDATED.csv", check.names = FALSE) # data updates: the 32 patients originally with CMVstutus=0 in the 44 original cohort had been updated to new censor date or death.
# first column holds subject identifiers: move it to rownames, keep the rest
# as a matrix (assumed numeric -- confirm against the CSV)
rownames(dat) = dat[,1]
dat = as.matrix(dat[,-1])
# same for relative frequencies: will be used only for descriptive analysis
dat.RF = read.csv("CMVdata_44subjects_relfreq_UPDATED.csv", check.names = FALSE) # data updates: the 32 patients originally with CMVstutus=0 in the 44 original cohort had been updated to new censor date or death.
rownames(dat.RF) = dat.RF[,1]
dat.RF = as.matrix(dat.RF[,-1])
# validation cohort (previously on prophy but got off prophy after updates)
# NOTE(review): `head = T` relies on partial matching of `header` and on the
# reassignable T/F shorthands; `header = TRUE` would be safer.
datvalid = read.csv("CMVdata_validation18_log2ratio.csv", check.names = F, head = T)
head(datvalid[,1:5])
##################
######## Descriptive analysis for log ratios of relative frequencies
######## survival outcome matrix Y
colnames(dat)[1:2] # [1] "offprophyCMVfreedays" "CMVstatus"
# survival object: column 1 = off-prophylaxis CMV-free days, column 2 = event
Y = Surv(dat[,1], dat[,2])
## Median (off-prophylaxis) follow-up time among censored
summary(Y[Y[,2]==0,1])
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 107.0 350.8 534.0 597.3 781.5 1761.0
## Median (off-prophylaxis) follow-up time
# follow-up estimated by flipping the event indicator (censoring as "event")
plot(survfit(Surv(Y[,1],1-Y[,2]) ~ 1))
survfit(Surv(Y[,1],1-Y[,2]) ~ 1) # reverse Kaplan-Meier estimate
# same as summary(survfit(Surv(Y[,1],1-Y[,2]) ~ 1))$table[5]
# records n.max n.start events median 0.95LCL 0.95UCL
# 44 44 44 32 539 502 777
######### repeat the same thing for the validation cohort of 18 patients
## Median (off-prophylaxis) follow-up time among censored n=15
summary((datvalid$cmv_freedays - datvalid$Total_prophy_days)[datvalid$CMVstatus==0])
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1.0 85.5 154.0 156.0 207.5 373.0
## Median (off-prophylaxis) follow-up time
plot(survfit(Surv(datvalid$cmv_freedays - datvalid$Total_prophy_days,1-datvalid$CMVstatus) ~ 1))
survfit(Surv(datvalid$cmv_freedays - datvalid$Total_prophy_days,1-datvalid$CMVstatus) ~ 1) # reverse Kaplan-Meier estimate
# same as summary(survfit(Surv(datvalid$cmv_freedays - datvalid$Total_prophy_days,1-datvalid$CMVstatus) ~ 1))$table[5]
# records n.max n.start events median 0.95LCL 0.95UCL
# 18 18 18 15 154 151 274
######## univariate analysis - concordance and score test
# per-marker Cox models: concordance plus score-test df and p-value.
# NOTE(review): conc.pv.44 is assigned inside the expression and then reused
# in the subscript on the same line -- this works because the assignment is
# evaluated before the subscript, but it is fragile; consider two statements.
(conc.pv.44 <- t(sapply(data.frame(dat[, -c(1:2)], check.names = F), function(x) {s = summary(coxph(Y ~ x)); c(s$concordance, sctest = s$sctest[c("df", "pvalue")])})))[order(conc.pv.44[,4]),]
# per-marker Wilcoxon tests by event status; `$p.` partially matches the
# `p.value` component of the htest object -- spell it out to be safe.
as.matrix((conc.pv2.44 <- sapply(data.frame(dat[, -c(1:2)], check.names = F), function(x) wilcox.test(x~Y[,2])$p.))[order(conc.pv2.44)])
conc.pv.44[order(conc.pv.44[,4]),]
# write.csv(conc.pv.44[order(conc.pv.44[,4]),], file = "table_univariate.csv", row.names = T)
##################################
######## Descriptive analysis using relative frequencies (not log ratios of relative frequencies)
RF.basic = dat.RF[, 5:68] # 64 cell subsets
RF.basic.ie1 = RF.basic[, 1:32] # 32 cell subsets
RF.basic.pp65 = RF.basic[, 33:64] # 32 cell subsets
######### bar plots for CMV-/CMV+ ratio (within 6 month)
# case = CMV event observed within 180 days of going off prophylaxis
is.case = Y[,"status"]==1 & Y[,"time"]<180
cbind(Y)[order(Y[,"time"]),]
#: four of the 32 CMV- patients have follow-up time < 180 days (107, 124, 125, 129 days)
#: one of the 12 CMV+ patient has time-to-event = 494
which.case = which(is.case==1)
which.control = which(is.case==0)
# group means per cell subset, split by case status
RF.basic.colMeans = ddply(as.data.frame(RF.basic), .(is.case), colMeans) #column 1: is.case (levels 1=FALSE, 2=TRUE)
# log difference of group means = log(control mean / case mean)
RF.basic.diff = log(RF.basic.colMeans[1, -1])-log(RF.basic.colMeans[2, -1])
# split by stimulation (IE-1 vs pp65) and T-cell compartment (CD4 vs CD8)
diff.CD4.ie1 = RF.basic.diff[, grep("IE-1/CD4/", colnames(RF.basic.diff))]
diff.CD4.pp65 = RF.basic.diff[, grep("pp65/CD4/", colnames(RF.basic.diff))]
diff.CD8.ie1 = RF.basic.diff[, grep("IE-1/CD8/", colnames(RF.basic.diff))]
diff.CD8.pp65 = RF.basic.diff[, grep("pp65/CD8/", colnames(RF.basic.diff))]
# strip the "<stim>/CD<4|8>/" prefix, keeping only the cytokine combination
colnames(diff.CD4.ie1) = gsub(".+/CD(4|8)/", "", colnames(diff.CD4.ie1))
colnames(diff.CD4.pp65) = gsub(".+/CD(4|8)/", "", colnames(diff.CD4.pp65))
colnames(diff.CD8.ie1) = gsub(".+/CD(4|8)/", "", colnames(diff.CD8.ie1))
colnames(diff.CD8.pp65) = gsub(".+/CD(4|8)/", "", colnames(diff.CD8.pp65))
# Cytokine-combination labels ordered by the number of positive functions:
# 4 single-positive, 6 double, 4 triple, then the quadruple-positive subset.
combinations.ordered = c("C+I-2-T-", "C-I+2-T-", "C-I-2+T-", "C-I-2-T+",
                         "C+I+2-T-", "C+I-2+T-", "C+I-2-T+", "C-I+2+T-",
                         "C-I+2-T+", "C-I-2+T+", "C+I+2+T-", "C+I+2-T+",
                         "C+I-2+T+", "C-I+2+T+", "C+I+2+T+")
# Expand the short labels (e.g. "C+I+2+T+") into plotmath-ready long names
# ("CD107+INF*gamma+IL2+TNF*alpha+phantom()"). The substitutions run
# sequentially in a fixed order chosen so that later patterns never match
# earlier replacement text; "$" appends a trailing phantom() for plotmath.
combi_names_long = function(nm) {
  patterns     <- c("C\\+",     "C\\-",     "I",         "2",   "T",         "$")
  replacements <- c("CD107\\+", "CD107\\-", "INF*gamma", "IL2", "TNF*alpha", "phantom()")
  for (k in seq_along(patterns)) {
    nm <- gsub(patterns[k], replacements[k], nm)
  }
  nm
}
combinations.ordered.long = combi_names_long(combinations.ordered)
##### bar plots
col1="darkgreen"; col2="blue"; col3 = "orange"; col4 = "red"; col.ordered = rep(c(col1, col2, col3, col4), c(4, 6, 4, 1))
NAMES.ARG = parse(text = combinations.ordered.long)
NAMES.ARG.space = parse(text = gsub("phantom\\(\\)", "phantom(00)", combinations.ordered.long))
lab.ratio = "Ratio of mean relative frequencies\n (CMV within 6 months to no CMV)"
barplot_ratio.3 = function(x, YAXT = T, XLAB = lab.ratio, CEX = .9) { # horizontal
  # Horizontal bar chart of the ratio values in `x`, one bar per marker
  # combination, labeled with the module-level NAMES.ARG.space expressions
  # and colored with col.ordered. Inputs are reversed so the first
  # combination appears at the top of the plot.
  # YAXT: when TRUE, draw the plotmath labels at x = 0; CEX: label size.
  # NOTE(review): this ".3" variant appears unused below — the figure code
  # calls barplot_ratio (presumably sourced from R_myfunctions.R); confirm.
  vals = rev(x)
  labs = rev(NAMES.ARG.space)
  cols = rev(col.ordered)
  mids = barplot(vals, names.arg = labs, las = 2, xaxt = "n", yaxt = "n", col = cols, space = .3, xlab = XLAB, horiz = T)
  abline(v = 1, lty = 2) # reference line at ratio = 1
  axis(1, tck = 0.02)
  if (YAXT) {
    text(0, mids, labels = labs, cex = CEX, xpd = NA, adj = 1)
  }
}
# Figure X: 2x2 panel of horizontal bar plots of the CMV-/CMV+ ratio of mean
# relative frequencies, one panel per antigen x lineage combination.
pdf(file = paste(output.folder, "FigureX_ratio_of_meanRFs_pp65ANDie1_horizontal.pdf", sep = ""), width = 8, height = 8) # horizontal
par(mfrow = c(2, 2), mgp = c(1.5,0.2,0), mar = c(5,4,4,2)-c(2,4,1.5,1.5)+.1, oma = c(0,10.5,0,0))
# NOTE(review): these panels call barplot_ratio, not the barplot_ratio.3
# defined above — presumably barplot_ratio comes from R_myfunctions.R; confirm.
barplot_ratio(c(t(exp(diff.CD4.pp65[, combinations.ordered]))), XLAB = "")
title(main = "CD4+ pp65 stimulation", xlab = "CMV-/CMV+ ratio")
barplot_ratio(c(t(exp(diff.CD8.pp65[, combinations.ordered]))), XLAB = "", YAXT = F)
title(main = "CD8+ pp65 stimulation", xlab = "CMV-/CMV+ ratio")
barplot_ratio(c(t(exp(diff.CD4.ie1[, combinations.ordered]))), XLAB = "")
title(main = "CD4+ IE-1 stimulation", xlab = "CMV-/CMV+ ratio")
barplot_ratio(c(t(exp(diff.CD8.ie1[, combinations.ordered]))), XLAB = "", YAXT = F)
title(main = "CD8+ IE-1 stimulation", xlab = "CMV-/CMV+ ratio")
dev.off()
#########
######################
######## log ratio variables for main analysis
######## split into groups (basic, basic.ie1, ..., maturational, ...)
# Predictor groups are taken by fixed column position in `dat`; the ie1
# columns precede the pp65 columns within each group.
# log (base 2) ratios for CD8/IFNg
LR.CD8IFNg = dat[, 3:4] # 2 cell subsets (ie1 and pp65)
# log (base 2) ratios for basic cell subsets
LR.basic = dat[, 5:68] # 64 cell subsets
LR.basic.ie1 = LR.basic[, 1:32] # 32 cell subsets
LR.basic.pp65 = LR.basic[, 33:64] # 32 cell subsets
# log (base 2) ratios for maturational cell subsets
LR.matu = dat[, 69:388] # 320 cell subsets
LR.matu.ie1 = LR.matu[, 1:160] # 160 cell subsets
LR.matu.pp65 = LR.matu[, 161:320] # 160 cell subsets
################
# MAIN ANALYSIS
################
######## fit the Cox model with adaptive LASSO
# fold id for leave-one-out cross-validation for tuning regularization parameters:
foldid.loo = seq(nrow(LR.basic)) # or 1:44
# fit the model, print coefficients, and save log risk score plot (as "plot_logriskscore_....pdf"):
# (fit.finalmodel is presumably defined in R_myfunctions.R; nopenalty = TRUE
# is used for the 1-2 predictor models, where no LASSO selection is needed.)
family = "cox"
fit.CD8IFNg = fit.finalmodel(x = LR.CD8IFNg, y = Y, family = family, plot.name = "CD8IFNg", nopenalty = TRUE, foldid.list = list(foldid.loo))
fit.CD8IFNg.ie1 = fit.finalmodel(x = LR.CD8IFNg[,"IE-1/CD8/IFNg", drop = F], y = Y, family = family, plot.name = "CD8IFNg.ie1", nopenalty = TRUE, foldid.list = list(foldid.loo))
fit.basic = fit.finalmodel(x = LR.basic, y = Y, family = family, plot.name = "basic", foldid.list = list(foldid.loo))
fit.basic.ie1 = fit.finalmodel(x = LR.basic.ie1, y = Y, family = family, plot.name = "basic_ie1", foldid.list = list(foldid.loo))
fit.basic.pp65 = fit.finalmodel(x = LR.basic.pp65, y = Y, family = family, plot.name = "basic_pp65_vline", foldid.list = list(foldid.loo), vline = -1.126087)
fit.matu = fit.finalmodel(x = LR.matu, y = Y, family = family, plot.name = "maturational", foldid.list = list(foldid.loo))
fit.matu.ie1 = fit.finalmodel(x = LR.matu.ie1, y = Y, family = family, plot.name = "maturational_ie1", foldid.list = list(foldid.loo))
fit.matu.pp65 = fit.finalmodel(x = LR.matu.pp65, y = Y, family = family, plot.name = "maturational_pp65", foldid.list = list(foldid.loo))
######## find best cutoff for the basic pp65 model
cutoff_pp65 = find.cutoff(pred = fit.basic.pp65$fitted, label = Y[, "status"], time = Y[,"time"], type.measure.cutoff = "concordance", best.only = FALSE)
# : best cutoff log risk = -1.192406 (c-index=0.8388626 with SE=0.07128725)# with unupdated data: -1.126087
# Smallest fitted score at which the low-risk group still contains an event:
# sort the fitted values and locate the first position with status == 1.
cutoff_reliableHR_pp65 = sort(fit.basic.pp65$fitted)[match(1, Y[order(fit.basic.pp65$fitted), "status"])]
# : the smallest cutoff for which both (high- and low- risk) groups have at least one event
# (otherwise, HR estimate is unreliable)
# = -1.177128 # with unupdated data: -1.116892
cutoff_pp65_best = max(cutoff_pp65$Best, cutoff_reliableHR_pp65)
# : # = -1.177128 # with unupdated data: -1.116892
# Candidate cutoffs whose c-index is within one SE of row 25's (value - SE).
# NOTE(review): the hard-coded row 25 presumably indexes the best cutoff in
# cutoff_pp65$All — confirm against find.cutoff's output ordering.
cutoff_pp65$All[cutoff_pp65$All[,2] > cutoff_pp65$All[25,2] - cutoff_pp65$All[25,3],]
# with updated data: ll=-1.37818283, ul=-0.09193318
# with unupdated data: ll=-1.3480484, ul=-0.1073164
### vertical line(s) to be placed in plots
#after data updates:
vline_pp65 = mean(c(-1.19240577, -1.17712796))
vrange_pp65 = c(mean(c(-1.37818283, -1.35583526)), mean(c(-0.09193318, 0.01816602)))
# vline_pp65 = -1.192406
# vrange_pp65 = c(-1.37818283, -0.09193318)
# #before data updates:
# vline_pp65 = mean(c(-1.126087088, -1.116891907))
# vrange_pp65 = c(mean(c(-1.348048426, -1.313782273)), mean(c(-0.107316396, -0.009552492)))
# # vline_pp65 = -1.126087088
# # vrange_pp65 = c(-1.348048426, -0.107316396)
########## coefficients and relative importance for fit.basic.pp65 (Table X)
finalcoef = fit.basic.pp65$coefficients
# Scale each selected predictor so coefficients become comparable; relative
# importance = |scaled coefficient| as a percentage of the largest.
finalcoef.scale = scale(LR.basic.pp65[, names(finalcoef)])
finalcoef.adj = finalcoef*attr(finalcoef.scale, "scaled:scale")
finalcoef.adj.perc = abs(finalcoef.adj)/(max(abs(finalcoef.adj))) * 100
cbind(coef = as.vector(finalcoef[order(-abs(finalcoef*attr(finalcoef.scale, "scaled:scale")))]), rel.imp = finalcoef.adj.perc[order(-finalcoef.adj.perc)])
########### Validation data of 18 patients (medium risk, no history CMV (same characteristics as original 44 but independent of original 44)
datval = datvalid
# Linear predictor (log risk score) for the validation cohort, using the
# coefficients selected by the basic pp65 model.
Xb.val = as.matrix(datval[, names(finalcoef)]) %*% finalcoef
pdf(paste(output.folder, "Rplot_validation18.pdf", sep = ""))
op = par(mar = par("mar")-c(0,0,3,0))
# Off-prophylaxis follow-up = total CMV-free days minus days on prophylaxis.
plot.concordance(y = Surv((datval$cmv_freedays-datval$Total_prophy_days), datval$CMVstatus), fitt = Xb.val, pch = c(pch1, pch2), col = c(col1.heavy, col2.heavy), legend = c("CMV infection", "Censoring"), log.y=F)
par(op)
dev.off()
# c-index 0.88 (with original 17) 0.9230769 (with 18)
funconc(time = (datval$cmv_freedays-datval$Total_prophy_days), status = datval$CMVstatus, score = Xb.val, more = T)
######## perform 10 x stratified 5-fold cross-validation
# A distinct seed per model keeps each run reproducible while giving each
# model its own fold splits.
family = "cox"
set.seed(100);cv.CD8IFNg = run.cv(x = LR.CD8IFNg, y = Y, family = family, nopenalty = TRUE, nrepeat = 10, nfolds = 5)
set.seed(108);cv.CD8IFNg.ie1 = run.cv(x = LR.CD8IFNg[,1,drop = F], y = Y, family = family, nopenalty = TRUE, nrepeat = 10, nfolds = 5)
set.seed(101);cv.basic = run.cv(x = LR.basic, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(102);cv.basic.ie1 = run.cv(x = LR.basic.ie1, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(103);cv.basic.pp65 = run.cv(x = LR.basic.pp65, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(104);cv.matu = run.cv(x = LR.matu, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(105);cv.matu.ie1 = run.cv(x = LR.matu.ie1, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(106);cv.matu.pp65 = run.cv(x = LR.matu.pp65, y = Y, family = family, nrepeat = 10, nfolds = 5)
# resubstitution c-index etc (values saved as "table_resubstitution....txt")
my.perf(fit.object.list = list(fit.CD8IFNg.ie1, fit.CD8IFNg, fit.basic, fit.basic.ie1, fit.basic.pp65, fit.matu, fit.matu.ie1, fit.matu.pp65),
fit.name.list = list("CD8 IFNg IE-1", "CD8 IFNg", "basic", "basic IE-1", "basic pp65", "maturational", "maturational IE-1", "maturational pp65"),
prefix = "resubstitution_updated_", type.response = "time-to-event", label = Y[,"status"], timelabel = Y[, "time"], is.cv = F, plot.se = F)
# average cross-validation c-index (values saved as "table_cv_....txt", a plot saved as "plot_ROC_...pdf")
set.seed(100);my.perf(fit.object.list = list(cv.CD8IFNg, cv.basic, cv.basic.ie1, cv.basic.pp65, cv.matu, cv.matu.ie1, cv.matu.pp65),
fit.name.list = list("CD8 IFNg", "basic", "basic IE-1", "basic pp65", "maturational", "maturational IE-1", "maturational pp65"),
prefix = "cv_updated_", type.response = "time-to-event", label = Y[,"status"], timelabel = Y[, "time"], is.cv = T, plot.se = F)
set.seed(200);my.perf(fit.object.list = list(cv.CD8IFNg.ie1),
fit.name.list = list("CD8 IFNg IE-1"),
prefix = "cv_updated_", type.response = "time-to-event", label = Y[,"status"], timelabel = Y[, "time"], is.cv = T, plot.se = F)
######## Summary (c-index and plots) for paper submission (basic_pp65 for original data, mock-Quantiferon for original data, and basic_pp65 for validation data)
# Each figure below follows the same pattern: open a pdf device, shrink the
# margins, draw a concordance plot of fitted log risk scores vs. outcome,
# print the c-index (funconc) to the console, restore par, close the device.
col.final = c("red", "blue") # c(col1.heavy, col2.heavy)
pdf(paste(output.folder, "Figure_basic_pp65_NOLINE.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_basic_pp65_CONSERVATIVE.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
# "Conservative" cutoff: the lower end of the plausible cutoff range.
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), vline = vrange_pp65[1], log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_basic_pp65_BEST.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), vline = vline_pp65, log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_basic_pp65_RANGE.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), vline = vline_pp65, vrange = vrange_pp65, log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_CD8_INFg.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.CD8IFNg$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.CD8IFNg$fitted, more = T) #0.5829384 (0.0870618) # before updates: 0.5940054 (0.09053199)
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_CD8_INFg_IE1.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.CD8IFNg.ie1$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.CD8IFNg.ie1$fitted, more = T) #0.5924171 (0.0870618)
par(op)
dev.off()
######
# Side-by-side comparison: basic pp65 model vs. CD8/IFNg model.
pdf(paste(output.folder, "FigureXX_Comparison.pdf", sep = ""), width = 7, height = 4)
par(mfrow = c(1, 2), mgp = c(2,0.5,0), mar = c(5,4,4,2)-c(2,4,3.5,1.5)+.1, oma = c(0,3,0,0), xpd = NA)
#par(mfrow = c(1,2), mar = c(5,4,4,2)-c(1,0,3,0)+.1)#, mar = c(5,4,4,2)-c(1,0,3,0)+.1)
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, cex.axis = .8, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
plot.concordance(y = Y, fitt = fit.CD8IFNg$fitted, ylab = "", pch = c(pch1, pch2), col = col.final, yaxt = F, cex.axis = .8, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.CD8IFNg$fitted, more = T) #0.5829384 (0.0870618) # before updates: 0.5940054 (0.09053199)
dev.off()
#
pdf(paste(output.folder, "FigureXXX_Cutoff.pdf", sep = ""), width = 3.8, height = 4)
par(mgp = c(2,0.5,0), mar = c(5,4,4,2)-c(2,1.1,3.5,1.5)+.1)
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, cex.axis = .8, legend = c("CMV infection", "Censoring"), vline = vline_pp65, vrange = vrange_pp65, log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
dev.off()
#
pdf(paste(output.folder, "FigureXXXXX_Validation.pdf", sep = ""), width = 3.8, height = 4)
par(mgp = c(2,0.5,0), mar = c(5,4,4,2)-c(2,1.1,3.5,1.5)+.1)
plot.concordance(y = Surv((datval$cmv_freedays-datval$Total_prophy_days), datval$CMVstatus), fitt = Xb.val, pch = c(pch1, pch2), col = col.final, cex.axis = .8, legend = c("CMV infection", "Censoring"), vline = vline_pp65, vrange = vrange_pp65, log.y=F)
funconc(time = (datval$cmv_freedays-datval$Total_prophy_days), status = datval$CMVstatus, score = Xb.val) #0.9230769 for both before and after data updates
dev.off()
#######
######## perform bootstrap analysis for the basic.pp65 model
set.seed(1001); boot.basic.pp65 = run.boot(x = LR.basic.pp65, y = Y, B = 500, maxit = 1000000)
# This is the final figure (will be manually renamed as FIGURE XXXX)
my.dendro(boot.basic.pp65, freq.th = 50, varnames = combi_names_long(gsub("pp65/", "", colnames(LR.basic.pp65))), names.finalcoef = combi_names_long(gsub("pp65/", "", names(finalcoef))), plot.name = "basic_pp65_fullname", horiz = T, longwidth = 6, shortwidth = 4, horizmar = c(0,0,0,17)+.1, cex = 1, B = 500, grid = F, height = F, col.pos = "red", plotmath = T)
##### Power analysis and sample size determination for CTOT proposal
# estimated hazard ratio with best cutoff point
# Dichotomize patients at the chosen cutoff and estimate the hazard ratio of
# the high-risk group via a univariate Cox model on the indicator.
cox_pp65 = coxph(Y ~ I(fit.basic.pp65$fitted > cutoff_pp65_best))
hr_pp65 = exp(coef(cox_pp65)) # estimated hazard ratio
#after data updates: 28.36322 #before data updates: 30.30421
ci_pp65 = exp(confint(cox_pp65))
# after data updates:
# 2.5 % 97.5 %
# 3.522205 228.4002
# before data updates:
# 2.5 % 97.5 %
# 3.849437 238.5661
# Same cutoff/HR computation for the CD8/IFNg model, for comparison.
cutoff_CD8IFNg = find.cutoff(pred = fit.CD8IFNg$fitted, label = Y[, "status"], time = Y[,"time"], type.measure.cutoff = "concordance", best.only = FALSE)
# best cutoff log risk = 0.3577959 # before data updates: 0.3625796
cutoff_reliableHR_CD8IFNg = sort(fit.CD8IFNg$fitted)[match(1, Y[order(fit.CD8IFNg$fitted), "status"])]
cutoff_CD8IFNg_best = max(cutoff_CD8IFNg$Best, cutoff_reliableHR_CD8IFNg)
cox_CD8IFNg = coxph(Y ~ I(fit.CD8IFNg$fitted > cutoff_CD8IFNg_best))
hr_CD8IFNg = exp(coef(cox_CD8IFNg)) # estimated hazard ratio
# after data updates: 4.135228 # before data updates: 2.589347
ci_CD8IFNg = exp(confint(cox_CD8IFNg))
# after data updates:
# 2.5 % 97.5 %
# 1.295074 13.20397
# before data updates:
# 2.5 % 97.5 %
#0.777368 8.624896
library(xtable)
#6-month mortality
# Cross-tabulate risk-group membership against CMV infection within 180 days.
table(upper.group = fit.basic.pp65$fitted > cutoff_pp65_best, CMV.in6mon = Y[, "status"] == 1 & Y[, "time"] < 180)
# CMV.in6mon
# upper.group FALSE TRUE
# FALSE 25 1
# TRUE 8 10
#table(upper.group = fit.basic.pp65$fitted > cutoff_pp65_best, CMV.status = Y[, "status"] == 1)
# Repeat after excluding patients censored before 180 days, whose 6-month
# status is unknown.
cens.obs = which( Y[, "status"] == 0 & Y[, "time"] < 180 )
table(upper.group = fit.basic.pp65$fitted[-cens.obs] > cutoff_pp65_best, CMV.in6mon = Y[-cens.obs, "status"] == 1 & Y[-cens.obs, "time"] < 180)
# CMV.in6mon
# upper.group FALSE TRUE
# FALSE 23 1
# TRUE 6 10
#table(upper.group = fit.basic.pp65$fitted[-cens.obs] > cutoff_pp65_best, CMV.status = Y[-cens.obs, "status"] == 1)
#after data updates:
#10/18 [1] 0.5555556
#10/16 [1] 0.625
# 18/44 [1] 0.4090909
# 16/40 [1] 0.4
#before data updates:
#10/18 [1] 0.5555556
#10/13 [1] 0.7692308
##### upper.group: control group
##### lower.group: intervention group (reduced mortality)
# compute % reduction in mc (= tref-year mortality in ctl) given mc, and hr (of trt to ctl)
compute.r = function(hr = 1/5, mc = .6) ((1-mc)^hr - (1-mc))/mc * 100
# Scenario grid for the power curves: event rate in the high-risk (control)
# group, candidate hazard ratios, and candidate high-risk-group proportions.
morts = 0.6 # observed 6-month mortality in upper.group = 0.56 ~ 0.63 # before data updates: 0.56 ~ 0.77
hratios = c(5, 10, 30, 60) # hr of upper to lower = 28.36322 with a 95% CI (3.522205, 228.4002) #before data updates: 30.3 with a 95% CI (3.8, 238.6)
mylty = c(2,3,1,4); mycol = c("black", "black", "red", "black") # hr=30 (observed) highlighted in red
reds = compute.r(1/hratios) # % reduction in mc by intervention
n = seq(20, 70, .1)
nc.props = seq(20, 50, 10)/100 # observed upper group 18/44 = 0.4090909
mycex = 1
# One pdf per assumed high-risk-group proportion; each plot shows power vs.
# total sample size for every candidate hazard ratio (Hmisc::cpower).
for (nc.prop in nc.props) {
pdf(paste(output.folder, "power_highriskgroup_prop", round(nc.prop*100), "_mortality", round(morts*100),".pdf", sep = ""), width = 4, height = 4)
#op = par(mfrow = c(2, 2), oma = c(3,0,1,0), mar = c(4,3,3,1)) # number of plots per page = length of morts
op = par(mar = par("mar")-c(1,0,3,1))
for (mort in morts) {
plot(0, 0, xlim=range(n), ylim=c(.5,1),
xlab="Total sample size",
ylab="Power", type="n", cex = mycex)
# title(paste("6-month CMV in high-risk group ", round(mort*100), "%", sep = ""))
for (i in seq_along(reds)) {
power = sapply(n, function(n) cpower(tref = .5, mc = mort, r = reds[i], accrual = .5, tmin = .5, nc = nc.prop*n, ni = (1-nc.prop)*n, pr = FALSE))
lines(n, power, lty = mylty[i], col = mycol[i])
}
#abline(h=c(0.8,0.9,0.95), col = "grey")
#points(c(27.57131, 36.91024, 45.64754), c(.8,.9,.95))
legend("bottomright", legend = paste("hazard ratio =", hratios), text.col = mycol, lty = mylty, col = mycol, cex = mycex)
}
#mtitle(paste("High-risk group sample size ", round(nc.prop*100), "%", sep = ""), ll=paste(" alpha=.05, 2-tailed"), cex.l=1, cex = 1.5)
par(op)
dev.off()
}
# Required total sample size for 80/90/95% power, solving cpower(n) = power
# by root finding; first for hr = 30, then for the conservative hr = 5.
red = reds[3] # from hratio=30
nc.prop = .4
mort = .6
cbind(power=c(.8,.9,.95), samplesize=sapply(c(.8,.9,.95), function(power) uniroot(function(x) cpower(tref=.5, mc=mort, r=red, accrual=.5, tmin=.5, nc = nc.prop*x, ni = (1-nc.prop)*x, pr=FALSE) - power, c(1,40000))$root))
# power samplesize
# [1,] 0.80 27.57131
# [2,] 0.90 36.91024
# [3,] 0.95 45.64754
red = reds[1] # from hratio=5
nc.prop = .4
mort = .6
cbind(power=c(.8,.9,.95), samplesize=sapply(c(.8,.9,.95), function(power) uniroot(function(x) cpower(tref=.5, mc=mort, r=red, accrual=.5, tmin=.5, nc = nc.prop*x, ni = (1-nc.prop)*x, pr=FALSE) - power, c(1,40000))$root))
# power samplesize
# [1,] 0.80 31.36941
# [2,] 0.90 41.99483
# [3,] 0.95 51.93573
|
/R_main_CTOT.R
|
no_license
|
dkwon/CTOT
|
R
| false
| false
| 24,298
|
r
|
library(survival) # for survival analysis
library(glmnet) # for LASSO regularization
#library(ROCR) # for ROC analysis
#library(gpclib) # for plotting confidence intervals for x and y by calculating intersection of two polygons
library(Hmisc) # c-index
library(plyr) # data manipulation
output.folder = "./outtemp/" # name of the folder where plots and tables will be saved (use "" for current folder, or use e.g., "./foldername/")
source("R_myfunctions.R") # functions
############
# DATA
############
# log ratios (stim to costim) of relative frequencies
#dat = read.csv("CMVdata_44subjects_log2ratio.csv", check.names = FALSE)
dat = read.csv("CMVdata_44subjects_log2ratio_UPDATED.csv", check.names = FALSE) # data updates: the 32 patients originally with CMVstutus=0 in the 44 original cohort had been updated to new censor date or death.
# Promote column 1 (subject identifiers) to rownames; keep the rest as a matrix.
rownames(dat) = dat[,1]
dat = as.matrix(dat[,-1])
# same for relative frequencies: will be used only for descriptive analysis
dat.RF = read.csv("CMVdata_44subjects_relfreq_UPDATED.csv", check.names = FALSE) # data updates: the 32 patients originally with CMVstutus=0 in the 44 original cohort had been updated to new censor date or death.
rownames(dat.RF) = dat.RF[,1]
dat.RF = as.matrix(dat.RF[,-1])
# validation cohort (previously on prophy but got off prophy after updates)
datvalid = read.csv("CMVdata_validation18_log2ratio.csv", check.names = F, head = T)
head(datvalid[,1:5]) # quick sanity check of the first columns
##################
######## Descriptive analysis for log ratios of relative frequencies
######## survival outcome matrix Y
colnames(dat)[1:2] # [1] "offprophyCMVfreedays" "CMVstatus"
# Survival outcome: time = off-prophylaxis CMV-free days, event = CMV status.
Y = Surv(dat[,1], dat[,2])
## Median (off-prophylaxis) follow-up time among censored
summary(Y[Y[,2]==0,1])
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 107.0 350.8 534.0 597.3 781.5 1761.0
## Median (off-prophylaxis) follow-up time
# Reverse Kaplan-Meier: flip the event indicator (1-status) so censoring is
# treated as the event, which estimates median follow-up.
plot(survfit(Surv(Y[,1],1-Y[,2]) ~ 1))
survfit(Surv(Y[,1],1-Y[,2]) ~ 1) # reverse Kaplan-Meier estimate
# same as summary(survfit(Surv(Y[,1],1-Y[,2]) ~ 1))$table[5]
# records n.max n.start events median 0.95LCL 0.95UCL
# 44 44 44 32 539 502 777
######### repeat the same thing for the validation cohort of 18 patients
## Median (off-prophylaxis) follow-up time among censored n=15
summary((datvalid$cmv_freedays - datvalid$Total_prophy_days)[datvalid$CMVstatus==0])
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1.0 85.5 154.0 156.0 207.5 373.0
## Median (off-prophylaxis) follow-up time
plot(survfit(Surv(datvalid$cmv_freedays - datvalid$Total_prophy_days,1-datvalid$CMVstatus) ~ 1))
survfit(Surv(datvalid$cmv_freedays - datvalid$Total_prophy_days,1-datvalid$CMVstatus) ~ 1) # reverse Kaplan-Meier estimate
# same as summary(survfit(Surv(datvalid$cmv_freedays - datvalid$Total_prophy_days,1-datvalid$CMVstatus) ~ 1))$table[5]
# records n.max n.start events median 0.95LCL 0.95UCL
# 18 18 18 15 154 151 274
######## univariate analysis - concordance and score test
# For each cell-subset log ratio, fit a univariate Cox model and collect the
# concordance plus the score (log-rank) test df and p-value, printed sorted
# by p-value (column 4).
(conc.pv.44 <- t(sapply(data.frame(dat[, -c(1:2)], check.names = F), function(x) {s = summary(coxph(Y ~ x)); c(s$concordance, sctest = s$sctest[c("df", "pvalue")])})))[order(conc.pv.44[,4]),]
# Wilcoxon rank-sum test of each predictor against event status, sorted by p.
# FIX: use the full component name "p.value" — the original "$p." relied on
# R's partial matching of list names, which is fragile and would silently
# return NULL if the abbreviation ever stopped matching uniquely.
as.matrix((conc.pv2.44 <- sapply(data.frame(dat[, -c(1:2)], check.names = F), function(x) wilcox.test(x~Y[,2])$p.value))[order(conc.pv2.44)])
conc.pv.44[order(conc.pv.44[,4]),]
# write.csv(conc.pv.44[order(conc.pv.44[,4]),], file = "table_univariate.csv", row.names = T)
##################################
######## Descriptive analysis using relative frequencies (not log ratios of relative frequencies)
RF.basic = dat.RF[, 5:68] # 64 cell subsets
RF.basic.ie1 = RF.basic[, 1:32] # 32 cell subsets
RF.basic.pp65 = RF.basic[, 33:64] # 32 cell subsets
######### bar plots for CMV-/CMV+ ratio (within 6 month)
# Case definition for the descriptive comparison: CMV event within 180 days.
is.case = Y[,"status"]==1 & Y[,"time"]<180
cbind(Y)[order(Y[,"time"]),]
#: four of the 32 CMV- patients have follow-up time < 180 days (107, 124, 125, 129 days)
#: one of the 12 CMV+ patient has time-to-event = 494
which.case = which(is.case==1)
which.control = which(is.case==0)
# Group means of each relative frequency, by case status.
RF.basic.colMeans = ddply(as.data.frame(RF.basic), .(is.case), colMeans) #column 1: is.case (levels 1=FALSE, 2=TRUE)
# Log contrast (controls minus cases), then split by antigen x lineage and
# strip the "antigen/lineage/" column-name prefix down to the marker code.
RF.basic.diff = log(RF.basic.colMeans[1, -1])-log(RF.basic.colMeans[2, -1])
diff.CD4.ie1 = RF.basic.diff[, grep("IE-1/CD4/", colnames(RF.basic.diff))]
diff.CD4.pp65 = RF.basic.diff[, grep("pp65/CD4/", colnames(RF.basic.diff))]
diff.CD8.ie1 = RF.basic.diff[, grep("IE-1/CD8/", colnames(RF.basic.diff))]
diff.CD8.pp65 = RF.basic.diff[, grep("pp65/CD8/", colnames(RF.basic.diff))]
colnames(diff.CD4.ie1) = gsub(".+/CD(4|8)/", "", colnames(diff.CD4.ie1))
colnames(diff.CD4.pp65) = gsub(".+/CD(4|8)/", "", colnames(diff.CD4.pp65))
colnames(diff.CD8.ie1) = gsub(".+/CD(4|8)/", "", colnames(diff.CD8.ie1))
colnames(diff.CD8.pp65) = gsub(".+/CD(4|8)/", "", colnames(diff.CD8.pp65))
# The 15 marker combinations, ordered by number of positive markers.
combinations.ordered = c("C+I-2-T-", "C-I+2-T-", "C-I-2+T-", "C-I-2-T+", "C+I+2-T-", "C+I-2+T-", "C+I-2-T+", "C-I+2+T-", "C-I+2-T+", "C-I-2+T+", "C+I+2+T-", "C+I+2-T+", "C+I-2+T+", "C-I+2+T+", "C+I+2+T+")
combi_names_long = function(nm) {
  # Expand short marker codes (e.g. "C+I+2+T+") into plotmath-ready long
  # labels (e.g. "CD107+INF*gamma+IL2+TNF*alpha+phantom()").
  # The substitutions run in a fixed order: "C" is handled first so the
  # later single-character patterns ("I", "2", "T") cannot collide with the
  # "CD107" text it introduces; the final "$" match appends phantom() so
  # plotmath renders a trailing +/- sign.
  # NOTE(review): "INF*gamma" is spelled as in the original labels (IFN is
  # the usual abbreviation) — kept verbatim so plot output is unchanged.
  subs = c(
    "C\\+" = "CD107\\+",
    "C\\-" = "CD107\\-",
    "I" = "INF*gamma",
    "2" = "IL2",
    "T" = "TNF*alpha",
    "$" = "phantom()"
  )
  for (pat in names(subs)) {
    nm = gsub(pat, subs[[pat]], nm)
  }
  nm
}
# Long plotmath labels for the ordered combinations.
combinations.ordered.long = combi_names_long(combinations.ordered)
##### bar plots
# One bar color per "number of positive markers" group:
# 4 singles (darkgreen), 6 doubles (blue), 4 triples (orange), 1 quadruple (red).
col1="darkgreen"; col2="blue"; col3 = "orange"; col4 = "red"; col.ordered = rep(c(col1, col2, col3, col4), c(4, 6, 4, 1))
# Parse the plotmath strings into expressions usable as axis labels; the
# ".space" variant replaces the trailing phantom() with phantom(00) to pad width.
NAMES.ARG = parse(text = combinations.ordered.long)
NAMES.ARG.space = parse(text = gsub("phantom\\(\\)", "phantom(00)", combinations.ordered.long))
lab.ratio = "Ratio of mean relative frequencies\n (CMV within 6 months to no CMV)"
barplot_ratio.3 = function(x, YAXT = T, XLAB = lab.ratio, CEX = .9) { # horizontal
  # Horizontal bar chart of the ratio values in `x`, one bar per marker
  # combination, labeled with the module-level NAMES.ARG.space expressions
  # and colored with col.ordered. Inputs are reversed so the first
  # combination appears at the top of the plot.
  # YAXT: when TRUE, draw the plotmath labels at x = 0; CEX: label size.
  # NOTE(review): this ".3" variant appears unused below — the figure code
  # calls barplot_ratio (presumably sourced from R_myfunctions.R); confirm.
  vals = rev(x)
  labs = rev(NAMES.ARG.space)
  cols = rev(col.ordered)
  mids = barplot(vals, names.arg = labs, las = 2, xaxt = "n", yaxt = "n", col = cols, space = .3, xlab = XLAB, horiz = T)
  abline(v = 1, lty = 2) # reference line at ratio = 1
  axis(1, tck = 0.02)
  if (YAXT) {
    text(0, mids, labels = labs, cex = CEX, xpd = NA, adj = 1)
  }
}
# Figure X: 2x2 panel of horizontal bar plots of the CMV-/CMV+ ratio of mean
# relative frequencies, one panel per antigen x lineage combination.
pdf(file = paste(output.folder, "FigureX_ratio_of_meanRFs_pp65ANDie1_horizontal.pdf", sep = ""), width = 8, height = 8) # horizontal
par(mfrow = c(2, 2), mgp = c(1.5,0.2,0), mar = c(5,4,4,2)-c(2,4,1.5,1.5)+.1, oma = c(0,10.5,0,0))
# NOTE(review): these panels call barplot_ratio, not the barplot_ratio.3
# defined above — presumably barplot_ratio comes from R_myfunctions.R; confirm.
barplot_ratio(c(t(exp(diff.CD4.pp65[, combinations.ordered]))), XLAB = "")
title(main = "CD4+ pp65 stimulation", xlab = "CMV-/CMV+ ratio")
barplot_ratio(c(t(exp(diff.CD8.pp65[, combinations.ordered]))), XLAB = "", YAXT = F)
title(main = "CD8+ pp65 stimulation", xlab = "CMV-/CMV+ ratio")
barplot_ratio(c(t(exp(diff.CD4.ie1[, combinations.ordered]))), XLAB = "")
title(main = "CD4+ IE-1 stimulation", xlab = "CMV-/CMV+ ratio")
barplot_ratio(c(t(exp(diff.CD8.ie1[, combinations.ordered]))), XLAB = "", YAXT = F)
title(main = "CD8+ IE-1 stimulation", xlab = "CMV-/CMV+ ratio")
dev.off()
#########
######################
######## log ratio variables for main analysis
######## split into groups (basic, basic.ie1, ..., maturational, ...)
# Predictor groups are taken by fixed column position in `dat`; the ie1
# columns precede the pp65 columns within each group.
# log (base 2) ratios for CD8/IFNg
LR.CD8IFNg = dat[, 3:4] # 2 cell subsets (ie1 and pp65)
# log (base 2) ratios for basic cell subsets
LR.basic = dat[, 5:68] # 64 cell subsets
LR.basic.ie1 = LR.basic[, 1:32] # 32 cell subsets
LR.basic.pp65 = LR.basic[, 33:64] # 32 cell subsets
# log (base 2) ratios for maturational cell subsets
LR.matu = dat[, 69:388] # 320 cell subsets
LR.matu.ie1 = LR.matu[, 1:160] # 160 cell subsets
LR.matu.pp65 = LR.matu[, 161:320] # 160 cell subsets
################
# MAIN ANALYSIS
################
######## fit the Cox model with adaptive LASSO
# fold id for leave-one-out cross-validation for tuning regularization parameters:
foldid.loo = seq(nrow(LR.basic)) # or 1:44
# fit the model, print coefficients, and save log risk score plot (as "plot_logriskscore_....pdf"):
# (fit.finalmodel is presumably defined in R_myfunctions.R; nopenalty = TRUE
# is used for the 1-2 predictor models, where no LASSO selection is needed.)
family = "cox"
fit.CD8IFNg = fit.finalmodel(x = LR.CD8IFNg, y = Y, family = family, plot.name = "CD8IFNg", nopenalty = TRUE, foldid.list = list(foldid.loo))
fit.CD8IFNg.ie1 = fit.finalmodel(x = LR.CD8IFNg[,"IE-1/CD8/IFNg", drop = F], y = Y, family = family, plot.name = "CD8IFNg.ie1", nopenalty = TRUE, foldid.list = list(foldid.loo))
fit.basic = fit.finalmodel(x = LR.basic, y = Y, family = family, plot.name = "basic", foldid.list = list(foldid.loo))
fit.basic.ie1 = fit.finalmodel(x = LR.basic.ie1, y = Y, family = family, plot.name = "basic_ie1", foldid.list = list(foldid.loo))
fit.basic.pp65 = fit.finalmodel(x = LR.basic.pp65, y = Y, family = family, plot.name = "basic_pp65_vline", foldid.list = list(foldid.loo), vline = -1.126087)
fit.matu = fit.finalmodel(x = LR.matu, y = Y, family = family, plot.name = "maturational", foldid.list = list(foldid.loo))
fit.matu.ie1 = fit.finalmodel(x = LR.matu.ie1, y = Y, family = family, plot.name = "maturational_ie1", foldid.list = list(foldid.loo))
fit.matu.pp65 = fit.finalmodel(x = LR.matu.pp65, y = Y, family = family, plot.name = "maturational_pp65", foldid.list = list(foldid.loo))
######## find best cutoff for the basic pp65 model
cutoff_pp65 = find.cutoff(pred = fit.basic.pp65$fitted, label = Y[, "status"], time = Y[,"time"], type.measure.cutoff = "concordance", best.only = FALSE)
# : best cutoff log risk = -1.192406 (c-index=0.8388626 with SE=0.07128725)# with unupdated data: -1.126087
# Smallest fitted score at which the low-risk group still contains an event:
# sort the fitted values and locate the first position with status == 1.
cutoff_reliableHR_pp65 = sort(fit.basic.pp65$fitted)[match(1, Y[order(fit.basic.pp65$fitted), "status"])]
# : the smallest cutoff for which both (high- and low- risk) groups have at least one event
# (otherwise, HR estimate is unreliable)
# = -1.177128 # with unupdated data: -1.116892
cutoff_pp65_best = max(cutoff_pp65$Best, cutoff_reliableHR_pp65)
# : # = -1.177128 # with unupdated data: -1.116892
# Candidate cutoffs whose c-index is within one SE of row 25's (value - SE).
# NOTE(review): the hard-coded row 25 presumably indexes the best cutoff in
# cutoff_pp65$All — confirm against find.cutoff's output ordering.
cutoff_pp65$All[cutoff_pp65$All[,2] > cutoff_pp65$All[25,2] - cutoff_pp65$All[25,3],]
# with updated data: ll=-1.37818283, ul=-0.09193318
# with unupdated data: ll=-1.3480484, ul=-0.1073164
### vertical line(s) to be placed in plots
#after data updates:
vline_pp65 = mean(c(-1.19240577, -1.17712796))
vrange_pp65 = c(mean(c(-1.37818283, -1.35583526)), mean(c(-0.09193318, 0.01816602)))
# vline_pp65 = -1.192406
# vrange_pp65 = c(-1.37818283, -0.09193318)
# #before data updates:
# vline_pp65 = mean(c(-1.126087088, -1.116891907))
# vrange_pp65 = c(mean(c(-1.348048426, -1.313782273)), mean(c(-0.107316396, -0.009552492)))
# # vline_pp65 = -1.126087088
# # vrange_pp65 = c(-1.348048426, -0.107316396)
########## coefficients and relative importance for fit.basic.pp65 (Table X)
finalcoef = fit.basic.pp65$coefficients
# Scale each selected predictor so coefficients become comparable; relative
# importance = |scaled coefficient| as a percentage of the largest.
finalcoef.scale = scale(LR.basic.pp65[, names(finalcoef)])
finalcoef.adj = finalcoef*attr(finalcoef.scale, "scaled:scale")
finalcoef.adj.perc = abs(finalcoef.adj)/(max(abs(finalcoef.adj))) * 100
cbind(coef = as.vector(finalcoef[order(-abs(finalcoef*attr(finalcoef.scale, "scaled:scale")))]), rel.imp = finalcoef.adj.perc[order(-finalcoef.adj.perc)])
########### Validation data of 18 patients (medium risk, no history CMV (same characteristics as original 44 but independent of original 44)
datval = datvalid
# Linear predictor (log risk score) for the validation cohort, using the
# coefficients selected by the basic pp65 model.
Xb.val = as.matrix(datval[, names(finalcoef)]) %*% finalcoef
pdf(paste(output.folder, "Rplot_validation18.pdf", sep = ""))
op = par(mar = par("mar")-c(0,0,3,0))
# Off-prophylaxis follow-up = total CMV-free days minus days on prophylaxis.
plot.concordance(y = Surv((datval$cmv_freedays-datval$Total_prophy_days), datval$CMVstatus), fitt = Xb.val, pch = c(pch1, pch2), col = c(col1.heavy, col2.heavy), legend = c("CMV infection", "Censoring"), log.y=F)
par(op)
dev.off()
# c-index 0.88 (with original 17) 0.9230769 (with 18)
funconc(time = (datval$cmv_freedays-datval$Total_prophy_days), status = datval$CMVstatus, score = Xb.val, more = T)
######## perform 10 x stratified 5-fold cross-validation
# A distinct seed per model keeps each run reproducible while giving each
# model its own fold splits.
family = "cox"
set.seed(100);cv.CD8IFNg = run.cv(x = LR.CD8IFNg, y = Y, family = family, nopenalty = TRUE, nrepeat = 10, nfolds = 5)
set.seed(108);cv.CD8IFNg.ie1 = run.cv(x = LR.CD8IFNg[,1,drop = F], y = Y, family = family, nopenalty = TRUE, nrepeat = 10, nfolds = 5)
set.seed(101);cv.basic = run.cv(x = LR.basic, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(102);cv.basic.ie1 = run.cv(x = LR.basic.ie1, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(103);cv.basic.pp65 = run.cv(x = LR.basic.pp65, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(104);cv.matu = run.cv(x = LR.matu, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(105);cv.matu.ie1 = run.cv(x = LR.matu.ie1, y = Y, family = family, nrepeat = 10, nfolds = 5)
set.seed(106);cv.matu.pp65 = run.cv(x = LR.matu.pp65, y = Y, family = family, nrepeat = 10, nfolds = 5)
# resubstitution c-index etc (values saved as "table_resubstitution....txt")
my.perf(fit.object.list = list(fit.CD8IFNg.ie1, fit.CD8IFNg, fit.basic, fit.basic.ie1, fit.basic.pp65, fit.matu, fit.matu.ie1, fit.matu.pp65),
fit.name.list = list("CD8 IFNg IE-1", "CD8 IFNg", "basic", "basic IE-1", "basic pp65", "maturational", "maturational IE-1", "maturational pp65"),
prefix = "resubstitution_updated_", type.response = "time-to-event", label = Y[,"status"], timelabel = Y[, "time"], is.cv = F, plot.se = F)
# average cross-validation c-index (values saved as "table_cv_....txt", a plot saved as "plot_ROC_...pdf")
set.seed(100);my.perf(fit.object.list = list(cv.CD8IFNg, cv.basic, cv.basic.ie1, cv.basic.pp65, cv.matu, cv.matu.ie1, cv.matu.pp65),
fit.name.list = list("CD8 IFNg", "basic", "basic IE-1", "basic pp65", "maturational", "maturational IE-1", "maturational pp65"),
prefix = "cv_updated_", type.response = "time-to-event", label = Y[,"status"], timelabel = Y[, "time"], is.cv = T, plot.se = F)
set.seed(200);my.perf(fit.object.list = list(cv.CD8IFNg.ie1),
fit.name.list = list("CD8 IFNg IE-1"),
prefix = "cv_updated_", type.response = "time-to-event", label = Y[,"status"], timelabel = Y[, "time"], is.cv = T, plot.se = F)
######## Summary (c-index and plots) for paper submission (basic_pp65 for original data, mock-Quantiferon for original data, and basic_pp65 for validation data)
col.final = c("red", "blue") # c(col1.heavy, col2.heavy)
pdf(paste(output.folder, "Figure_basic_pp65_NOLINE.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_basic_pp65_CONSERVATIVE.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), vline = vrange_pp65[1], log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_basic_pp65_BEST.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), vline = vline_pp65, log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_basic_pp65_RANGE.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), vline = vline_pp65, vrange = vrange_pp65, log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_CD8_INFg.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.CD8IFNg$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.CD8IFNg$fitted, more = T) #0.5829384 (0.0870618) # before updates: 0.5940054 (0.09053199)
par(op)
dev.off()
#
pdf(paste(output.folder, "Figure_CD8_INFg_IE1.pdf", sep = ""), width = 5, height = 5)
op = par(mar = par("mar")-c(1,0,3,0))
plot.concordance(y = Y, fitt = fit.CD8IFNg.ie1$fitted, pch = c(pch1, pch2), col = col.final, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.CD8IFNg.ie1$fitted, more = T) #0.5924171 (0.0870618)
par(op)
dev.off()
######
# Side-by-side comparison of the basic pp65 model vs. the CD8/IFNg model on a
# single page (two panels sharing the y-axis label).
pdf(paste(output.folder, "FigureXX_Comparison.pdf", sep = ""), width = 7, height = 4)
par(mfrow = c(1, 2), mgp = c(2,0.5,0), mar = c(5,4,4,2)-c(2,4,3.5,1.5)+.1, oma = c(0,3,0,0), xpd = NA)
#par(mfrow = c(1,2), mar = c(5,4,4,2)-c(1,0,3,0)+.1)#, mar = c(5,4,4,2)-c(1,0,3,0)+.1)
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, cex.axis = .8, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
plot.concordance(y = Y, fitt = fit.CD8IFNg$fitted, ylab = "", pch = c(pch1, pch2), col = col.final, yaxt = F, cex.axis = .8, legend = c("CMV infection", "Censoring"), log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.CD8IFNg$fitted, more = T) #0.5829384 (0.0870618) # before updates: 0.5940054 (0.09053199)
dev.off()
#
# Single-panel figure showing the chosen cutoff and cutoff range for pp65.
pdf(paste(output.folder, "FigureXXX_Cutoff.pdf", sep = ""), width = 3.8, height = 4)
par(mgp = c(2,0.5,0), mar = c(5,4,4,2)-c(2,1.1,3.5,1.5)+.1)
plot.concordance(y = Y, fitt = fit.basic.pp65$fitted, pch = c(pch1, pch2), col = col.final, cex.axis = .8, legend = c("CMV infection", "Censoring"), vline = vline_pp65, vrange = vrange_pp65, log.y=T)
funconc(time = Y[,1], status = Y[,2], score = fit.basic.pp65$fitted, more = T) #0.8791469 (0.0870618) # before updates: 0.8746594 (0.09053199)
dev.off()
#
# External validation: apply the fitted linear predictor (Xb.val) to the
# validation cohort (datval); time axis is CMV-free days after the end of
# prophylaxis.  NOTE(review): datval and Xb.val are built earlier in the
# script.
pdf(paste(output.folder, "FigureXXXXX_Validation.pdf", sep = ""), width = 3.8, height = 4)
par(mgp = c(2,0.5,0), mar = c(5,4,4,2)-c(2,1.1,3.5,1.5)+.1)
plot.concordance(y = Surv((datval$cmv_freedays-datval$Total_prophy_days), datval$CMVstatus), fitt = Xb.val, pch = c(pch1, pch2), col = col.final, cex.axis = .8, legend = c("CMV infection", "Censoring"), vline = vline_pp65, vrange = vrange_pp65, log.y=F)
funconc(time = (datval$cmv_freedays-datval$Total_prophy_days), status = datval$CMVstatus, score = Xb.val) #0.9230769 for both before and after data updates
dev.off()
#######
######## perform bootstrap analysis for the basic.pp65 model
# B = 500 bootstrap resamples; seed fixed for reproducibility.
# NOTE(review): run.boot, my.dendro, combi_names_long, LR.basic.pp65 and
# finalcoef are project helpers/objects defined elsewhere in this script.
set.seed(1001); boot.basic.pp65 = run.boot(x = LR.basic.pp65, y = Y, B = 500, maxit = 1000000)
# This is the final figure (will be manually renamed as FIGURE XXXX)
# Horizontal dendrogram of bootstrap variable-selection frequencies; only
# variables selected in >= 50 of the 500 resamples are shown (freq.th = 50).
my.dendro(boot.basic.pp65, freq.th = 50, varnames = combi_names_long(gsub("pp65/", "", colnames(LR.basic.pp65))), names.finalcoef = combi_names_long(gsub("pp65/", "", names(finalcoef))), plot.name = "basic_pp65_fullname", horiz = T, longwidth = 6, shortwidth = 4, horizmar = c(0,0,0,17)+.1, cex = 1, B = 500, grid = F, height = F, col.pos = "red", plotmath = T)
##### Power analysis and sample size determination for CTOT proposal
# estimated hazard ratio with best cutoff point
# Dichotomize the pp65 risk score at its best cutoff and fit a Cox model to
# estimate the hazard ratio of the high-risk vs. low-risk group.
cox_pp65 = coxph(Y ~ I(fit.basic.pp65$fitted > cutoff_pp65_best))
hr_pp65 = exp(coef(cox_pp65)) # estimated hazard ratio
#after data updates: 28.36322 #before data updates: 30.30421
# 95% confidence interval for the pp65 hazard ratio (Wald, on the log scale).
ci_pp65 = exp(confint(cox_pp65))
# after data updates:
# 2.5 % 97.5 %
# 3.522205 228.4002
# before data updates:
# 2.5 % 97.5 %
# 3.849437 238.5661
# Repeat for the CD8/IFNg model: find the concordance-maximizing cutoff, then
# fit a Cox model on the dichotomized score.
# NOTE(review): find.cutoff is a project helper defined elsewhere.
cutoff_CD8IFNg = find.cutoff(pred = fit.CD8IFNg$fitted, label = Y[, "status"], time = Y[,"time"], type.measure.cutoff = "concordance", best.only = FALSE)
# best cutoff log risk = 0.3577959 # before data updates: 0.3625796
# Smallest fitted value at which an event (status == 1) is observed; taking
# the max below keeps at least one event above the cutoff so the hazard
# ratio is estimable.
cutoff_reliableHR_CD8IFNg = sort(fit.CD8IFNg$fitted)[match(1, Y[order(fit.CD8IFNg$fitted), "status"])]
cutoff_CD8IFNg_best = max(cutoff_CD8IFNg$Best, cutoff_reliableHR_CD8IFNg)
cox_CD8IFNg = coxph(Y ~ I(fit.CD8IFNg$fitted > cutoff_CD8IFNg_best))
hr_CD8IFNg = exp(coef(cox_CD8IFNg)) # estimated hazard ratio
# after data updates: 4.135228 # before data updates: 2.589347
ci_CD8IFNg = exp(confint(cox_CD8IFNg))
# after data updates:
# 2.5 % 97.5 %
# 1.295074 13.20397
# before data updates:
# 2.5 % 97.5 %
#0.777368 8.624896
library(xtable)
#6-month mortality
# Cross-tabulate membership in the high-risk (upper) group against CMV
# infection within 6 months (180 days).
table(upper.group = fit.basic.pp65$fitted > cutoff_pp65_best, CMV.in6mon = Y[, "status"] == 1 & Y[, "time"] < 180)
# CMV.in6mon
# upper.group FALSE TRUE
# FALSE 25 1
# TRUE 8 10
#table(upper.group = fit.basic.pp65$fitted > cutoff_pp65_best, CMV.status = Y[, "status"] == 1)
# Exclude subjects censored before 6 months, since their 6-month CMV status
# is unknown, and re-tabulate.
cens.obs = which( Y[, "status"] == 0 & Y[, "time"] < 180 )
table(upper.group = fit.basic.pp65$fitted[-cens.obs] > cutoff_pp65_best, CMV.in6mon = Y[-cens.obs, "status"] == 1 & Y[-cens.obs, "time"] < 180)
# CMV.in6mon
# upper.group FALSE TRUE
# FALSE 23 1
# TRUE 6 10
#table(upper.group = fit.basic.pp65$fitted[-cens.obs] > cutoff_pp65_best, CMV.status = Y[-cens.obs, "status"] == 1)
# Hand-computed event proportions from the tables above:
#after data updates:
#10/18 [1] 0.5555556
#10/16 [1] 0.625
# 18/44 [1] 0.4090909
# 16/40 [1] 0.4
#before data updates:
#10/18 [1] 0.5555556
#10/13 [1] 0.7692308
##### upper.group: control group
##### lower.group: intervention group (reduced mortality)
# compute % reduction in mc (= tref-year mortality in ctl) given mc, and hr (of trt to ctl)
compute.r = function(hr = 1/5, mc = .6) ((1-mc)^hr - (1-mc))/mc * 100
# Power-curve inputs.  NOTE(review): cpower (Hmisc) computes power for a Cox
# two-group comparison with exponential accrual/follow-up — confirm Hmisc is
# loaded earlier in the script.
morts = 0.6 # observed 6-month mortality in upper.group = 0.56 ~ 0.63 # before data updates: 0.56 ~ 0.77
hratios = c(5, 10, 30, 60) # hr of upper to lower = 28.36322 with a 95% CI (3.522205, 228.4002) #before data updates: 30.3 with a 95% CI (3.8, 238.6)
# Line type/color per hazard ratio; hr = 30 (closest to the estimate) is red.
mylty = c(2,3,1,4); mycol = c("black", "black", "red", "black")
reds = compute.r(1/hratios) # % reduction in mc by intervention
# Grid of total sample sizes to evaluate.
n = seq(20, 70, .1)
nc.props = seq(20, 50, 10)/100 # observed upper group 18/44 = 0.4090909
mycex = 1
# One pdf per assumed high-risk-group proportion; each page plots power vs.
# total sample size for every candidate hazard ratio.
for (nc.prop in nc.props) {
pdf(paste(output.folder, "power_highriskgroup_prop", round(nc.prop*100), "_mortality", round(morts*100),".pdf", sep = ""), width = 4, height = 4)
#op = par(mfrow = c(2, 2), oma = c(3,0,1,0), mar = c(4,3,3,1)) # number of plots per page = length of morts
op = par(mar = par("mar")-c(1,0,3,1))
for (mort in morts) {
# Empty frame; curves are added below.
plot(0, 0, xlim=range(n), ylim=c(.5,1),
xlab="Total sample size",
ylab="Power", type="n", cex = mycex)
# title(paste("6-month CMV in high-risk group ", round(mort*100), "%", sep = ""))
for (i in seq_along(reds)) {
# tref/accrual/tmin are in years (0.5 = 6 months); nc/ni split the total
# sample size n between control (high-risk) and intervention groups.
power = sapply(n, function(n) cpower(tref = .5, mc = mort, r = reds[i], accrual = .5, tmin = .5, nc = nc.prop*n, ni = (1-nc.prop)*n, pr = FALSE))
lines(n, power, lty = mylty[i], col = mycol[i])
}
#abline(h=c(0.8,0.9,0.95), col = "grey")
#points(c(27.57131, 36.91024, 45.64754), c(.8,.9,.95))
legend("bottomright", legend = paste("hazard ratio =", hratios), text.col = mycol, lty = mylty, col = mycol, cex = mycex)
}
#mtitle(paste("High-risk group sample size ", round(nc.prop*100), "%", sep = ""), ll=paste(" alpha=.05, 2-tailed"), cex.l=1, cex = 1.5)
par(op)
dev.off()
}
# Invert the power function: for each target power, solve for the total
# sample size at which cpower equals that power (uniroot over 1..40000).
red = reds[3] # from hratio=30
nc.prop = .4
mort = .6
cbind(power=c(.8,.9,.95), samplesize=sapply(c(.8,.9,.95), function(power) uniroot(function(x) cpower(tref=.5, mc=mort, r=red, accrual=.5, tmin=.5, nc = nc.prop*x, ni = (1-nc.prop)*x, pr=FALSE) - power, c(1,40000))$root))
# power samplesize
# [1,] 0.80 27.57131
# [2,] 0.90 36.91024
# [3,] 0.95 45.64754
# Repeat under the most conservative hazard ratio considered (hr = 5).
red = reds[1] # from hratio=5
nc.prop = .4
mort = .6
cbind(power=c(.8,.9,.95), samplesize=sapply(c(.8,.9,.95), function(power) uniroot(function(x) cpower(tref=.5, mc=mort, r=red, accrual=.5, tmin=.5, nc = nc.prop*x, ni = (1-nc.prop)*x, pr=FALSE) - power, c(1,40000))$root))
# power samplesize
# [1,] 0.80 31.36941
# [2,] 0.90 41.99483
# [3,] 0.95 51.93573
|
#' @title Generation of One Continuous Variable with a Mixture Distribution Using the Power Method Transformation
#'
#' @description This function simulates one continuous mixture variable. Mixture distributions describe random variables that
#' are drawn from more than one component distribution. For a random variable \eqn{Y_{mix}} from a finite continuous mixture
#' distribution with \eqn{k} components, the probability density function (PDF) can be described by:
#'
#' \deqn{h_Y(y) = \sum_{i=1}^{k} \pi_i f_{Yi}(y), \sum_{i=1}^{k} \pi_i = 1.}
#'
#' The \eqn{\pi_i} are mixing parameters which determine the weight of each component distribution \eqn{f_{Yi}(y)} in the overall
#' probability distribution. As long as each component has a valid PDF, the overall distribution \eqn{h_Y(y)} has a valid PDF.
#' The main assumption is statistical independence between the process of randomly selecting the component distribution and the
#' distributions themselves. Each component \eqn{Y_i} is generated using either Fleishman's third-order (\code{method} = "Fleishman",
#' \doi{10.1007/BF02293811}) or Headrick's fifth-order (\code{method} = "Polynomial",
#' \doi{10.1016/S0167-9473(02)00072-5}) power method transformation (PMT). It works by matching standardized
#' cumulants -- the first four (mean, variance, skew, and standardized kurtosis) for Fleishman's method, or the first six (mean,
#' variance, skew, standardized kurtosis, and standardized fifth and sixth cumulants) for Headrick's method. The transformation is
#' expressed as follows:
#'
#' \deqn{Y = c_0 + c_1 * Z + c_2 * Z^2 + c_3 * Z^3 + c_4 * Z^4 + c_5 * Z^5, Z \sim N(0,1),}
#'
#' where \eqn{c_4} and \eqn{c_5} both equal \eqn{0} for Fleishman's method. The real constants are calculated by \cr
#' \code{\link[SimMultiCorrData]{find_constants}}. These components are then transformed to the desired mixture variable using a
#' random multinomial variable generated based on the mixing probabilities. There are no parameter input checks in order to decrease
#' simulation time. All inputs should be checked prior to simulation with \code{\link[SimCorrMix]{validpar}}. Summaries for the
#' simulation results can be obtained with \code{\link[SimCorrMix]{summary_var}}.
#'
#' Mixture distributions provide a useful way for describing heterogeneity in a population, especially when an outcome is a
#' composite response from multiple sources. The vignette \bold{Variable Types} provides more information about simulation of mixture
#' variables and the required parameters. The vignette \bold{Expected Cumulants and Correlations for Continuous Mixture Variables}
#' gives the equations for the expected cumulants of a mixture variable. In addition, Headrick & Kowalchuk (2007,
#' \doi{10.1080/10629360600605065}) outlined a general method for comparing a simulated distribution \eqn{Y} to a given theoretical
#' distribution \eqn{Y^*}. These steps can be found in the \bold{Continuous Mixture Distributions} vignette.
#'
#' @section Overview of Simulation Process:
#' 1) A check is performed to see if any distributions are repeated within the parameter inputs, i.e. if the mixture variable
#' contains 2 components with the same standardized cumulants. These are noted so that the constants are only calculated once.
#'
#' 2) The constants are calculated for each component variable using \code{\link[SimMultiCorrData]{find_constants}}. If no
#' solutions are found that generate a valid power method PDF, the function will return constants that produce an invalid PDF
#' (or a stop error if no solutions can be found). Possible solutions include: 1) changing the seed, or 2) using a \code{mix_Six}
#' list with vectors of sixth cumulant correction values (if \code{method} = "Polynomial"). Errors regarding constant
#' calculation are the most probable cause of function failure.
#'
#' 3) A matrix \code{X_cont} of dim \code{n x length(mix_pis)} of standard normal variables is generated and singular-value decomposition is done to
#' remove any correlation. The \code{constants} are applied to \code{X_cont} to create the component variables \code{Y} with the desired distributions.
#'
#' 4) A random multinomial variable \code{M = rmultinom(n, size = 1, prob = mix_pis)} is generated using \code{stats::rmultinom}.
#' The continuous mixture variable \code{Y_mix} is created from the component variables \code{Y} based on this multinomial variable.
#' That is, if \code{M[i, k_i] = 1}, then \code{Y_mix[i] = Y[i, k_i]}. A location-scale transformation is done on \code{Y_mix} to give it mean \code{means} and variance \code{vars}.
#'
#' @section Reasons for Function Errors:
#' 1) The most likely cause for function errors is that no solutions to \code{\link[SimMultiCorrData]{fleish}} or
#' \code{\link[SimMultiCorrData]{poly}} converged when using \code{\link[SimMultiCorrData]{find_constants}}. If this happens,
#' the simulation will stop. It may help to first use \code{\link[SimMultiCorrData]{find_constants}} for each component variable to
#' determine if a sixth cumulant correction value is needed. The solutions can be used as starting values (see \code{cstart} below).
#' If the standardized cumulants are obtained from \code{calc_theory}, the user may need to use rounded values as inputs (i.e.
#' \code{skews = round(skews, 8)}). For example, in order to ensure that skew is exactly 0 for symmetric distributions.
#'
#' 2) The kurtosis may be outside the region of possible values. There is an associated lower boundary for kurtosis associated
#' with a given skew (for Fleishman's method) or skew and fifth and sixth cumulants (for Headrick's method). Use
#' \code{\link[SimMultiCorrData]{calc_lower_skurt}} to determine the boundary for a given set of cumulants.
#'
#' @param n the sample size (i.e. the length of the simulated variable; default = 10000)
#' @param method the method used to generate the component variables. "Fleishman" uses Fleishman's third-order polynomial transformation
#' and "Polynomial" uses Headrick's fifth-order transformation.
#' @param means mean for the mixture variable (default = 0)
#' @param vars variance for the mixture variable (default = 1)
#' @param mix_pis a vector of mixing probabilities that sum to 1 for the component distributions
#' @param mix_mus a vector of means for the component distributions
#' @param mix_sigmas a vector of standard deviations for the component distributions
#' @param mix_skews a vector of skew values for the component distributions
#' @param mix_skurts a vector of standardized kurtoses for the component distributions
#' @param mix_fifths a vector of standardized fifth cumulants for the component distributions; keep NULL if using \code{method} = "Fleishman"
#' to generate continuous variables
#' @param mix_sixths a vector of standardized sixth cumulants for the component distributions; keep NULL if using \code{method} = "Fleishman"
#' to generate continuous variables
#' @param mix_Six a list of vectors of sixth cumulant correction values for the component distributions of \eqn{Y_{mix}};
#' use \code{NULL} if no correction is desired for a given component; if no correction is desired for any component keep as
#' \code{mix_Six = list()} (not necessary for \code{method} = "Fleishman")
#' @param seed the seed value for random number generation (default = 1234)
#' @param cstart a list of length equal to the total number of mixture components containing initial values for root-solving
#' algorithm used in \code{\link[SimMultiCorrData]{find_constants}}. If user specified, each list element must be input as a matrix.
#' For \code{method} = "Fleishman", each should have 3 columns for \eqn{c_1, c_2, c_3};
#' for \code{method} = "Polynomial", each should have 5 columns for \eqn{c_1, c_2, c_3, c_4, c_5}. If no starting values are specified for
#' a given component, that list element should be \code{NULL}.
#' @param quiet if FALSE prints total simulation time
#' @import SimMultiCorrData
#' @importFrom stats cor dbeta dbinom dchisq density dexp df dgamma dlnorm dlogis dmultinom dnbinom dnorm dpois dt dunif dweibull ecdf
#' median pbeta pbinom pchisq pexp pf pgamma plnorm plogis pnbinom pnorm ppois pt punif pweibull qbeta qbinom qchisq qexp qf qgamma
#' qlnorm qlogis qnbinom qnorm qpois qt quantile qunif qweibull rbeta rbinom rchisq rexp rf rgamma rlnorm rlogis rmultinom rnbinom
#' rnorm rpois rt runif rweibull sd uniroot var
#' @import utils
#' @import BB
#' @import nleqslv
#' @export
#' @keywords simulation continuous mixture Fleishman Headrick
#' @seealso \code{\link[SimMultiCorrData]{find_constants}}, \code{\link[SimCorrMix]{validpar}}, \code{\link[SimCorrMix]{summary_var}}
#' @return A list with the following components:
#' @return \code{constants} a data.frame of the constants
#' @return \code{Y_comp} a matrix whose columns are the component variables of the mixture variable
#' @return \code{Y_mix} a one-column matrix containing the generated mixture variable
#' @return \code{sixth_correction} the sixth cumulant correction values for \code{Y_comp}
#' @return \code{valid.pdf} "TRUE" if constants generate a valid PDF, else "FALSE"
#' @return \code{Time} the total simulation time in minutes
#' @references See references for \code{\link[SimCorrMix]{SimCorrMix}}.
#'
#' @examples
#' # Mixture of Normal(-2, 1) and Normal(2, 1)
#' Nmix <- contmixvar1(n = 1000, "Polynomial", means = 0, vars = 1,
#' mix_pis = c(0.4, 0.6), mix_mus = c(-2, 2), mix_sigmas = c(1, 1),
#' mix_skews = c(0, 0), mix_skurts = c(0, 0), mix_fifths = c(0, 0),
#' mix_sixths = c(0, 0))
#' \dontrun{
#' # Mixture of Beta(6, 3), Beta(4, 1.5), and Beta(10, 20)
#' Stcum1 <- calc_theory("Beta", c(6, 3))
#' Stcum2 <- calc_theory("Beta", c(4, 1.5))
#' Stcum3 <- calc_theory("Beta", c(10, 20))
#' mix_pis <- c(0.5, 0.2, 0.3)
#' mix_mus <- c(Stcum1[1], Stcum2[1], Stcum3[1])
#' mix_sigmas <- c(Stcum1[2], Stcum2[2], Stcum3[2])
#' mix_skews <- c(Stcum1[3], Stcum2[3], Stcum3[3])
#' mix_skurts <- c(Stcum1[4], Stcum2[4], Stcum3[4])
#' mix_fifths <- c(Stcum1[5], Stcum2[5], Stcum3[5])
#' mix_sixths <- c(Stcum1[6], Stcum2[6], Stcum3[6])
#' mix_Six <- list(seq(0.01, 10, 0.01), c(0.01, 0.02, 0.03),
#' seq(0.01, 10, 0.01))
#' Bstcum <- calc_mixmoments(mix_pis, mix_mus, mix_sigmas, mix_skews,
#' mix_skurts, mix_fifths, mix_sixths)
#' Bmix <- contmixvar1(n = 10000, "Polynomial", Bstcum[1], Bstcum[2]^2,
#' mix_pis, mix_mus, mix_sigmas, mix_skews, mix_skurts, mix_fifths,
#' mix_sixths, mix_Six)
#' Bsum <- summary_var(Y_comp = Bmix$Y_comp, Y_mix = Bmix$Y_mix, means = means,
#' vars = vars, mix_pis = mix_pis, mix_mus = mix_mus,
#' mix_sigmas = mix_sigmas, mix_skews = mix_skews, mix_skurts = mix_skurts,
#' mix_fifths = mix_fifths, mix_sixths = mix_sixths)
#' }
contmixvar1 <- function(n = 10000, method = c("Fleishman", "Polynomial"),
                        means = 0, vars = 1, mix_pis = NULL, mix_mus = NULL,
                        mix_sigmas = NULL, mix_skews = NULL,
                        mix_skurts = NULL, mix_fifths = NULL,
                        mix_sixths = NULL, mix_Six = list(), seed = 1234,
                        cstart = list(), quiet = FALSE) {
  # Simulate one continuous mixture variable by the power method (see the
  # roxygen block above for the full contract).  Steps:
  #   1) detect repeated component distributions,
  #   2) solve for power-method constants per unique component,
  #   3) generate uncorrelated standard normals and transform them,
  #   4) mix the components via a multinomial selector and rescale.
  start.time <- Sys.time()

  # Step 1: record duplicated components so constants are solved only once.
  # Each row of csame.dist is c(index.of.first.occurrence, duplicate.index).
  # BUG FIX: the original iterated `2:length(mix_skews)`, which evaluates to
  # c(2, 1) for a single-component input, indexing past the end of the
  # cumulant vectors; guard the loop for length > 1.
  csame.dist <- NULL
  if (length(mix_skews) > 1) {
    for (i in 2:length(mix_skews)) {
      if (mix_skews[i] %in% mix_skews[1:(i - 1)]) {
        csame <- which(mix_skews[1:(i - 1)] == mix_skews[i])
        for (j in seq_along(csame)) {
          if (method == "Polynomial") {
            # Headrick's method matches skew, kurtosis, fifth, and sixth
            # standardized cumulants.
            if ((mix_skurts[i] == mix_skurts[csame[j]]) &&
                (mix_fifths[i] == mix_fifths[csame[j]]) &&
                (mix_sixths[i] == mix_sixths[csame[j]])) {
              csame.dist <- rbind(csame.dist, c(csame[j], i))
              break
            }
          }
          if (method == "Fleishman") {
            # Fleishman's method only matches skew and kurtosis.
            if (mix_skurts[i] == mix_skurts[csame[j]]) {
              csame.dist <- rbind(csame.dist, c(csame[j], i))
              break
            }
          }
        }
      }
    }
  }

  # Step 2: solve for the power-method constants of each component.
  SixCorr <- numeric(length(mix_pis))     # sixth-cumulant corrections used
  Valid.PDF <- numeric(length(mix_pis))   # whether constants give a valid PDF
  if (method == "Fleishman") {
    constants <- matrix(NA, nrow = length(mix_pis), ncol = 4)
    colnames(constants) <- c("c0", "c1", "c2", "c3")
  }
  if (method == "Polynomial") {
    constants <- matrix(NA, nrow = length(mix_pis), ncol = 6)
    colnames(constants) <- c("c0", "c1", "c2", "c3", "c4", "c5")
  }
  for (i in seq_along(mix_pis)) {
    # Reuse the constants from an earlier identical component, if flagged.
    if (!is.null(csame.dist)) {
      rind <- which(csame.dist[, 2] == i)
      if (length(rind) > 0) {
        constants[i, ] <- constants[csame.dist[rind, 1], ]
        SixCorr[i] <- SixCorr[csame.dist[rind, 1]]
        Valid.PDF[i] <- Valid.PDF[csame.dist[rind, 1]]
      }
    }
    if (anyNA(constants[i, ])) {
      Six2 <- if (length(mix_Six) == 0) NULL else mix_Six[[i]]
      cstart2 <- if (length(cstart) == 0) NULL else cstart[[i]]
      cons <-
        suppressWarnings(find_constants(method = method, skews = mix_skews[i],
          skurts = mix_skurts[i], fifths = mix_fifths[i],
          sixths = mix_sixths[i], Six = Six2, cstart = cstart2, n = 25,
          seed = seed))
      # find_constants signals failure with NULL or a length-1 result.
      if (is.null(cons) || length(cons) == 1) {
        stop("Constants can not be found for component ", i, ".")
      }
      constants[i, ] <- cons$constants
      SixCorr[i] <- if (is.null(cons$SixCorr1)) NA else cons$SixCorr1
      Valid.PDF[i] <- cons$valid
    }
  }

  # Step 3: generate an n x k matrix of standard normals, remove the sample
  # correlation via SVD, and re-standardize the columns.
  set.seed(seed)
  X_cont <- matrix(rnorm(length(mix_pis) * n), n)
  X_cont <- scale(X_cont, TRUE, FALSE)
  X_cont <- X_cont %*% svd(X_cont, nu = 0)$v
  X_cont <- scale(X_cont, FALSE, TRUE)
  Y <- matrix(1, nrow = n, ncol = length(mix_pis))
  Yb <- matrix(1, nrow = n, ncol = length(mix_pis))
  for (i in seq_along(mix_pis)) {
    if (method == "Fleishman") {
      Y[, i] <- constants[i, 1] + constants[i, 2] * X_cont[, i] +
        constants[i, 3] * X_cont[, i]^2 + constants[i, 4] * X_cont[, i]^3
    }
    if (method == "Polynomial") {
      Y[, i] <- constants[i, 1] + constants[i, 2] * X_cont[, i] +
        constants[i, 3] * X_cont[, i]^2 + constants[i, 4] * X_cont[, i]^3 +
        constants[i, 5] * X_cont[, i]^4 + constants[i, 6] * X_cont[, i]^5
    }
    # Give component i its target mean and standard deviation.
    Yb[, i] <- mix_mus[i] + mix_sigmas[i] * Y[, i]
  }

  # Step 4: pick one component per observation with a multinomial draw, then
  # location-scale transform the mixture to mean `means`, variance `vars`.
  set.seed(seed)
  M <- rmultinom(n, size = 1, prob = mix_pis)
  # M[k, i] == 1 selects component k for observation i, so the row sum of
  # t(M) * Yb extracts Yb[i, k] (equivalent to apply(t(M) * Yb, 1, sum),
  # but vectorized).
  Y_mix <- rowSums(t(M) * Yb)
  Y_mix <- scale(Y_mix)
  Y_mix <- matrix(means + sqrt(vars) * Y_mix, n, 1)

  stop.time <- Sys.time()
  Time <- round(difftime(stop.time, start.time, units = "min"), 3)
  if (!quiet) cat("Total Simulation time:", Time, "minutes \n")
  list(constants = as.data.frame(constants),
       Y_comp = Yb, Y_mix = Y_mix, sixth_correction = SixCorr,
       valid.pdf = Valid.PDF, Time = Time)
}
|
/R/contmixvar1.R
|
no_license
|
minghao2016/SimCorrMix
|
R
| false
| false
| 14,942
|
r
|
#' @title Generation of One Continuous Variable with a Mixture Distribution Using the Power Method Transformation
#'
#' @description This function simulates one continuous mixture variable. Mixture distributions describe random variables that
#' are drawn from more than one component distribution. For a random variable \eqn{Y_{mix}} from a finite continuous mixture
#' distribution with \eqn{k} components, the probability density function (PDF) can be described by:
#'
#' \deqn{h_Y(y) = \sum_{i=1}^{k} \pi_i f_{Yi}(y), \sum_{i=1}^{k} \pi_i = 1.}
#'
#' The \eqn{\pi_i} are mixing parameters which determine the weight of each component distribution \eqn{f_{Yi}(y)} in the overall
#' probability distribution. As long as each component has a valid PDF, the overall distribution \eqn{h_Y(y)} has a valid PDF.
#' The main assumption is statistical independence between the process of randomly selecting the component distribution and the
#' distributions themselves. Each component \eqn{Y_i} is generated using either Fleishman's third-order (\code{method} = "Fleishman",
#' \doi{10.1007/BF02293811}) or Headrick's fifth-order (\code{method} = "Polynomial",
#' \doi{10.1016/S0167-9473(02)00072-5}) power method transformation (PMT). It works by matching standardized
#' cumulants -- the first four (mean, variance, skew, and standardized kurtosis) for Fleishman's method, or the first six (mean,
#' variance, skew, standardized kurtosis, and standardized fifth and sixth cumulants) for Headrick's method. The transformation is
#' expressed as follows:
#'
#' \deqn{Y = c_0 + c_1 * Z + c_2 * Z^2 + c_3 * Z^3 + c_4 * Z^4 + c_5 * Z^5, Z \sim N(0,1),}
#'
#' where \eqn{c_4} and \eqn{c_5} both equal \eqn{0} for Fleishman's method. The real constants are calculated by \cr
#' \code{\link[SimMultiCorrData]{find_constants}}. These components are then transformed to the desired mixture variable using a
#' random multinomial variable generated based on the mixing probabilities. There are no parameter input checks in order to decrease
#' simulation time. All inputs should be checked prior to simulation with \code{\link[SimCorrMix]{validpar}}. Summaries for the
#' simulation results can be obtained with \code{\link[SimCorrMix]{summary_var}}.
#'
#' Mixture distributions provide a useful way for describing heterogeneity in a population, especially when an outcome is a
#' composite response from multiple sources. The vignette \bold{Variable Types} provides more information about simulation of mixture
#' variables and the required parameters. The vignette \bold{Expected Cumulants and Correlations for Continuous Mixture Variables}
#' gives the equations for the expected cumulants of a mixture variable. In addition, Headrick & Kowalchuk (2007,
#' \doi{10.1080/10629360600605065}) outlined a general method for comparing a simulated distribution \eqn{Y} to a given theoretical
#' distribution \eqn{Y^*}. These steps can be found in the \bold{Continuous Mixture Distributions} vignette.
#'
#' @section Overview of Simulation Process:
#' 1) A check is performed to see if any distributions are repeated within the parameter inputs, i.e. if the mixture variable
#' contains 2 components with the same standardized cumulants. These are noted so that the constants are only calculated once.
#'
#' 2) The constants are calculated for each component variable using \code{\link[SimMultiCorrData]{find_constants}}. If no
#' solutions are found that generate a valid power method PDF, the function will return constants that produce an invalid PDF
#' (or a stop error if no solutions can be found). Possible solutions include: 1) changing the seed, or 2) using a \code{mix_Six}
#' list with vectors of sixth cumulant correction values (if \code{method} = "Polynomial"). Errors regarding constant
#' calculation are the most probable cause of function failure.
#'
#' 3) A matrix \code{X_cont} of dim \code{n x length(mix_pis)} of standard normal variables is generated and singular-value decomposition is done to
#' remove any correlation. The \code{constants} are applied to \code{X_cont} to create the component variables \code{Y} with the desired distributions.
#'
#' 4) A random multinomial variable \code{M = rmultinom(n, size = 1, prob = mix_pis)} is generated using \code{stats::rmultinom}.
#' The continuous mixture variable \code{Y_mix} is created from the component variables \code{Y} based on this multinomial variable.
#' That is, if \code{M[i, k_i] = 1}, then \code{Y_mix[i] = Y[i, k_i]}. A location-scale transformation is done on \code{Y_mix} to give it mean \code{means} and variance \code{vars}.
#'
#' @section Reasons for Function Errors:
#' 1) The most likely cause for function errors is that no solutions to \code{\link[SimMultiCorrData]{fleish}} or
#' \code{\link[SimMultiCorrData]{poly}} converged when using \code{\link[SimMultiCorrData]{find_constants}}. If this happens,
#' the simulation will stop. It may help to first use \code{\link[SimMultiCorrData]{find_constants}} for each component variable to
#' determine if a sixth cumulant correction value is needed. The solutions can be used as starting values (see \code{cstart} below).
#' If the standardized cumulants are obtained from \code{calc_theory}, the user may need to use rounded values as inputs (i.e.
#' \code{skews = round(skews, 8)}). For example, in order to ensure that skew is exactly 0 for symmetric distributions.
#'
#' 2) The kurtosis may be outside the region of possible values. There is an associated lower boundary for kurtosis associated
#' with a given skew (for Fleishman's method) or skew and fifth and sixth cumulants (for Headrick's method). Use
#' \code{\link[SimMultiCorrData]{calc_lower_skurt}} to determine the boundary for a given set of cumulants.
#'
#' @param n the sample size (i.e. the length of the simulated variable; default = 10000)
#' @param method the method used to generate the component variables. "Fleishman" uses Fleishman's third-order polynomial transformation
#' and "Polynomial" uses Headrick's fifth-order transformation.
#' @param means mean for the mixture variable (default = 0)
#' @param vars variance for the mixture variable (default = 1)
#' @param mix_pis a vector of mixing probabilities that sum to 1 for the component distributions
#' @param mix_mus a vector of means for the component distributions
#' @param mix_sigmas a vector of standard deviations for the component distributions
#' @param mix_skews a vector of skew values for the component distributions
#' @param mix_skurts a vector of standardized kurtoses for the component distributions
#' @param mix_fifths a vector of standardized fifth cumulants for the component distributions; keep NULL if using \code{method} = "Fleishman"
#' to generate continuous variables
#' @param mix_sixths a vector of standardized sixth cumulants for the component distributions; keep NULL if using \code{method} = "Fleishman"
#' to generate continuous variables
#' @param mix_Six a list of vectors of sixth cumulant correction values for the component distributions of \eqn{Y_{mix}};
#' use \code{NULL} if no correction is desired for a given component; if no correction is desired for any component keep as
#' \code{mix_Six = list()} (not necessary for \code{method} = "Fleishman")
#' @param seed the seed value for random number generation (default = 1234)
#' @param cstart a list of length equal to the total number of mixture components containing initial values for root-solving
#' algorithm used in \code{\link[SimMultiCorrData]{find_constants}}. If user specified, each list element must be input as a matrix.
#' For \code{method} = "Fleishman", each should have 3 columns for \eqn{c_1, c_2, c_3};
#' for \code{method} = "Polynomial", each should have 5 columns for \eqn{c_1, c_2, c_3, c_4, c_5}. If no starting values are specified for
#' a given component, that list element should be \code{NULL}.
#' @param quiet if FALSE prints total simulation time
#' @import SimMultiCorrData
#' @importFrom stats cor dbeta dbinom dchisq density dexp df dgamma dlnorm dlogis dmultinom dnbinom dnorm dpois dt dunif dweibull ecdf
#' median pbeta pbinom pchisq pexp pf pgamma plnorm plogis pnbinom pnorm ppois pt punif pweibull qbeta qbinom qchisq qexp qf qgamma
#' qlnorm qlogis qnbinom qnorm qpois qt quantile qunif qweibull rbeta rbinom rchisq rexp rf rgamma rlnorm rlogis rmultinom rnbinom
#' rnorm rpois rt runif rweibull sd uniroot var
#' @import utils
#' @import BB
#' @import nleqslv
#' @export
#' @keywords simulation continuous mixture Fleishman Headrick
#' @seealso \code{\link[SimMultiCorrData]{find_constants}}, \code{\link[SimCorrMix]{validpar}}, \code{\link[SimCorrMix]{summary_var}}
#' @return A list with the following components:
#' @return \code{constants} a data.frame of the constants
#' @return \code{Y_comp} a data.frame of the components of the mixture variable
#' @return \code{Y_mix} a data.frame of the generated mixture variable
#' @return \code{sixth_correction} the sixth cumulant correction values for \code{Y_comp}
#' @return \code{valid.pdf} "TRUE" if constants generate a valid PDF, else "FALSE"
#' @return \code{Time} the total simulation time in minutes
#' @references See references for \code{\link[SimCorrMix]{SimCorrMix}}.
#'
#' @examples
#' # Mixture of Normal(-2, 1) and Normal(2, 1)
#' Nmix <- contmixvar1(n = 1000, "Polynomial", means = 0, vars = 1,
#' mix_pis = c(0.4, 0.6), mix_mus = c(-2, 2), mix_sigmas = c(1, 1),
#' mix_skews = c(0, 0), mix_skurts = c(0, 0), mix_fifths = c(0, 0),
#' mix_sixths = c(0, 0))
#' \dontrun{
#' # Mixture of Beta(6, 3), Beta(4, 1.5), and Beta(10, 20)
#' Stcum1 <- calc_theory("Beta", c(6, 3))
#' Stcum2 <- calc_theory("Beta", c(4, 1.5))
#' Stcum3 <- calc_theory("Beta", c(10, 20))
#' mix_pis <- c(0.5, 0.2, 0.3)
#' mix_mus <- c(Stcum1[1], Stcum2[1], Stcum3[1])
#' mix_sigmas <- c(Stcum1[2], Stcum2[2], Stcum3[2])
#' mix_skews <- c(Stcum1[3], Stcum2[3], Stcum3[3])
#' mix_skurts <- c(Stcum1[4], Stcum2[4], Stcum3[4])
#' mix_fifths <- c(Stcum1[5], Stcum2[5], Stcum3[5])
#' mix_sixths <- c(Stcum1[6], Stcum2[6], Stcum3[6])
#' mix_Six <- list(seq(0.01, 10, 0.01), c(0.01, 0.02, 0.03),
#' seq(0.01, 10, 0.01))
#' Bstcum <- calc_mixmoments(mix_pis, mix_mus, mix_sigmas, mix_skews,
#' mix_skurts, mix_fifths, mix_sixths)
#' Bmix <- contmixvar1(n = 10000, "Polynomial", Bstcum[1], Bstcum[2]^2,
#' mix_pis, mix_mus, mix_sigmas, mix_skews, mix_skurts, mix_fifths,
#' mix_sixths, mix_Six)
#' Bsum <- summary_var(Y_comp = Bmix$Y_comp, Y_mix = Bmix$Y_mix, means = means,
#' vars = vars, mix_pis = mix_pis, mix_mus = mix_mus,
#' mix_sigmas = mix_sigmas, mix_skews = mix_skews, mix_skurts = mix_skurts,
#' mix_fifths = mix_fifths, mix_sixths = mix_sixths)
#' }
# Generate one continuous mixture variable via the power-method
# transformation: each mixture component is a 3rd-order ("Fleishman") or
# 5th-order ("Polynomial") polynomial of a standard normal variable; the
# components are combined according to the mixing probabilities mix_pis
# and the result is rescaled to the requested mean and variance.
# Parameters and return values are documented in the roxygen block above.
contmixvar1 <- function(n = 10000, method = c("Fleishman", "Polynomial"),
                        means = 0, vars = 1, mix_pis = NULL, mix_mus = NULL,
                        mix_sigmas = NULL, mix_skews = NULL,
                        mix_skurts = NULL, mix_fifths = NULL,
                        mix_sixths = NULL, mix_Six = list(), seed = 1234,
                        cstart = list(), quiet = FALSE) {
  start.time <- Sys.time()
  # Find components whose standardized cumulants are identical to an
  # earlier component so the power-method constants can be reused instead
  # of re-solved.  Each row of csame.dist is c(first occurrence, duplicate).
  # NOTE(review): with a single component, 2:length(mix_skews) iterates
  # over c(2, 1); both iterations are no-ops here, but this relies on
  # NA/empty comparison semantics -- confirm before refactoring.
  csame.dist <- NULL
  for (i in 2:length(mix_skews)) {
    if (mix_skews[i] %in% mix_skews[1:(i - 1)]) {
      csame <- which(mix_skews[1:(i - 1)] == mix_skews[i])
      for (j in 1:length(csame)) {
        if (method == "Polynomial") {
          # 5th-order match requires equality up to the 6th cumulant.
          if ((mix_skurts[i] == mix_skurts[csame[j]]) &
              (mix_fifths[i] == mix_fifths[csame[j]]) &
              (mix_sixths[i] == mix_sixths[csame[j]])) {
            csame.dist <- rbind(csame.dist, c(csame[j], i))
            break
          }
        }
        if (method == "Fleishman") {
          # 3rd-order match only needs matching kurtosis (skew already equal).
          if (mix_skurts[i] == mix_skurts[csame[j]]) {
            csame.dist <- rbind(csame.dist, c(csame[j], i))
            break
          }
        }
      }
    }
  }
  # Per-component sixth-cumulant corrections and valid-PDF flags.
  SixCorr <- numeric(length(mix_pis))
  Valid.PDF <- numeric(length(mix_pis))
  if (method == "Fleishman") {
    constants <- matrix(NA, nrow = length(mix_pis), ncol = 4)
    colnames(constants) <- c("c0", "c1", "c2", "c3")
  }
  if (method == "Polynomial") {
    constants <- matrix(NA, nrow = length(mix_pis), ncol = 6)
    colnames(constants) <- c("c0", "c1", "c2", "c3", "c4", "c5")
  }
  # Solve (or copy) the power-method constants for every component.
  for (i in 1:length(mix_pis)) {
    if (!is.null(csame.dist)) {
      rind <- which(csame.dist[, 2] == i)
      if (length(rind) > 0) {
        # Duplicate moments: reuse the previously solved constants.
        constants[i, ] <- constants[csame.dist[rind, 1], ]
        SixCorr[i] <- SixCorr[csame.dist[rind, 1]]
        Valid.PDF[i] <- Valid.PDF[csame.dist[rind, 1]]
      }
    }
    if (sum(is.na(constants[i, ])) > 0) {
      # No reusable solution: solve this component's constants, passing
      # its sixth-cumulant correction grid / starting values if supplied.
      if (length(mix_Six) == 0) Six2 <- NULL else
        Six2 <- mix_Six[[i]]
      if (length(cstart) == 0) cstart2 <- NULL else
        cstart2 <- cstart[[i]]
      cons <-
        suppressWarnings(find_constants(method = method, skews = mix_skews[i],
          skurts = mix_skurts[i], fifths = mix_fifths[i],
          sixths = mix_sixths[i], Six = Six2, cstart = cstart2, n = 25,
          seed = seed))
      if (length(cons) == 1 | is.null(cons)) {
        stop(paste("Constants can not be found for component ", i,
          ".", sep = ""))
      }
      con_solution <- cons$constants
      SixCorr[i] <- ifelse(is.null(cons$SixCorr1), NA, cons$SixCorr1)
      Valid.PDF[i] <- cons$valid
      constants[i, ] <- con_solution
    }
  }
  # Draw one standard-normal column per component, then center the
  # columns, rotate them onto their principal axes (SVD) and rescale,
  # so the columns are exactly uncorrelated.
  set.seed(seed)
  X_cont <- matrix(rnorm(length(mix_pis) * n), n)
  X_cont <- scale(X_cont, TRUE, FALSE)
  X_cont <- X_cont %*% svd(X_cont, nu = 0)$v
  X_cont <- scale(X_cont, FALSE, TRUE)
  # Apply the polynomial transformation to get each standardized
  # component (Y), then shift/scale to the component means/sds (Yb).
  Y <- matrix(1, nrow = n, ncol = length(mix_pis))
  Yb <- matrix(1, nrow = n, ncol = length(mix_pis))
  for (i in 1:length(mix_pis)) {
    if (method == "Fleishman") {
      Y[, i] <- constants[i, 1] + constants[i, 2] * X_cont[, i] +
        constants[i, 3] * X_cont[, i]^2 + constants[i, 4] * X_cont[, i]^3
    }
    if (method == "Polynomial") {
      Y[, i] <- constants[i, 1] + constants[i, 2] * X_cont[, i] +
        constants[i, 3] * X_cont[, i]^2 + constants[i, 4] * X_cont[, i]^3 +
        constants[i, 5] * X_cont[, i]^4 + constants[i, 6] * X_cont[, i]^5
    }
    Yb[, i] <- mix_mus[i] + mix_sigmas[i] * Y[, i]
  }
  # Select one component per observation with a multinomial draw, mix,
  # then standardize and rescale to the target mean and variance.
  set.seed(seed)
  M <- rmultinom(n, size = 1, prob = mix_pis)
  Y_mix <- apply(t(M) * Yb, 1, sum)
  Y_mix <- scale(Y_mix)
  Y_mix <- matrix(means + sqrt(vars) * Y_mix, n, 1)
  stop.time <- Sys.time()
  Time <- round(difftime(stop.time, start.time, units = "min"), 3)
  if (quiet == FALSE) cat("Total Simulation time:", Time, "minutes \n")
  result <- list(constants = as.data.frame(constants),
    Y_comp = Yb, Y_mix = Y_mix, sixth_correction = SixCorr,
    valid.pdf = Valid.PDF, Time = Time)
  result
}
|
# Summarise tagging success per sample from a banzai tag_count.txt file.
tag_count_file <- "/Users/jimmy.odonnell/Desktop/Analysis_20151013_1719/tag_count.txt"
tag_counts <- read.table(file = tag_count_file,
  header = TRUE,
  sep = " "
)
head(tag_counts)
# All combinations of primary and secondary index are considered.
# Omit combinations which have a very low number of reads relative to other
# samples: samples with fewer than this proportion of the mean number of
# reads will be excluded.
lower_percent_threshold <- 0.05
low_frequency_samples <- which(tag_counts$left_tagged < mean(tag_counts$left_tagged)*lower_percent_threshold)
low_frequency_data <- tag_counts[low_frequency_samples, ]
# BUG FIX: when no sample falls below the threshold, 'low_frequency_samples'
# is integer(0) and 'tag_counts[-integer(0), ]' would drop EVERY row.
# Only filter when at least one low-frequency sample exists.
if (length(low_frequency_samples) > 0) {
  tag_counts <- tag_counts[-low_frequency_samples, ]
}
# Distribution of reads per sample.
plot(
  sort(tag_counts$left_tagged)
)
# Per-sample loss rate between left- and right-tag matching.
plot(
  sort(
    (tag_counts$left_tagged - tag_counts$right_tagged) / tag_counts$left_tagged
  )
)
# Proportion of left-tagged reads that were also right-tagged.
tag_rate <- tag_counts[, "right_tagged"] / tag_counts[,"left_tagged"]
boxplot(tag_rate, ylim = c(0, 1))
mean(tag_rate)
sd(tag_rate)
range(tag_rate)
nrow(tag_counts)
|
/beta/tag_counts.R
|
no_license
|
reikopm/mbonlive_banzai
|
R
| false
| false
| 1,065
|
r
|
# Summarise tagging success per sample from a banzai tag_count.txt file.
tag_count_file <- "/Users/jimmy.odonnell/Desktop/Analysis_20151013_1719/tag_count.txt"
tag_counts <- read.table(file = tag_count_file,
  header = TRUE,
  sep = " "
)
head(tag_counts)
# All combinations of primary and secondary index are considered.
# Omit combinations which have a very low number of reads relative to other
# samples: samples with fewer than this proportion of the mean number of
# reads will be excluded.
lower_percent_threshold <- 0.05
low_frequency_samples <- which(tag_counts$left_tagged < mean(tag_counts$left_tagged)*lower_percent_threshold)
low_frequency_data <- tag_counts[low_frequency_samples, ]
# BUG FIX: when no sample falls below the threshold, 'low_frequency_samples'
# is integer(0) and 'tag_counts[-integer(0), ]' would drop EVERY row.
# Only filter when at least one low-frequency sample exists.
if (length(low_frequency_samples) > 0) {
  tag_counts <- tag_counts[-low_frequency_samples, ]
}
# Distribution of reads per sample.
plot(
  sort(tag_counts$left_tagged)
)
# Per-sample loss rate between left- and right-tag matching.
plot(
  sort(
    (tag_counts$left_tagged - tag_counts$right_tagged) / tag_counts$left_tagged
  )
)
# Proportion of left-tagged reads that were also right-tagged.
tag_rate <- tag_counts[, "right_tagged"] / tag_counts[,"left_tagged"]
boxplot(tag_rate, ylim = c(0, 1))
mean(tag_rate)
sd(tag_rate)
range(tag_rate)
nrow(tag_counts)
|
# Quick exploratory analysis of COVID-19 data with covid19.analytics.
# FIX: install the package only when missing instead of re-installing
# unconditionally on every run; use TRUE/FALSE instead of T/F.
if (!requireNamespace("covid19.analytics", quietly = TRUE)) {
  install.packages("covid19.analytics")
}
library(covid19.analytics)
# Aggregated case data and the confirmed-cases time series.
ag <- covid19.data(case = 'aggregated')
View(ag)
tsc <- covid19.data(case = 'ts-confirmed')
# Summary reports, without and with graphical output.
report.summary(Nentries = 10, graphical.output = FALSE)
report.summary(Nentries = 10, graphical.output = TRUE)
# Totals per location; 'TR' is retried as 'TURKEY'.
tots.per.location(tsc, geo.loc = 'US')
tots.per.location(tsc, geo.loc = 'TR')
tots.per.location(tsc, geo.loc = 'TURKEY')
live.map(tsc)
growth.rate(tsc, geo.loc = 'TURKEY')
generate.SIR.model(tsc, 'TURKEY')
|
/ts-Analitics.R
|
no_license
|
sametgumus212/coronavirus
|
R
| false
| false
| 466
|
r
|
# Quick exploratory analysis of COVID-19 data with covid19.analytics.
# FIX: install the package only when missing instead of re-installing
# unconditionally on every run; use TRUE/FALSE instead of T/F.
if (!requireNamespace("covid19.analytics", quietly = TRUE)) {
  install.packages("covid19.analytics")
}
library(covid19.analytics)
# Aggregated case data and the confirmed-cases time series.
ag <- covid19.data(case = 'aggregated')
View(ag)
tsc <- covid19.data(case = 'ts-confirmed')
# Summary reports, without and with graphical output.
report.summary(Nentries = 10, graphical.output = FALSE)
report.summary(Nentries = 10, graphical.output = TRUE)
# Totals per location; 'TR' is retried as 'TURKEY'.
tots.per.location(tsc, geo.loc = 'US')
tots.per.location(tsc, geo.loc = 'TR')
tots.per.location(tsc, geo.loc = 'TURKEY')
live.map(tsc)
growth.rate(tsc, geo.loc = 'TURKEY')
generate.SIR.model(tsc, 'TURKEY')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modify.operators.R
\name{rSegregate}
\alias{rSegregate}
\title{Segregate values in a raster into layers}
\usage{
rSegregate(obj, by = NULL, flatten = FALSE, background = NULL)
}
\arguments{
\item{obj}{[\code{RasterLayer(1)}]\cr The object to modify.}
\item{by}{[\code{RasterLayer(1)} | \code{matrix(1)}]\cr additional object by
which \code{obj} should be segregated. If left empty, the distinct values
of \code{obj} will be taken.}
\item{flatten}{[\code{logical(1)}]\cr should all values be set to value 1
(\code{TRUE}) or should the original \code{obj} values be retained
(\code{FALSE}, default)?}
\item{background}{[\code{integerish(1)}]\cr the value any cell with value NA
should have.}
}
\value{
a \code{RasterStack} of the same dimensions as \code{obj}, in which
the elements specified in \code{by} or the distinct values of \code{obj}
have each been assigned to a separate layer.
}
\description{
Distinct values in a raster will be assigned to layers in a raster stack.
}
\examples{
input <- rtData$continuous
patches <- rPatches(rBinarise(input, thresh = 30), background = 0)
myPatches <- rSegregate(patches)
visualise(myPatches[[c(2, 3, 12, 16)]])
# when flattening, all values are set to 1
myPatches2 <- rSegregate(patches, flatten = TRUE)
visualise(myPatches2[[c(2, 3, 12, 16)]])
# cut out by 'patches'
patchValues <- rSegregate(input, by = patches)
visualise(patchValues[[c(2, 3, 12, 16)]])
}
\seealso{
Other operators to modify a raster: \code{\link{rBlend}},
\code{\link{rReduce}}, \code{\link{rRescale}}
}
\concept{operators to modify a raster}
|
/man/rSegregate.Rd
|
no_license
|
gisdevelope/rasterTools
|
R
| false
| true
| 1,648
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modify.operators.R
\name{rSegregate}
\alias{rSegregate}
\title{Segregate values in a raster into layers}
\usage{
rSegregate(obj, by = NULL, flatten = FALSE, background = NULL)
}
\arguments{
\item{obj}{[\code{RasterLayer(1)}]\cr The object to modify.}
\item{by}{[\code{RasterLayer(1)} | \code{matrix(1)}]\cr additional object by
which \code{obj} should be segregated. If left empty, the distinct values
of \code{obj} will be taken.}
\item{flatten}{[\code{logical(1)}]\cr should all values be set to value 1
(\code{TRUE}) or should the original \code{obj} values be retained
(\code{FALSE}, default)?}
\item{background}{[\code{integerish(1)}]\cr the value any cell with value NA
should have.}
}
\value{
a \code{RasterStack} of the same dimensions as \code{obj}, in which
the elements specified in \code{by} or the distinct values of \code{obj}
have each been assigned to a separate layer.
}
\description{
Distinct values in a raster will be assigned to layers in a raster stack.
}
\examples{
input <- rtData$continuous
patches <- rPatches(rBinarise(input, thresh = 30), background = 0)
myPatches <- rSegregate(patches)
visualise(myPatches[[c(2, 3, 12, 16)]])
# when flattening, all values are set to 1
myPatches2 <- rSegregate(patches, flatten = TRUE)
visualise(myPatches2[[c(2, 3, 12, 16)]])
# cut out by 'patches'
patchValues <- rSegregate(input, by = patches)
visualise(patchValues[[c(2, 3, 12, 16)]])
}
\seealso{
Other operators to modify a raster: \code{\link{rBlend}},
\code{\link{rReduce}}, \code{\link{rRescale}}
}
\concept{operators to modify a raster}
|
# Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# Get the swirl state
# Return swirl's internal state environment.
# While swirl runs, its callback sits at the top of the call stack and
# stores its state (named 'e') in the callback's environment.
getState <- function(){
  callback <- sys.function(1)
  environment(callback)$e
}
# Retrieve the interaction log stored in swirl's state.
getLog <- function(){
  state <- getState()
  state$log
}
# Deterministically assign an exam subject (1..nb_sujet) to a student.
# The student id seeds the RNG, so each student always gets the same
# subject.  The subject that last year's scheme would have produced is
# drawn first and excluded, so repeating students get a new subject.
# (RNG call order is preserved exactly: the two sample() calls below
# must stay in this order.)
Choix_sujet_etudiant <- function(num_etud, nb_sujet = 5){
  set.seed(num_etud)
  # First draw reproduces last year's assignment; excluded below.
  previous_subject <- sample(1:nb_sujet, size = 2)[2]
  # Second draw picks an index among the remaining subjects.
  pick <- sample(1:(nb_sujet - 1), 1)
  remaining <- setdiff(1:nb_sujet, previous_subject)
  return(remaining[pick])
}
# Numeric equality test with an absolute tolerance: TRUE when the summed
# elementwise absolute difference is at most 1e-15 and is not NaN.
test_egalite <- function(x, y){
  total_diff <- sum(abs(x - y))
  (total_diff <= 1e-15) & !is.nan(total_diff)
}
# Simulate vs$m2 sample proportions (each the mean of vs$n
# Bernoulli(vs$p0) draws), then overwrite six randomly chosen positions
# among the first vs$m1 with values placed just inside/outside the
# [pinf, psup] bounds, clamped to [0, 1].
# (RNG call order preserved: replicate/rbinom first, then sample().)
genere_data <- function(vs){
  sims <- replicate(vs$m2, mean(rbinom(vs$n, 1, vs$p0)))
  slots <- sample(1:vs$m1, 6, replace = FALSE)
  sims[slots[1]] <- max((floor(vs$pinf * vs$n) - 1) / vs$n, 0)
  sims[slots[2]] <- max((floor(vs$pinf * vs$n) - 4) / vs$n, 0)
  sims[slots[3]] <- (ceiling(vs$pinf * vs$n) + 1) / vs$n
  sims[slots[4]] <- (floor(vs$psup * vs$n) - 1) / vs$n
  sims[slots[5]] <- min((ceiling(vs$psup * vs$n) + 1) / vs$n, 1)
  sims[slots[6]] <- min((ceiling(vs$psup * vs$n) + 4) / vs$n, 1)
  return(sims)
}
# Answer test for the student-number question.  Reads the id typed by
# the student from swirl's state (e$val), asks for confirmation, then:
# derives the student's subject number, keeps only that subject's entry
# from 'variable_sujet', seeds the RNG with the id, simulates the
# subject's data set and assigns it into the global environment under
# the subject-specific name.  Returns TRUE when the id was confirmed.
num_etud<-function(){
  ### The different subjects (one entry per subject in each vector)
  variable_sujet<-list(
    prop=c(1,0,1,0,1,0),
    n=c(100,100,200,200,300,300),
    niv_confiance=c(85,85,85,80,80,80),
    Y=c("Y^A","Y^B","Y^C","Y^D","Y^X","Y^Z"),
    y=c("y^A","y^B","y^C","y^D","y^X","y^Z"),
    nom_data=c("yA","y_B","y_C_obs","yD","y_X","y_Z_obs"),
    pageWebsujet=c("https://toltex.imag.fr/VAM/TP3/sujet.html","https://toltex.imag.fr/VAM/TP3/sujet_tp.html","https://toltex.imag.fr/VAM/TP3/sujet__tp.html","https://toltex.imag.fr/VAM/TP3/sujet3.html","https://toltex.imag.fr/VAM/TP3/sujet_tp3.html","https://toltex.imag.fr/VAM/TP3/sujet__tp3.html")
  )
  ####
  e <- get("e", parent.frame())
  num_etud <- as.integer(e$val)
  res<-TRUE
  if (is.na(num_etud)|(num_etud<0)){
    res<-FALSE
  } else {
    # Confirmation prompt; \xE9 escapes are latin1 accented characters.
    confirmation<-paste("Vous confirmez que votre num\xE9ro d'\xE9tudiant est", num_etud, "?",sep=" ")
    Encoding(confirmation)<- "latin1"
    message(confirmation)
    res<-readline("Tapez 1, si c'est bien le cas, sinon appuyez sur n'importe quelle autre touche. Puis validez.")==1
    if (res){
      e$num_etud<-num_etud
      e$num_sujet <- Choix_sujet_etudiant(num_etud,length(variable_sujet[[1]]))
      set.seed(num_etud)
      # Keep only this student's subject in each per-subject vector.
      vs<-variable_sujet
      for (i in 1:length(vs)){
        vs[[i]]<-vs[[i]][e$num_sujet]
      }
      e$vs<-vs
      # Random multinomial probabilities for a 5-category variable.
      p0<-runif(1,0.3,0.5)
      p1<-(1-p0)*runif(1,0.3,0.4)
      p2<-(1-p0-p1)*runif(1,0.2,0.5)
      p3<-(1-p0-p1-p2)*0.5
      p4<-1-p0-p1-p2-p3
      z<-rmultinom(vs$n,1,c(p0,p1,p2,p3,p4))
      # Convert the one-hot matrix into category codes 0..4.
      y<-which(z==1)-(0:(vs$n-1))*5-1
      if(vs$prop) assign(vs$nom_data,as.integer(y>0),.GlobalEnv) else assign(vs$nom_data,y,.GlobalEnv)
      # Work around a swirl bug: when two lessons run back-to-back,
      # 'skipped' is initialised with FALSE, unlike for the first
      # lesson; reset it and keep a private copy (mon_skip).
      e$log$skipped<-c()
      e$log$mon_skip<-e$log$skipped
    }
  }
  return(res)
}
# Answer test run when the student submits the lesson log.
# 'selection' (1-5) is the student's group; builds a CSV log table
# (question number, correctness, attempts, skips, timestamps), encodes
# it in base64 and opens the group's pre-filled Google Form in the
# browser.  Also stores a fallback email (address, subject, body) in
# swirl's state for googleForm_log()/email_log().
# NOTE(review): base64encode() and email() are provided by packages
# attached by the course environment -- not visible in this file.
submit_log <- function(){
  e <- get("e", parent.frame())
  res<-FALSE
  selection <- getState()$val
  if(selection %in% 1:5){
    res<-TRUE
    nom_etud <- readline("Quel est votre nom de famille ? ")
    demande_prenom<-"Quel est votre pr\xE9nom ? "
    Encoding(demande_prenom) <- "latin1"
    prenom_etud <- readline(demande_prenom)
    # Please edit the link below
    pre_fill_link1 <- "https://docs.google.com/forms/d/e/1FAIpQLSe0vu3khlVduduY6VOb7bRKwlJ-suMTkHa3BHFQ2gkF-3vcdA/viewform?usp=pp_url&entry.2090562688="
    pre_fill_link2 <- "https://docs.google.com/forms/d/e/1FAIpQLSfeJPzm2QmCWIeHekmH0NWkDmgdo8gG_ElDHR_f5IMdAGdH8w/viewform?usp=pp_url&entry.1874543433="
    pre_fill_link3 <- "https://docs.google.com/forms/d/e/1FAIpQLSfy4qN-m-bEt2Ppw5s39hSr-Ur3fVLOKbp42srLKwWD-bSkNg/viewform?usp=pp_url&entry.1243347104="
    pre_fill_link4 <- "https://docs.google.com/forms/d/e/1FAIpQLSd-CYVKRMjXDdDlxctH1RQ1oeUXJLnl3r-trT4Pr2TN5u8TnQ/viewform?usp=pp_url&entry.108469289="
    pre_fill_link5 <- "https://docs.google.com/forms/d/e/1FAIpQLSe8Wlj-6QfeI6mcOZPC6UqugH7HtM09Bj7qcJbt7d2hASZupw/viewform?usp=pp_url&entry.1000315833="
    pre_fill_link <- switch(selection,
                            pre_fill_link1,
                            pre_fill_link2,
                            pre_fill_link3,
                            pre_fill_link4,
                            pre_fill_link5
    )
    # Do not edit the code below
    if(!grepl("=$", pre_fill_link)){
      pre_fill_link <- paste0(pre_fill_link, "=")
    }
    # p(): pad vector x to length 'p' with filler value f.
    p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
    # Restore the private skip log into swirl's log before export.
    e$log$skipped[1:length(e$log$mon_skip)]<-e$log$mon_skip
    temp <- tempfile()
    log_ <- getLog()
    nrow_ <- max(unlist(lapply(log_, length)))
    log_tbl <- data.frame( p(log_$question_number, nrow_, NA),
                           p(log_$correct, nrow_, NA),
                           p(log_$attempt, nrow_, NA),
                           p(log_$skipped, nrow_, NA),
                           p(log_$datetime, nrow_, NA),
                           stringsAsFactors = FALSE)
    # Column names smuggle the student/lesson metadata into the CSV.
    names(log_tbl) <- c(e$num_etud, nom_etud, prenom_etud,log_$lesson_name,e$num_sujet)
    write.csv(log_tbl, file = temp, row.names = FALSE)
    encoded_log <- base64encode(temp)
    e <- get("e", parent.frame())
    e$encoded_log<-encoded_log
    e$log_tbl<-log_tbl
    e$url_googleForm<-paste0(pre_fill_link, encoded_log)
    #browseURL(paste0(pre_fill_link, encoded_log)
    readline("Swirl va maintenant ouvrir un Google Form dans votre navigateur web. Tapez sur la touche Entrée.")
    browseURL(e$url_googleForm)
    e <- get("e", parent.frame())
    # Fallback email target depends on the group number.
    if(selection %in% c(2,3,4)) e$adresse_email<-"laurent.doyen@univ-grenoble-alpes.fr" else e$adresse_email<-"marie-jose.marcoux@univ-grenoble-alpes.fr"
    e$sujet_email<-paste0("**TP3-TC-CI**"," G",selection,", ",log_$lesson_name,", ", nom_etud,collapse="")
    e$corp_email<-encoded_log
  }
  return(res)
}
# Alternative submission flow: same as submit_log() but without the
# group selection check -- every submission goes to a single Google Form
# and a single fallback email address.  Builds the padded CSV log,
# base64-encodes it and opens the pre-filled form in the browser.
# NOTE(review): base64encode() and email() are provided by packages
# attached by the course environment -- not visible in this file.
submit_log_alt <- function(){
  e <- get("e", parent.frame())
  res<-FALSE
  selection <- getState()$val
  #if(selection %in% 1:5){
  res<-TRUE
  nom_etud <- readline("Quel est votre nom de famille ? ")
  demande_prenom<-"Quel est votre pr\xE9nom ? "
  Encoding(demande_prenom) <- "latin1"
  prenom_etud <- readline(demande_prenom)
  # Please edit the link below
  #pre_fill_link1 <- "https://docs.google.com/forms/d/e/1FAIpQLSeWzSmQyQa5YE-MUL_0DxzD5RShhaKbWBS63Bu0AmdbxwmI2w/viewform?usp=pp_url&entry.1536247898="
  #pre_fill_link2 <- "https://docs.google.com/forms/d/e/1FAIpQLSfgztQT4bQTcgAuTlpMtVD5vQfAcLz5TWXqdS-D24Ctog4TFg/viewform?usp=pp_url&entry.1449157816="
  #pre_fill_link3 <- "https://docs.google.com/forms/d/e/1FAIpQLSc-MLNgzzLzS6znCGlIMnSpBwbfqsbmJYGItyOxL0ucInW3YQ/viewform?usp=pp_url&entry.947620631="
  #pre_fill_link4 <- "https://docs.google.com/forms/d/e/1FAIpQLSdHFMGd0kZ0K3n3wWX85Ka1FMonKLm1dF409NbjgIL0U0kMKA/viewform?usp=pp_url&entry.1829019151="
  #pre_fill_link5 <- "https://docs.google.com/forms/d/e/1FAIpQLSdXGObsIGsQlhgQ4UwxknYANU2EAlm8cbakMVxpNFD9kmsmgg/viewform?usp=pp_url&entry.958732492="
  #pre_fill_link <- switch(selection,
  #                        pre_fill_link1,
  #                        pre_fill_link2,
  #                        pre_fill_link3,
  #                        pre_fill_link4,
  #                        pre_fill_link5
  #)
  pre_fill_link<-"https://docs.google.com/forms/d/e/1FAIpQLSd3Myo0EalPM3qCfmMQw2xpITKslfNrnjFiHBBu4_Uo4BIVYg/viewform?usp=pp_url&entry.1066847456="
  # Do not edit the code below
  if(!grepl("=$", pre_fill_link)){
    pre_fill_link <- paste0(pre_fill_link, "=")
  }
  # p(): pad vector x to length 'p' with filler value f.
  p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
  # Restore the private skip log into swirl's log before export.
  e$log$skipped[1:length(e$log$mon_skip)]<-e$log$mon_skip
  temp <- tempfile()
  log_ <- getLog()
  nrow_ <- max(unlist(lapply(log_, length)))
  log_tbl <- data.frame( p(log_$question_number, nrow_, NA),
                         p(log_$correct, nrow_, NA),
                         p(log_$attempt, nrow_, NA),
                         p(log_$skipped, nrow_, NA),
                         p(log_$datetime, nrow_, NA),
                         stringsAsFactors = FALSE)
  # Column names smuggle the student/lesson metadata into the CSV.
  names(log_tbl) <- c(e$num_etud, nom_etud, prenom_etud,log_$lesson_name,e$num_sujet)
  write.csv(log_tbl, file = temp, row.names = FALSE)
  encoded_log <- base64encode(temp)
  e <- get("e", parent.frame())
  e$encoded_log<-encoded_log
  e$log_tbl<-log_tbl
  e$url_googleForm<-paste0(pre_fill_link, encoded_log)
  #browseURL(paste0(pre_fill_link, encoded_log)
  readline("Swirl va maintenant ouvrir un Google Form dans votre navigateur web. Tapez sur la touche Entrée.")
  browseURL(e$url_googleForm)
  e <- get("e", parent.frame())
  #if(selection %in% c(1,2,3)) e$adresse_email<-"laurent.doyen@iut2.univ-grenoble-alpes.fr" else e$adresse_email<-"marie-jose.martinez@iut2.univ-grenoble-alpes.fr"
  e$adresse_email<-"laurent.doyen@iut2.univ-grenoble-alpes.fr"
  e$sujet_email<-paste0("**TP3-TC-CI** Alt, ",log_$lesson_name,", ", nom_etud,collapse="")
  e$corp_email<-encoded_log
  #}
  return(res)
}
# Answer test for "Google Form or email?": if the answer mentions
# "Google Form", re-open the form and return FALSE so the question is
# asked again; otherwise open the fallback email and accept the answer.
googleForm_log<-function(){
  e <- get("e", parent.frame())
  if(regexpr("Google Form",e$val)!=-1){
    res<-FALSE
    browseURL(e$url_googleForm)
  } else {
    res<-TRUE
    readline("Swirl va maintenant ouvrir un email dans votre logiciel de messagerie. Tapez sur la touche Entrée.")
    email(e$adresse_email,e$sujet_email,e$corp_email)
  }
  return(res)
}
# Answer test for the email fallback question: when the student's answer
# mentions "email", (re)send the prepared log email and return FALSE so
# the question is asked again; any other answer is accepted.
email_log <- function(){
  state <- get("e", parent.frame())
  wants_email <- regexpr("email", state$val) != -1
  if (wants_email) {
    email(state$adresse_email, state$sujet_email, state$corp_email)
    return(FALSE)
  }
  return(TRUE)
}
# Ask the student to choose a directory and save the encoded log (their
# submission "key") there as TP2<lesson>.R.  Returns TRUE on success,
# FALSE when no directory was selected.
# NOTE(review): choose_dir() is provided by the course environment (not
# visible here); setwd() permanently changes the working directory.
sauve_log<-function(){
  demande<-"Appuyez sur Entr\xE9, puis choississez un r\xE9pertoire dans lequel sauver votre cl\xE9. Attention, dans les salles machine de l'IUT, choississez un r\xE9pertoire personnel."
  Encoding(demande) <- "latin1"
  rep <- readline(demande)
  path <- choose_dir()
  if(length(path)==0){
    return(FALSE)
  } else {
    setwd(path)
    e <- get("e", parent.frame())
    encoded_log<-e$encoded_log
    log_tbl<-e$log_tbl
    log_ <- getLog()
    # File name records the lesson; reused later by qsauve_log().
    e$fichier<-paste0("TP2",log_$lesson_name,".R")
    save(log_tbl,encoded_log,file=e$fichier)
    demande<-paste0("Votre cl\xE9, est sauv\xE9 dans le fichier ",e$fichier," Tapez sur la touche Entr\xE9e pour continuer.")
    Encoding(demande) <- "latin1"
    rep <- readline(demande)
    return(TRUE)
  }
}
# Answer test: "Oui" accepts (key already saved); any other answer
# re-runs the save dialog, writes the key to e$fichier again, and
# returns FALSE so the question is repeated.
qsauve_log<-function(){
  e <- get("e", parent.frame())
  if(e$val=="Oui"){
    return(TRUE)
  } else {
    demande<-"Appuyez sur Entr\xE9, puis choississez un r\xE9pertoire dans lequel sauver votre cl\xE9. Attention, dans les salles machine de l'IUT, choississez un r\xE9pertoire personnel."
    Encoding(demande) <- "latin1"
    rep <- readline(demande)
    path <- choose_dir()
    if(length(path)==0){
      return(FALSE)
    } else {
      setwd(path)
      e <- get("e", parent.frame())
      encoded_log<-e$encoded_log
      log_tbl<-e$log_tbl
      save(log_tbl,encoded_log,file=e$fichier)
      demande<-paste0("Votre cl\xE9, est sauv\xE9 dans le fichier ",e$fichier," Tapez sur la touche Entr\xE9e pour continuer.")
      Encoding(demande) <- "latin1"
      rep <- readline(demande)
      return(FALSE)
    }
  }
}
# Answer test: returns TRUE when every element of the user's answer
# (e$val, read from swirl's state in the calling frame) lies within the
# closed interval [b_inf[i], b_sup[i]].
#
# Fixed: the original iterated over 1:length(b_inf), which wrongly
# executes for i in c(1, 0) when the bounds are empty; seq_along() is
# safe for zero-length bounds (vacuously TRUE).
test_between <- function(b_inf,b_sup){
  e <- get("e", parent.frame())
  answer <- e$val
  res <- TRUE
  for(i in seq_along(b_inf)){
    res <- res & (answer[i] >= b_inf[i]) & (answer[i] <= b_sup[i])
  }
  return(res)
}
# Answer test: when the student answers "Oui", open the subject's web
# page in the browser and accept the answer; otherwise reject it.
ouvrir_sujet_TP <- function(){
  state <- get("e", parent.frame())
  answer <- getState()$val
  if (answer == "Oui") {
    browseURL(state$vs$pageWebsujet)
    return(TRUE)
  }
  return(FALSE)
}
# Answer test: TRUE when the student entered passer() (skip request).
# Also re-syncs swirl's 'skipped' log from the private copy (mon_skip)
# and appends the current result -- working around swirl resetting
# 'skipped' between consecutive lessons.  Statement order matters here.
test_passer<-function(){
  e <- get("e", parent.frame())
  res<-(e$expr=="passer()")
  if(length(e$log$mon_skip)>0) e$log$skipped[1:length(e$log$mon_skip)]<-e$log$mon_skip
  e$log$mon_skip<-e$log$skipped
  e$log$mon_skip[length(e$log$mon_skip)+1]<-res
  return(res)
}
# Answer test: the expected answer is the estimated standard error of
# the subject's data set -- sqrt(p(1-p)/n) for a proportion subject,
# sd/sqrt(n) otherwise.  Compares the student's value via test_egalite().
qualite_estimation <- function(){
  state <- get("e", parent.frame())
  data_values <- get(state$vs$nom_data)
  center <- mean(data_values)
  if (state$vs$prop) {
    std_err <- sqrt(center * (1 - center) / state$vs$n)
  } else {
    std_err <- sd(data_values) / sqrt(state$vs$n)
  }
  return(test_egalite(state$val, std_err))
}
# Answer test: the expected answer is the normal-approximation
# confidence interval mean +/- z * SE at the subject's confidence level
# (niv_confiance, in percent).  Compared via test_egalite().
int_conf <- function(){
  state <- get("e", parent.frame())
  obs <- get(state$vs$nom_data)
  center <- mean(obs)
  if (state$vs$prop) {
    se <- sqrt(center * (1 - center) / state$vs$n)
  } else {
    se <- sd(obs) / sqrt(state$vs$n)
  }
  z <- qnorm((100 - (100 - state$vs$niv_confiance) / 2) / 100)
  return(test_egalite(state$val, center + c(-1, 1) * z * se))
}
|
/Intervalles_de_confiance/customTests.R
|
no_license
|
ldoyen/TP3-TC-CI
|
R
| false
| false
| 12,969
|
r
|
# Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# Get the swirl state
# Return swirl's internal state environment.
getState <- function(){
  # Whenever swirl is running, its callback is at the top of its call stack.
  # Swirl's state, named e, is stored in the environment of the callback.
  environment(sys.function(1))$e
}
# Retrieve the interaction log stored in swirl's state.
getLog <- function(){
  getState()$log
}
# Deterministically map a student id to a subject number in 1..nb_sujet.
# The id seeds the RNG so the assignment is reproducible; the subject
# that last year's scheme would produce is excluded, so repeating
# students get a different subject.
Choix_sujet_etudiant<-function(num_etud,nb_sujet=5){
  #return(floor((num_etud-floor(num_etud/100)*100)/20))
  set.seed(num_etud)
  #return(sample(1:nb_sujet,size=2)[2])
  # Guarantees repeating students do not get the same subject as last
  # year.  Next year, restore the previous instruction to keep the
  # guarantee.
  sujet_prec<-sample(1:nb_sujet,size=2)[2]
  sujet_indice<-sample(1:(nb_sujet-1),1)
  sujet_possibles<-setdiff(1:nb_sujet,sujet_prec)
  return(sujet_possibles[sujet_indice])
}
# Numeric equality within an absolute tolerance of 1e-15 on the summed
# elementwise difference; NaN differences count as unequal.
test_egalite<-function(x,y){
  res<-sum(abs(x-y))
  return((res<=1e-15)&!is.nan(res))
}
# Simulate vs$m2 sample proportions (means of vs$n Bernoulli(p0) draws),
# then overwrite six randomly chosen entries among the first vs$m1 with
# values placed just inside/outside [pinf, psup], clamped to [0, 1].
genere_data<-function(vs){
  data<-replicate(vs$m2, mean(rbinom(vs$n,1,vs$p0)))
  iddata<-sample(1:vs$m1,6,replace=FALSE)
  data[iddata[1]]<-max((floor(vs$pinf*vs$n)-1)/vs$n,0)
  data[iddata[2]]<-max((floor(vs$pinf*vs$n)-4)/vs$n,0)
  data[iddata[3]]<-(ceiling(vs$pinf*vs$n)+1)/vs$n
  data[iddata[4]]<-(floor(vs$psup*vs$n)-1)/vs$n
  data[iddata[5]]<-min((ceiling(vs$psup*vs$n)+1)/vs$n,1)
  data[iddata[6]]<-min((ceiling(vs$psup*vs$n)+4)/vs$n,1)
  return(data)
}
# Answer test for the student-number question.  Reads the id typed by
# the student from swirl's state (e$val), asks for confirmation, then:
# derives the student's subject number, keeps only that subject's entry
# from 'variable_sujet', seeds the RNG with the id, simulates the
# subject's data set and assigns it into the global environment under
# the subject-specific name.  Returns TRUE when the id was confirmed.
num_etud<-function(){
  ### The different subjects (one entry per subject in each vector)
  variable_sujet<-list(
    prop=c(1,0,1,0,1,0),
    n=c(100,100,200,200,300,300),
    niv_confiance=c(85,85,85,80,80,80),
    Y=c("Y^A","Y^B","Y^C","Y^D","Y^X","Y^Z"),
    y=c("y^A","y^B","y^C","y^D","y^X","y^Z"),
    nom_data=c("yA","y_B","y_C_obs","yD","y_X","y_Z_obs"),
    pageWebsujet=c("https://toltex.imag.fr/VAM/TP3/sujet.html","https://toltex.imag.fr/VAM/TP3/sujet_tp.html","https://toltex.imag.fr/VAM/TP3/sujet__tp.html","https://toltex.imag.fr/VAM/TP3/sujet3.html","https://toltex.imag.fr/VAM/TP3/sujet_tp3.html","https://toltex.imag.fr/VAM/TP3/sujet__tp3.html")
  )
  ####
  e <- get("e", parent.frame())
  num_etud <- as.integer(e$val)
  res<-TRUE
  if (is.na(num_etud)|(num_etud<0)){
    res<-FALSE
  } else {
    # Confirmation prompt; \xE9 escapes are latin1 accented characters.
    confirmation<-paste("Vous confirmez que votre num\xE9ro d'\xE9tudiant est", num_etud, "?",sep=" ")
    Encoding(confirmation)<- "latin1"
    message(confirmation)
    res<-readline("Tapez 1, si c'est bien le cas, sinon appuyez sur n'importe quelle autre touche. Puis validez.")==1
    if (res){
      e$num_etud<-num_etud
      e$num_sujet <- Choix_sujet_etudiant(num_etud,length(variable_sujet[[1]]))
      set.seed(num_etud)
      # Keep only this student's subject in each per-subject vector.
      vs<-variable_sujet
      for (i in 1:length(vs)){
        vs[[i]]<-vs[[i]][e$num_sujet]
      }
      e$vs<-vs
      # Random multinomial probabilities for a 5-category variable.
      p0<-runif(1,0.3,0.5)
      p1<-(1-p0)*runif(1,0.3,0.4)
      p2<-(1-p0-p1)*runif(1,0.2,0.5)
      p3<-(1-p0-p1-p2)*0.5
      p4<-1-p0-p1-p2-p3
      z<-rmultinom(vs$n,1,c(p0,p1,p2,p3,p4))
      # Convert the one-hot matrix into category codes 0..4.
      y<-which(z==1)-(0:(vs$n-1))*5-1
      if(vs$prop) assign(vs$nom_data,as.integer(y>0),.GlobalEnv) else assign(vs$nom_data,y,.GlobalEnv)
      # Work around a swirl bug: when two lessons run back-to-back,
      # 'skipped' is initialised with FALSE, unlike for the first
      # lesson; reset it and keep a private copy (mon_skip).
      e$log$skipped<-c()
      e$log$mon_skip<-e$log$skipped
    }
  }
  return(res)
}
# Answer test run when the student submits the lesson log.
# 'selection' (1-5) is the student's group; builds a CSV log table
# (question number, correctness, attempts, skips, timestamps), encodes
# it in base64 and opens the group's pre-filled Google Form in the
# browser.  Also stores a fallback email (address, subject, body) in
# swirl's state for googleForm_log()/email_log().
# NOTE(review): base64encode() and email() are provided by packages
# attached by the course environment -- not visible in this file.
submit_log <- function(){
  e <- get("e", parent.frame())
  res<-FALSE
  selection <- getState()$val
  if(selection %in% 1:5){
    res<-TRUE
    nom_etud <- readline("Quel est votre nom de famille ? ")
    demande_prenom<-"Quel est votre pr\xE9nom ? "
    Encoding(demande_prenom) <- "latin1"
    prenom_etud <- readline(demande_prenom)
    # Please edit the link below
    pre_fill_link1 <- "https://docs.google.com/forms/d/e/1FAIpQLSe0vu3khlVduduY6VOb7bRKwlJ-suMTkHa3BHFQ2gkF-3vcdA/viewform?usp=pp_url&entry.2090562688="
    pre_fill_link2 <- "https://docs.google.com/forms/d/e/1FAIpQLSfeJPzm2QmCWIeHekmH0NWkDmgdo8gG_ElDHR_f5IMdAGdH8w/viewform?usp=pp_url&entry.1874543433="
    pre_fill_link3 <- "https://docs.google.com/forms/d/e/1FAIpQLSfy4qN-m-bEt2Ppw5s39hSr-Ur3fVLOKbp42srLKwWD-bSkNg/viewform?usp=pp_url&entry.1243347104="
    pre_fill_link4 <- "https://docs.google.com/forms/d/e/1FAIpQLSd-CYVKRMjXDdDlxctH1RQ1oeUXJLnl3r-trT4Pr2TN5u8TnQ/viewform?usp=pp_url&entry.108469289="
    pre_fill_link5 <- "https://docs.google.com/forms/d/e/1FAIpQLSe8Wlj-6QfeI6mcOZPC6UqugH7HtM09Bj7qcJbt7d2hASZupw/viewform?usp=pp_url&entry.1000315833="
    pre_fill_link <- switch(selection,
                            pre_fill_link1,
                            pre_fill_link2,
                            pre_fill_link3,
                            pre_fill_link4,
                            pre_fill_link5
    )
    # Do not edit the code below
    if(!grepl("=$", pre_fill_link)){
      pre_fill_link <- paste0(pre_fill_link, "=")
    }
    # p(): pad vector x to length 'p' with filler value f.
    p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
    # Restore the private skip log into swirl's log before export.
    e$log$skipped[1:length(e$log$mon_skip)]<-e$log$mon_skip
    temp <- tempfile()
    log_ <- getLog()
    nrow_ <- max(unlist(lapply(log_, length)))
    log_tbl <- data.frame( p(log_$question_number, nrow_, NA),
                           p(log_$correct, nrow_, NA),
                           p(log_$attempt, nrow_, NA),
                           p(log_$skipped, nrow_, NA),
                           p(log_$datetime, nrow_, NA),
                           stringsAsFactors = FALSE)
    # Column names smuggle the student/lesson metadata into the CSV.
    names(log_tbl) <- c(e$num_etud, nom_etud, prenom_etud,log_$lesson_name,e$num_sujet)
    write.csv(log_tbl, file = temp, row.names = FALSE)
    encoded_log <- base64encode(temp)
    e <- get("e", parent.frame())
    e$encoded_log<-encoded_log
    e$log_tbl<-log_tbl
    e$url_googleForm<-paste0(pre_fill_link, encoded_log)
    #browseURL(paste0(pre_fill_link, encoded_log)
    readline("Swirl va maintenant ouvrir un Google Form dans votre navigateur web. Tapez sur la touche Entrée.")
    browseURL(e$url_googleForm)
    e <- get("e", parent.frame())
    # Fallback email target depends on the group number.
    if(selection %in% c(2,3,4)) e$adresse_email<-"laurent.doyen@univ-grenoble-alpes.fr" else e$adresse_email<-"marie-jose.marcoux@univ-grenoble-alpes.fr"
    e$sujet_email<-paste0("**TP3-TC-CI**"," G",selection,", ",log_$lesson_name,", ", nom_etud,collapse="")
    e$corp_email<-encoded_log
  }
  return(res)
}
# Alternative submission flow: same as submit_log() but without the
# group selection check -- every submission goes to a single Google Form
# and a single fallback email address.  Builds the padded CSV log,
# base64-encodes it and opens the pre-filled form in the browser.
# NOTE(review): base64encode() and email() are provided by packages
# attached by the course environment -- not visible in this file.
submit_log_alt <- function(){
  e <- get("e", parent.frame())
  res<-FALSE
  selection <- getState()$val
  #if(selection %in% 1:5){
  res<-TRUE
  nom_etud <- readline("Quel est votre nom de famille ? ")
  demande_prenom<-"Quel est votre pr\xE9nom ? "
  Encoding(demande_prenom) <- "latin1"
  prenom_etud <- readline(demande_prenom)
  # Please edit the link below
  #pre_fill_link1 <- "https://docs.google.com/forms/d/e/1FAIpQLSeWzSmQyQa5YE-MUL_0DxzD5RShhaKbWBS63Bu0AmdbxwmI2w/viewform?usp=pp_url&entry.1536247898="
  #pre_fill_link2 <- "https://docs.google.com/forms/d/e/1FAIpQLSfgztQT4bQTcgAuTlpMtVD5vQfAcLz5TWXqdS-D24Ctog4TFg/viewform?usp=pp_url&entry.1449157816="
  #pre_fill_link3 <- "https://docs.google.com/forms/d/e/1FAIpQLSc-MLNgzzLzS6znCGlIMnSpBwbfqsbmJYGItyOxL0ucInW3YQ/viewform?usp=pp_url&entry.947620631="
  #pre_fill_link4 <- "https://docs.google.com/forms/d/e/1FAIpQLSdHFMGd0kZ0K3n3wWX85Ka1FMonKLm1dF409NbjgIL0U0kMKA/viewform?usp=pp_url&entry.1829019151="
  #pre_fill_link5 <- "https://docs.google.com/forms/d/e/1FAIpQLSdXGObsIGsQlhgQ4UwxknYANU2EAlm8cbakMVxpNFD9kmsmgg/viewform?usp=pp_url&entry.958732492="
  #pre_fill_link <- switch(selection,
  #                        pre_fill_link1,
  #                        pre_fill_link2,
  #                        pre_fill_link3,
  #                        pre_fill_link4,
  #                        pre_fill_link5
  #)
  pre_fill_link<-"https://docs.google.com/forms/d/e/1FAIpQLSd3Myo0EalPM3qCfmMQw2xpITKslfNrnjFiHBBu4_Uo4BIVYg/viewform?usp=pp_url&entry.1066847456="
  # Do not edit the code below
  if(!grepl("=$", pre_fill_link)){
    pre_fill_link <- paste0(pre_fill_link, "=")
  }
  # p(): pad vector x to length 'p' with filler value f.
  p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
  # Restore the private skip log into swirl's log before export.
  e$log$skipped[1:length(e$log$mon_skip)]<-e$log$mon_skip
  temp <- tempfile()
  log_ <- getLog()
  nrow_ <- max(unlist(lapply(log_, length)))
  log_tbl <- data.frame( p(log_$question_number, nrow_, NA),
                         p(log_$correct, nrow_, NA),
                         p(log_$attempt, nrow_, NA),
                         p(log_$skipped, nrow_, NA),
                         p(log_$datetime, nrow_, NA),
                         stringsAsFactors = FALSE)
  # Column names smuggle the student/lesson metadata into the CSV.
  names(log_tbl) <- c(e$num_etud, nom_etud, prenom_etud,log_$lesson_name,e$num_sujet)
  write.csv(log_tbl, file = temp, row.names = FALSE)
  encoded_log <- base64encode(temp)
  e <- get("e", parent.frame())
  e$encoded_log<-encoded_log
  e$log_tbl<-log_tbl
  e$url_googleForm<-paste0(pre_fill_link, encoded_log)
  #browseURL(paste0(pre_fill_link, encoded_log)
  readline("Swirl va maintenant ouvrir un Google Form dans votre navigateur web. Tapez sur la touche Entrée.")
  browseURL(e$url_googleForm)
  e <- get("e", parent.frame())
  #if(selection %in% c(1,2,3)) e$adresse_email<-"laurent.doyen@iut2.univ-grenoble-alpes.fr" else e$adresse_email<-"marie-jose.martinez@iut2.univ-grenoble-alpes.fr"
  e$adresse_email<-"laurent.doyen@iut2.univ-grenoble-alpes.fr"
  e$sujet_email<-paste0("**TP3-TC-CI** Alt, ",log_$lesson_name,", ", nom_etud,collapse="")
  e$corp_email<-encoded_log
  #}
  return(res)
}
# Answer test for "Google Form or email?": if the answer mentions
# "Google Form", re-open the form and return FALSE so the question is
# asked again; otherwise open the fallback email and accept the answer.
googleForm_log<-function(){
  e <- get("e", parent.frame())
  if(regexpr("Google Form",e$val)!=-1){
    res<-FALSE
    browseURL(e$url_googleForm)
  } else {
    res<-TRUE
    readline("Swirl va maintenant ouvrir un email dans votre logiciel de messagerie. Tapez sur la touche Entrée.")
    email(e$adresse_email,e$sujet_email,e$corp_email)
  }
  return(res)
}
# Answer test for the email fallback: when the answer mentions "email",
# resend the prepared log email and return FALSE so the question is
# repeated; any other answer is accepted.
email_log<-function(){
  e <- get("e", parent.frame())
  res<-TRUE
  if(regexpr("email",e$val)!=-1){
    res<-FALSE
    email(e$adresse_email,e$sujet_email,e$corp_email)
  }
  return(res)
}
# Ask the student to choose a directory and save the encoded log (their
# submission "key") there as TP2<lesson>.R.  Returns TRUE on success,
# FALSE when no directory was selected.
# NOTE(review): choose_dir() is provided by the course environment (not
# visible here); setwd() permanently changes the working directory.
sauve_log<-function(){
  demande<-"Appuyez sur Entr\xE9, puis choississez un r\xE9pertoire dans lequel sauver votre cl\xE9. Attention, dans les salles machine de l'IUT, choississez un r\xE9pertoire personnel."
  Encoding(demande) <- "latin1"
  rep <- readline(demande)
  path <- choose_dir()
  if(length(path)==0){
    return(FALSE)
  } else {
    setwd(path)
    e <- get("e", parent.frame())
    encoded_log<-e$encoded_log
    log_tbl<-e$log_tbl
    log_ <- getLog()
    # File name records the lesson; reused later by qsauve_log().
    e$fichier<-paste0("TP2",log_$lesson_name,".R")
    save(log_tbl,encoded_log,file=e$fichier)
    demande<-paste0("Votre cl\xE9, est sauv\xE9 dans le fichier ",e$fichier," Tapez sur la touche Entr\xE9e pour continuer.")
    Encoding(demande) <- "latin1"
    rep <- readline(demande)
    return(TRUE)
  }
}
# Answer test: "Oui" accepts (key already saved); any other answer
# re-runs the save dialog, writes the key to e$fichier again, and
# returns FALSE so the question is repeated.
qsauve_log<-function(){
  e <- get("e", parent.frame())
  if(e$val=="Oui"){
    return(TRUE)
  } else {
    demande<-"Appuyez sur Entr\xE9, puis choississez un r\xE9pertoire dans lequel sauver votre cl\xE9. Attention, dans les salles machine de l'IUT, choississez un r\xE9pertoire personnel."
    Encoding(demande) <- "latin1"
    rep <- readline(demande)
    path <- choose_dir()
    if(length(path)==0){
      return(FALSE)
    } else {
      setwd(path)
      e <- get("e", parent.frame())
      encoded_log<-e$encoded_log
      log_tbl<-e$log_tbl
      save(log_tbl,encoded_log,file=e$fichier)
      demande<-paste0("Votre cl\xE9, est sauv\xE9 dans le fichier ",e$fichier," Tapez sur la touche Entr\xE9e pour continuer.")
      Encoding(demande) <- "latin1"
      rep <- readline(demande)
      return(FALSE)
    }
  }
}
# Answer test: checks whether each component of the learner's answer (e$val,
# read from the calling swirl environment) lies in the closed interval
# [b_inf[i], b_sup[i]].
#
# @param b_inf numeric vector of lower bounds.
# @param b_sup numeric vector of upper bounds, same length as b_inf.
# @return TRUE when every component is within its bounds; FALSE or NA
#   otherwise (NA propagates when the answer is shorter than the bounds).
test_between <- function(b_inf, b_sup) {
  e <- get("e", parent.frame())
  val <- e$val
  res <- TRUE
  # seq_along() fixes the 1:length() footgun: with zero-length bounds the
  # original `for (i in 1:n)` iterated over c(1, 0) and returned logical(0)
  # instead of the vacuous TRUE.
  for (i in seq_along(b_inf)) {
    res <- res & (val[i] >= b_inf[i]) & (val[i] <= b_sup[i])
  }
  res
}
# Open the TP subject web page in the browser when the learner answered "Oui".
# Returns TRUE if the page was opened, FALSE otherwise.
ouvrir_sujet_TP <- function() {
  e <- get("e", parent.frame())
  if (getState()$val != "Oui") {
    return(FALSE)
  }
  browseURL(e$vs$pageWebsujet)
  TRUE
}
# Answer test: TRUE when the learner entered passer() to skip the question.
# Also mirrors swirl's skip bookkeeping into e$log$mon_skip so the skip
# history accumulates across questions:
#  - first re-apply any previously recorded skips onto e$log$skipped,
#  - then snapshot skipped into mon_skip and append this question's result.
# NOTE(review): e$expr appears to be the parsed user expression; the string
# comparison presumably relies on its character coercion — confirm.
test_passer<-function(){
e <- get("e", parent.frame())
res<-(e$expr=="passer()")
if(length(e$log$mon_skip)>0) e$log$skipped[1:length(e$log$mon_skip)]<-e$log$mon_skip
e$log$mon_skip<-e$log$skipped
e$log$mon_skip[length(e$log$mon_skip)+1]<-res
return(res)
}
# Answer test for the standard error of an estimate: recomputes the expected
# value from the exercise data (named by e$vs$nom_data) and compares it with
# the learner's answer via test_egalite().
# Proportion case: sqrt(p(1-p)/n); otherwise sd/sqrt(n).
qualite_estimation <- function() {
  e <- get("e", parent.frame())
  donnees <- get(e$vs$nom_data)
  moy <- mean(donnees)
  expected <- if (e$vs$prop) {
    sqrt(moy * (1 - moy) / e$vs$n)
  } else {
    sd(donnees) / sqrt(e$vs$n)
  }
  test_egalite(e$val, expected)
}
# Answer test for a normal-approximation confidence interval:
# mean +/- z * standard error, with z taken at the requested confidence level
# (e$vs$niv_confiance is expressed in percent, e.g. 95).
int_conf <- function() {
  e <- get("e", parent.frame())
  donnees <- get(e$vs$nom_data)
  moy <- mean(donnees)
  es <- if (e$vs$prop) {
    sqrt(moy * (1 - moy) / e$vs$n)
  } else {
    sd(donnees) / sqrt(e$vs$n)
  }
  z <- qnorm((100 - (100 - e$vs$niv_confiance) / 2) / 100)
  test_egalite(e$val, moy + c(-1, 1) * z * es)
}
|
# Build the understorey nitrogen retranslocation coefficient lookup table.
#
# @param retrans Retranslocation coefficient(s), assumed to be a fraction
#   (x %). A single value is recycled across rings.
# @param rings Ring identifiers; defaults to the original hard-coded six
#   rings (1:6), so existing callers are unaffected.
# @return data.frame with columns "Ring" and "retrans_coef".
make_understorey_n_retrans_coefficient <- function(retrans, rings = 1:6) {
  out <- data.frame(Ring = rings, retrans_coef = retrans)
  return(out)
}
|
/modules/retranslocation_coefficients/make_understorey_n_retrans_coefficient.R
|
no_license
|
mingkaijiang/EucFACE_nitrogen_budget
|
R
| false
| false
| 216
|
r
|
# Build the understorey N retranslocation coefficient table for the six
# EucFACE rings. retrans is assumed to be a fraction (x %) and is recycled
# over rings 1..6 when scalar.
make_understorey_n_retrans_coefficient <- function(retrans) {
  data.frame(Ring = 1:6, retrans_coef = retrans)
}
|
# Estimate model 2
# Author: MM,LN
# Version: 2019.11.08
# Revision history
# 2011.11.08 Add loop over values of datastub, "m11" and "nondom".

# Configuration flags and constants for the model-2 estimation runs.
LondonFlag <- 0 # 0 : Only Greater London
                # 1 : London + some adjacent counties
allDatastubs <- c("m11", "nondom") # "m11" = domestic properties
                                   # "nondom" = non-domestic properties
N0 <- 10000 # N0 = size of fullsample used for estimation
            # N0 = 0 then use all data (excluding outliers)
plotflag <- 0 # 0 = no plots. 1 = plot on screen. 2 = plot to device
depvar <- c("logvalue", "value", "boxcoxvalue")
avgtimeflag <- 1 # 1 = use average time to destinations. 0 = use (drive_time,trans_time)
nregs <- 11
# Lookup table mapping region_id -> region name.
# seq_len(nregs) replaces the misleading seq(1:nregs) idiom (which only worked
# because seq() on a length>1 vector falls back to seq_along()).
regnames <- data.frame(
  region_id = seq_len(nregs),
  region_str = c(
    "CaMKOx", "CornwallDevon", "EastMid", "EastEng", "London",
    "NE", "NW", "SE", "SW", "WestMid", "YorkshireHumber"
  )
)
# Main estimation loop: for each datastub (domestic / non-domestic) and each
# of the 11 regions, load the prepared data, assemble the regression formulas,
# and fit/save three OLS specifications per dependent variable.
# NOTE(review): relies on B2SetPath, A3Model2_vlist1, A3Model2_specification0/1
# and B4GetVarList defined elsewhere in the project, and on RootDir / CodeDir /
# DataRoot existing in the workspace.
for (ds in 1:2) {
datastub<-allDatastubs[ds]
for (r in 1:11) {
region_id<-regnames$region_id[r]
region_str<-as.character(regnames$region_str[r])
dirs<-B2SetPath(RootDir,CodeDir,DataRoot,region_id,datastub)
# Load data (the .RData file is expected to contain a data frame m2data)
load(file=paste0(dirs$datadir,"/m2data2.RData"))
# Convert prob_4band="" to prob_4band==NA
i1<-(levels(m2data$prob_4band)=="")
levels(m2data$prob_4band)[i1]<-NA
# Estimate base model: Model 0
# vlist1: land use and other amenities + travel times to (station,coast,aroad,motorway)
# vlist2: (drive,trans) variables
# vlist3: splines in (drive,trans)
vlist1<-A3Model2_vlist1(r,datastub)
if (avgtimeflag==0) {
# List of all variables starting with "drive_"
# ("\\<" is a start-of-word anchor; [^acmnst] presumably excludes the amenity
# travel-time columns already covered by vlist1 — verify against the data.)
drivevars<-colnames(m2data)[grep("\\<drive_[^acmnst]",colnames(m2data))]
iAONB<-grep("\\<drive_AONB",drivevars)
if (length(iAONB)>0) drivevars<-drivevars[-iAONB]
# List all variables with "trans_"
transvars<-colnames(m2data)[grep("\\<trans_[^acmnst]",colnames(m2data))]
# Get names of drive/trans and creates character strings for formula
commutevars<-c(drivevars,transvars)
drivesplines<-colnames(m2data)[grep("\\<spline_drive_",colnames(m2data))]
transsplines<-colnames(m2data)[grep("\\<spline_trans_",colnames(m2data))]
commute_splines<-c(drivesplines,transsplines)
} else if (avgtimeflag==1) {
# List of all variables starting with "avgtime_"
commutevars<-colnames(m2data)[grep("\\<avgtime_[^acmnst]",colnames(m2data))]
# Get names of drive/trans and creates character strings for formula
commute_splines<-colnames(m2data)[grep("\\<spline_avgtime_",colnames(m2data))]
}
# Create vlist2: to be included in formula.
# Each commute variable enters as log(1 + x); terms are glued with "+" so the
# result can be appended to the vlist1 formula string.
logfunc<-function(x) paste0("log(1+",x,")")
logcommutevars<-logfunc(commutevars)
vlist2<-""
for (i in 1:length(logcommutevars)) {vlist2<-paste(vlist2,logcommutevars[i], sep="+") }
vlist2spline<-""
for (i in 1:length(commute_splines)) {vlist2spline<-paste(vlist2spline,commute_splines[i], sep="+") }
# formula0 uses the log-level commute terms, formula1 the spline terms.
formula0<-as.formula(paste0(vlist1,vlist2))
formula1<-as.formula(paste0(vlist1,vlist2spline))
for (y in depvar) {
# dependent variables is one of c("logvalue","value","boxcoxvalue")
m2data$location_value <- m2data[,grep(paste0("\\<",y),colnames(m2data))]
# Define subsamples: trim the 1% tails of the dependent variable (outliers).
qnt <- quantile(m2data$location_value, probs=c(.01, .99),na.rm=TRUE)
iFull <- m2data$location_value>qnt[1] & m2data$location_value<qnt[2]
m2ols0<-lm(formula0,data=m2data,subset=iFull,na.action=na.exclude)
m2ols1<-lm(formula1,data=m2data,subset=iFull,na.action=na.exclude)
m2ols0$varlist<-B4GetVarList(names(m2ols0$model))
m2ols1$varlist<-B4GetVarList(names(m2ols1$model))
summary(m2ols0)
# Model 5: region-specific specification, except for region 5 (London) where
# the spline model m2ols1 is reused as-is.
if (r!=5) {
if (avgtimeflag==0) {
# use (drive_time,trans_time)
formula5<-A3Model2_specification0(r)
} else if (avgtimeflag==1) {
# use (avgtime)
formula5<-A3Model2_specification1(r,datastub)
}
m2ols5<-lm(formula5,data=m2data[iFull,],na.action=na.exclude)
m2ols5$varlist<-B4GetVarList(names(m2ols5$model))
summary(m2ols5)
} else if (r==5) {
m2ols5<-m2ols1
}
m2ols0$depvar <- y
m2ols1$depvar <- y
m2ols5$depvar <- y
# Persist each fitted model separately, tagged by depvar and model number.
save(m2ols0,file=paste0(dirs$outdir,"/m2",y,"0.RData"))
save(m2ols1,file=paste0(dirs$outdir,"/m2",y,"1.RData"))
save(m2ols5,file=paste0(dirs$outdir,"/m2",y,"5.RData"))
} # for (y in depvar)
} # for (r in 1:nregs) {
} # loop over c("m11","nondom")
|
/code/currentversion/model/A3Model2.R
|
no_license
|
UCL/provis
|
R
| false
| false
| 4,630
|
r
|
# Estimate model 2
# Author: MM,LN
# Version: 2019.11.08
# Revision history
# 2011.11.08 Add loop over values of datastub, "m11" and "nondom".
# Configuration constants for the estimation runs.
LondonFlag <- 0 # 0 : Only Greater London
# 1 : London + some adjacent counties
allDatastubs <- c("m11","nondom") # "m11" = domestic properties
# "nondom" = non-domestic properties
N0 <- 10000 # N0 = size of fullsample used for estimation
# N0 = 0 then use all data (excluding outliers)
plotflag <- 0 # 0 = no plots. 1 = plot on screen. 2 = plot to device
depvar <- c("logvalue","value","boxcoxvalue")
avgtimeflag <- 1 # 1 = use average time to destinations. 0 = use (drive_time,trans_time)
nregs <- 11
# Lookup table mapping region_id -> region name.
regnames<-data.frame(region_id=seq(1:nregs),
region_str=c("CaMKOx", "CornwallDevon",
"EastMid", "EastEng",
"London", "NE", "NW",
"SE", "SW", "WestMid",
"YorkshireHumber"))
# Main loop: per datastub and region, load data, build formulas, fit and save
# three OLS specifications per dependent variable.
# NOTE(review): B2SetPath, A3Model2_vlist1, A3Model2_specification0/1 and
# B4GetVarList are project functions defined elsewhere.
for (ds in 1:2) {
datastub<-allDatastubs[ds]
for (r in 1:11) {
region_id<-regnames$region_id[r]
region_str<-as.character(regnames$region_str[r])
dirs<-B2SetPath(RootDir,CodeDir,DataRoot,region_id,datastub)
# Load data (expects a data frame named m2data)
load(file=paste0(dirs$datadir,"/m2data2.RData"))
# Convert prob_4band="" to prob_4band==NA
i1<-(levels(m2data$prob_4band)=="")
levels(m2data$prob_4band)[i1]<-NA
# Estimate base model: Model 0
# vlist1: land use and other amenities + travel times to (station,coast,aroad,motorway)
# vlist2: (drive,trans) variables
# vlist3: splines in (drive,trans)
vlist1<-A3Model2_vlist1(r,datastub)
if (avgtimeflag==0) {
# List of all variables starting with "drive_"
drivevars<-colnames(m2data)[grep("\\<drive_[^acmnst]",colnames(m2data))]
iAONB<-grep("\\<drive_AONB",drivevars)
if (length(iAONB)>0) drivevars<-drivevars[-iAONB]
# List all variables with "trans_"
transvars<-colnames(m2data)[grep("\\<trans_[^acmnst]",colnames(m2data))]
# Get names of drive/trans and creates character strings for formula
commutevars<-c(drivevars,transvars)
drivesplines<-colnames(m2data)[grep("\\<spline_drive_",colnames(m2data))]
transsplines<-colnames(m2data)[grep("\\<spline_trans_",colnames(m2data))]
commute_splines<-c(drivesplines,transsplines)
} else if (avgtimeflag==1) {
# List of all variables starting with "avgtime_"
commutevars<-colnames(m2data)[grep("\\<avgtime_[^acmnst]",colnames(m2data))]
# Get names of drive/trans and creates character strings for formula
commute_splines<-colnames(m2data)[grep("\\<spline_avgtime_",colnames(m2data))]
}
# Create vlist2: commute variables enter as log(1+x), joined with "+".
logfunc<-function(x) paste0("log(1+",x,")")
logcommutevars<-logfunc(commutevars)
vlist2<-""
for (i in 1:length(logcommutevars)) {vlist2<-paste(vlist2,logcommutevars[i], sep="+") }
vlist2spline<-""
for (i in 1:length(commute_splines)) {vlist2spline<-paste(vlist2spline,commute_splines[i], sep="+") }
formula0<-as.formula(paste0(vlist1,vlist2))
formula1<-as.formula(paste0(vlist1,vlist2spline))
for (y in depvar) {
# dependent variables is one of c("logvalue","value","boxcoxvalue")
m2data$location_value <- m2data[,grep(paste0("\\<",y),colnames(m2data))]
# Define subsamples: trim the 1% tails (outliers).
qnt <- quantile(m2data$location_value, probs=c(.01, .99),na.rm=TRUE)
iFull <- m2data$location_value>qnt[1] & m2data$location_value<qnt[2]
m2ols0<-lm(formula0,data=m2data,subset=iFull,na.action=na.exclude)
m2ols1<-lm(formula1,data=m2data,subset=iFull,na.action=na.exclude)
m2ols0$varlist<-B4GetVarList(names(m2ols0$model))
m2ols1$varlist<-B4GetVarList(names(m2ols1$model))
summary(m2ols0)
# Model 5: region-specific, except London (r==5) reuses the spline model.
if (r!=5) {
if (avgtimeflag==0) {
# use (drive_time,trans_time)
formula5<-A3Model2_specification0(r)
} else if (avgtimeflag==1) {
# use (avgtime)
formula5<-A3Model2_specification1(r,datastub)
}
m2ols5<-lm(formula5,data=m2data[iFull,],na.action=na.exclude)
m2ols5$varlist<-B4GetVarList(names(m2ols5$model))
summary(m2ols5)
} else if (r==5) {
m2ols5<-m2ols1
}
m2ols0$depvar <- y
m2ols1$depvar <- y
m2ols5$depvar <- y
# Persist each fitted model, tagged by depvar and model number.
save(m2ols0,file=paste0(dirs$outdir,"/m2",y,"0.RData"))
save(m2ols1,file=paste0(dirs$outdir,"/m2",y,"1.RData"))
save(m2ols5,file=paste0(dirs$outdir,"/m2",y,"5.RData"))
} # for (y in depvar)
} # for (r in 1:nregs) {
} # loop over c("m11","nondom")
|
# Logistic Regression
# Importing the dataset
# (expects the .RData file to define a data frame named `dataset`)
load("Social_Network_Ads.RData")
# Keep columns 3:5 — two predictors plus the Purchased target.
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling (standardise the two predictor columns; column 3 is the target)
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ .,
family = binomial,
data = training_set)
# Predicting the Test set results (probabilities, then 0/1 at a 0.5 threshold)
prob_pred = predict(classifier, type = 'response', newdata = test_set[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the Confusion Matrix
# NOTE(review): y_pred is already 0/1, so `y_pred > 0.5` only relabels the
# columns TRUE/FALSE; `table(test_set[, 3], y_pred)` would keep 0/1 labels.
cm = table(test_set[, 3], y_pred > 0.5)
# Visualising the Training set results
# NOTE(review): ElemStatLearn has been archived from CRAN — confirm availability.
library(ElemStatLearn)
set = training_set
# Build a fine grid over the two predictors to shade the decision regions.
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
# Decision boundary plus one point per grid cell coloured by predicted class.
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results (same recipe on the held-out observations)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
/Udemy-Machine Learning A-Z/logistic.regression-raul.R
|
no_license
|
getachew67/Data-Science-using-R
|
R
| false
| false
| 2,484
|
r
|
# Logistic Regression
# Importing the dataset (expects a data frame named `dataset` in the file)
load("Social_Network_Ads.RData")
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling (column 3 is the target and is left unscaled)
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ .,
family = binomial,
data = training_set)
# Predicting the Test set results (probabilities, then 0/1 at 0.5)
prob_pred = predict(classifier, type = 'response', newdata = test_set[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the Confusion Matrix
# NOTE(review): y_pred is already 0/1; `> 0.5` only relabels columns TRUE/FALSE.
cm = table(test_set[, 3], y_pred > 0.5)
# Visualising the Training set results
# NOTE(review): ElemStatLearn is archived from CRAN — confirm availability.
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results (same recipe on held-out data)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codebuild_operations.R
\name{codebuild_start_build}
\alias{codebuild_start_build}
\title{Starts running a build}
\usage{
codebuild_start_build(projectName, secondarySourcesOverride,
secondarySourcesVersionOverride, sourceVersion, artifactsOverride,
secondaryArtifactsOverride, environmentVariablesOverride,
sourceTypeOverride, sourceLocationOverride, sourceAuthOverride,
gitCloneDepthOverride, gitSubmodulesConfigOverride, buildspecOverride,
insecureSslOverride, reportBuildStatusOverride,
buildStatusConfigOverride, environmentTypeOverride, imageOverride,
computeTypeOverride, certificateOverride, cacheOverride,
serviceRoleOverride, privilegedModeOverride, timeoutInMinutesOverride,
queuedTimeoutInMinutesOverride, encryptionKeyOverride, idempotencyToken,
logsConfigOverride, registryCredentialOverride,
imagePullCredentialsTypeOverride, debugSessionEnabled)
}
\arguments{
\item{projectName}{[required] The name of the AWS CodeBuild build project to start running a build.}
\item{secondarySourcesOverride}{An array of \code{ProjectSource} objects.}
\item{secondarySourcesVersionOverride}{An array of \code{ProjectSourceVersion} objects that specify one or more
versions of the project's secondary sources to be used for this build
only.}
\item{sourceVersion}{The version of the build input to be built, for this build only. If not
specified, the latest version is used. If specified, the contents
depends on the source provider:
\subsection{AWS CodeCommit}{
The commit ID, branch, or Git tag to use.
}
\subsection{GitHub}{
The commit ID, pull request ID, branch name, or tag name that
corresponds to the version of the source code you want to build. If a
pull request ID is specified, it must use the format
\code{pr/pull-request-ID} (for example \code{pr/25}). If a branch name is
specified, the branch's HEAD commit ID is used. If not specified, the
default branch's HEAD commit ID is used.
}
\subsection{Bitbucket}{
The commit ID, branch name, or tag name that corresponds to the version
of the source code you want to build. If a branch name is specified, the
branch's HEAD commit ID is used. If not specified, the default branch's
HEAD commit ID is used.
}
\subsection{Amazon Simple Storage Service (Amazon S3)}{
The version ID of the object that represents the build input ZIP file to
use.
If \code{sourceVersion} is specified at the project level, then this
\code{sourceVersion} (at the build level) takes precedence.
For more information, see \href{https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html}{Source Version Sample with CodeBuild}
in the \emph{AWS CodeBuild User Guide}.
}}
\item{artifactsOverride}{Build output artifact settings that override, for this build only, the
latest ones already defined in the build project.}
\item{secondaryArtifactsOverride}{An array of \code{ProjectArtifacts} objects.}
\item{environmentVariablesOverride}{A set of environment variables that overrides, for this build only, the
latest ones already defined in the build project.}
\item{sourceTypeOverride}{A source input type, for this build, that overrides the source input
defined in the build project.}
\item{sourceLocationOverride}{A location that overrides, for this build, the source location for the
one defined in the build project.}
\item{sourceAuthOverride}{An authorization type for this build that overrides the one defined in
the build project. This override applies only if the build project's
source is BitBucket or GitHub.}
\item{gitCloneDepthOverride}{The user-defined depth of history, with a minimum value of 0, that
overrides, for this build only, any previous depth of history defined in
the build project.}
\item{gitSubmodulesConfigOverride}{Information about the Git submodules configuration for this build of an
AWS CodeBuild build project.}
\item{buildspecOverride}{A buildspec file declaration that overrides, for this build only, the
latest one already defined in the build project.
If this value is set, it can be either an inline buildspec definition,
the path to an alternate buildspec file relative to the value of the
built-in \code{CODEBUILD_SRC_DIR} environment variable, or the path to an S3
bucket. The bucket must be in the same AWS Region as the build project.
Specify the buildspec file using its ARN (for example,
\code{arn:aws:s3:::my-codebuild-sample2/buildspec.yml}). If this value is not
provided or is set to an empty string, the source code must contain a
buildspec file in its root directory. For more information, see
\href{https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-name-storage}{Buildspec File Name and Storage Location}.}
\item{insecureSslOverride}{Enable this flag to override the insecure SSL setting that is specified
in the build project. The insecure SSL setting determines whether to
ignore SSL warnings while connecting to the project source code. This
override applies only if the build's source is GitHub Enterprise.}
\item{reportBuildStatusOverride}{Set to true to report to your source provider the status of a build's
start and completion. If you use this option with a source provider
other than GitHub, GitHub Enterprise, or Bitbucket, an
invalidInputException is thrown.
The status of a build triggered by a webhook is always reported to your
source provider.}
\item{buildStatusConfigOverride}{Contains information that defines how the build project reports the
build status to the source provider. This option is only used when the
source provider is \code{GITHUB}, \code{GITHUB_ENTERPRISE}, or \code{BITBUCKET}.}
\item{environmentTypeOverride}{A container type for this build that overrides the one specified in the
build project.}
\item{imageOverride}{The name of an image for this build that overrides the one specified in
the build project.}
\item{computeTypeOverride}{The name of a compute type for this build that overrides the one
specified in the build project.}
\item{certificateOverride}{The name of a certificate for this build that overrides the one
specified in the build project.}
\item{cacheOverride}{A ProjectCache object specified for this build that overrides the one
defined in the build project.}
\item{serviceRoleOverride}{The name of a service role for this build that overrides the one
specified in the build project.}
\item{privilegedModeOverride}{Enable this flag to override privileged mode in the build project.}
\item{timeoutInMinutesOverride}{The number of build timeout minutes, from 5 to 480 (8 hours), that
overrides, for this build only, the latest setting already defined in
the build project.}
\item{queuedTimeoutInMinutesOverride}{The number of minutes a build is allowed to be queued before it times
out.}
\item{encryptionKeyOverride}{The AWS Key Management Service (AWS KMS) customer master key (CMK) that
overrides the one specified in the build project. The CMK key encrypts
the build output artifacts.
You can use a cross-account KMS key to encrypt the build output
artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if
available, the CMK's alias (using the format
\verb{alias/<alias-name>}).}
\item{idempotencyToken}{A unique, case sensitive identifier you provide to ensure the
idempotency of the StartBuild request. The token is included in the
StartBuild request and is valid for 5 minutes. If you repeat the
StartBuild request with the same token, but change a parameter, AWS
CodeBuild returns a parameter mismatch error.}
\item{logsConfigOverride}{Log settings for this build that override the log settings defined in
the build project.}
\item{registryCredentialOverride}{The credentials for access to a private registry.}
\item{imagePullCredentialsTypeOverride}{The type of credentials AWS CodeBuild uses to pull images in your build.
There are two valid values:
\subsection{CODEBUILD}{
Specifies that AWS CodeBuild uses its own credentials. This requires
that you modify your ECR repository policy to trust AWS CodeBuild's
service principal.
}
\subsection{SERVICE\\_ROLE}{
Specifies that AWS CodeBuild uses your build project's service role.
When using a cross-account or private registry image, you must use
\code{SERVICE_ROLE} credentials. When using an AWS CodeBuild curated image,
you must use \code{CODEBUILD} credentials.
}}
\item{debugSessionEnabled}{Specifies if session debugging is enabled for this build. For more
information, see \href{https://docs.aws.amazon.com/codebuild/latest/userguide/session-manager.html}{Viewing a running build in Session Manager}.}
}
\description{
Starts running a build.
}
\section{Request syntax}{
\preformatted{svc$start_build(
projectName = "string",
secondarySourcesOverride = list(
list(
type = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE",
location = "string",
gitCloneDepth = 123,
gitSubmodulesConfig = list(
fetchSubmodules = TRUE|FALSE
),
buildspec = "string",
auth = list(
type = "OAUTH",
resource = "string"
),
reportBuildStatus = TRUE|FALSE,
buildStatusConfig = list(
context = "string",
targetUrl = "string"
),
insecureSsl = TRUE|FALSE,
sourceIdentifier = "string"
)
),
secondarySourcesVersionOverride = list(
list(
sourceIdentifier = "string",
sourceVersion = "string"
)
),
sourceVersion = "string",
artifactsOverride = list(
type = "CODEPIPELINE"|"S3"|"NO_ARTIFACTS",
location = "string",
path = "string",
namespaceType = "NONE"|"BUILD_ID",
name = "string",
packaging = "NONE"|"ZIP",
overrideArtifactName = TRUE|FALSE,
encryptionDisabled = TRUE|FALSE,
artifactIdentifier = "string"
),
secondaryArtifactsOverride = list(
list(
type = "CODEPIPELINE"|"S3"|"NO_ARTIFACTS",
location = "string",
path = "string",
namespaceType = "NONE"|"BUILD_ID",
name = "string",
packaging = "NONE"|"ZIP",
overrideArtifactName = TRUE|FALSE,
encryptionDisabled = TRUE|FALSE,
artifactIdentifier = "string"
)
),
environmentVariablesOverride = list(
list(
name = "string",
value = "string",
type = "PLAINTEXT"|"PARAMETER_STORE"|"SECRETS_MANAGER"
)
),
sourceTypeOverride = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE",
sourceLocationOverride = "string",
sourceAuthOverride = list(
type = "OAUTH",
resource = "string"
),
gitCloneDepthOverride = 123,
gitSubmodulesConfigOverride = list(
fetchSubmodules = TRUE|FALSE
),
buildspecOverride = "string",
insecureSslOverride = TRUE|FALSE,
reportBuildStatusOverride = TRUE|FALSE,
buildStatusConfigOverride = list(
context = "string",
targetUrl = "string"
),
environmentTypeOverride = "WINDOWS_CONTAINER"|"LINUX_CONTAINER"|"LINUX_GPU_CONTAINER"|"ARM_CONTAINER"|"WINDOWS_SERVER_2019_CONTAINER",
imageOverride = "string",
computeTypeOverride = "BUILD_GENERAL1_SMALL"|"BUILD_GENERAL1_MEDIUM"|"BUILD_GENERAL1_LARGE"|"BUILD_GENERAL1_2XLARGE",
certificateOverride = "string",
cacheOverride = list(
type = "NO_CACHE"|"S3"|"LOCAL",
location = "string",
modes = list(
"LOCAL_DOCKER_LAYER_CACHE"|"LOCAL_SOURCE_CACHE"|"LOCAL_CUSTOM_CACHE"
)
),
serviceRoleOverride = "string",
privilegedModeOverride = TRUE|FALSE,
timeoutInMinutesOverride = 123,
queuedTimeoutInMinutesOverride = 123,
encryptionKeyOverride = "string",
idempotencyToken = "string",
logsConfigOverride = list(
cloudWatchLogs = list(
status = "ENABLED"|"DISABLED",
groupName = "string",
streamName = "string"
),
s3Logs = list(
status = "ENABLED"|"DISABLED",
location = "string",
encryptionDisabled = TRUE|FALSE
)
),
registryCredentialOverride = list(
credential = "string",
credentialProvider = "SECRETS_MANAGER"
),
imagePullCredentialsTypeOverride = "CODEBUILD"|"SERVICE_ROLE",
debugSessionEnabled = TRUE|FALSE
)
}
}
\keyword{internal}
|
/cran/paws.developer.tools/man/codebuild_start_build.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 12,218
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codebuild_operations.R
\name{codebuild_start_build}
\alias{codebuild_start_build}
\title{Starts running a build}
\usage{
codebuild_start_build(projectName, secondarySourcesOverride,
secondarySourcesVersionOverride, sourceVersion, artifactsOverride,
secondaryArtifactsOverride, environmentVariablesOverride,
sourceTypeOverride, sourceLocationOverride, sourceAuthOverride,
gitCloneDepthOverride, gitSubmodulesConfigOverride, buildspecOverride,
insecureSslOverride, reportBuildStatusOverride,
buildStatusConfigOverride, environmentTypeOverride, imageOverride,
computeTypeOverride, certificateOverride, cacheOverride,
serviceRoleOverride, privilegedModeOverride, timeoutInMinutesOverride,
queuedTimeoutInMinutesOverride, encryptionKeyOverride, idempotencyToken,
logsConfigOverride, registryCredentialOverride,
imagePullCredentialsTypeOverride, debugSessionEnabled)
}
\arguments{
\item{projectName}{[required] The name of the AWS CodeBuild build project to start running a build.}
\item{secondarySourcesOverride}{An array of \code{ProjectSource} objects.}
\item{secondarySourcesVersionOverride}{An array of \code{ProjectSourceVersion} objects that specify one or more
versions of the project's secondary sources to be used for this build
only.}
\item{sourceVersion}{The version of the build input to be built, for this build only. If not
specified, the latest version is used. If specified, the contents
depends on the source provider:
\subsection{AWS CodeCommit}{
The commit ID, branch, or Git tag to use.
}
\subsection{GitHub}{
The commit ID, pull request ID, branch name, or tag name that
corresponds to the version of the source code you want to build. If a
pull request ID is specified, it must use the format
\code{pr/pull-request-ID} (for example \code{pr/25}). If a branch name is
specified, the branch's HEAD commit ID is used. If not specified, the
default branch's HEAD commit ID is used.
}
\subsection{Bitbucket}{
The commit ID, branch name, or tag name that corresponds to the version
of the source code you want to build. If a branch name is specified, the
branch's HEAD commit ID is used. If not specified, the default branch's
HEAD commit ID is used.
}
\subsection{Amazon Simple Storage Service (Amazon S3)}{
The version ID of the object that represents the build input ZIP file to
use.
If \code{sourceVersion} is specified at the project level, then this
\code{sourceVersion} (at the build level) takes precedence.
For more information, see \href{https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html}{Source Version Sample with CodeBuild}
in the \emph{AWS CodeBuild User Guide}.
}}
\item{artifactsOverride}{Build output artifact settings that override, for this build only, the
latest ones already defined in the build project.}
\item{secondaryArtifactsOverride}{An array of \code{ProjectArtifacts} objects.}
\item{environmentVariablesOverride}{A set of environment variables that overrides, for this build only, the
latest ones already defined in the build project.}
\item{sourceTypeOverride}{A source input type, for this build, that overrides the source input
defined in the build project.}
\item{sourceLocationOverride}{A location that overrides, for this build, the source location for the
one defined in the build project.}
\item{sourceAuthOverride}{An authorization type for this build that overrides the one defined in
the build project. This override applies only if the build project's
source is BitBucket or GitHub.}
\item{gitCloneDepthOverride}{The user-defined depth of history, with a minimum value of 0, that
overrides, for this build only, any previous depth of history defined in
the build project.}
\item{gitSubmodulesConfigOverride}{Information about the Git submodules configuration for this build of an
AWS CodeBuild build project.}
\item{buildspecOverride}{A buildspec file declaration that overrides, for this build only, the
latest one already defined in the build project.
If this value is set, it can be either an inline buildspec definition,
the path to an alternate buildspec file relative to the value of the
built-in \code{CODEBUILD_SRC_DIR} environment variable, or the path to an S3
bucket. The bucket must be in the same AWS Region as the build project.
Specify the buildspec file using its ARN (for example,
\code{arn:aws:s3:::my-codebuild-sample2/buildspec.yml}). If this value is not
provided or is set to an empty string, the source code must contain a
buildspec file in its root directory. For more information, see
\href{https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-name-storage}{Buildspec File Name and Storage Location}.}
\item{insecureSslOverride}{Enable this flag to override the insecure SSL setting that is specified
in the build project. The insecure SSL setting determines whether to
ignore SSL warnings while connecting to the project source code. This
override applies only if the build's source is GitHub Enterprise.}
\item{reportBuildStatusOverride}{Set to true to report to your source provider the status of a build's
start and completion. If you use this option with a source provider
other than GitHub, GitHub Enterprise, or Bitbucket, an
invalidInputException is thrown.
The status of a build triggered by a webhook is always reported to your
source provider.}
\item{buildStatusConfigOverride}{Contains information that defines how the build project reports the
build status to the source provider. This option is only used when the
source provider is \code{GITHUB}, \code{GITHUB_ENTERPRISE}, or \code{BITBUCKET}.}
\item{environmentTypeOverride}{A container type for this build that overrides the one specified in the
build project.}
\item{imageOverride}{The name of an image for this build that overrides the one specified in
the build project.}
\item{computeTypeOverride}{The name of a compute type for this build that overrides the one
specified in the build project.}
\item{certificateOverride}{The name of a certificate for this build that overrides the one
specified in the build project.}
\item{cacheOverride}{A ProjectCache object specified for this build that overrides the one
defined in the build project.}
\item{serviceRoleOverride}{The name of a service role for this build that overrides the one
specified in the build project.}
\item{privilegedModeOverride}{Enable this flag to override privileged mode in the build project.}
\item{timeoutInMinutesOverride}{The number of build timeout minutes, from 5 to 480 (8 hours), that
overrides, for this build only, the latest setting already defined in
the build project.}
\item{queuedTimeoutInMinutesOverride}{The number of minutes a build is allowed to be queued before it times
out.}
\item{encryptionKeyOverride}{The AWS Key Management Service (AWS KMS) customer master key (CMK) that
overrides the one specified in the build project. The CMK key encrypts
the build output artifacts.
You can use a cross-account KMS key to encrypt the build output
artifacts if your service role has permission to that key.
You can specify either the Amazon Resource Name (ARN) of the CMK or, if
available, the CMK's alias (using the format
\verb{alias/<alias-name>}).}
\item{idempotencyToken}{A unique, case sensitive identifier you provide to ensure the
idempotency of the StartBuild request. The token is included in the
StartBuild request and is valid for 5 minutes. If you repeat the
StartBuild request with the same token, but change a parameter, AWS
CodeBuild returns a parameter mismatch error.}
\item{logsConfigOverride}{Log settings for this build that override the log settings defined in
the build project.}
\item{registryCredentialOverride}{The credentials for access to a private registry.}
\item{imagePullCredentialsTypeOverride}{The type of credentials AWS CodeBuild uses to pull images in your build.
There are two valid values:
\subsection{CODEBUILD}{
Specifies that AWS CodeBuild uses its own credentials. This requires
that you modify your ECR repository policy to trust AWS CodeBuild's
service principal.
}
\subsection{SERVICE\\_ROLE}{
Specifies that AWS CodeBuild uses your build project's service role.
When using a cross-account or private registry image, you must use
\code{SERVICE_ROLE} credentials. When using an AWS CodeBuild curated image,
you must use \code{CODEBUILD} credentials.
}}
\item{debugSessionEnabled}{Specifies if session debugging is enabled for this build. For more
information, see \href{https://docs.aws.amazon.com/codebuild/latest/userguide/session-manager.html}{Viewing a running build in Session Manager}.}
}
\description{
Starts running a build.
}
\section{Request syntax}{
\preformatted{svc$start_build(
projectName = "string",
secondarySourcesOverride = list(
list(
type = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE",
location = "string",
gitCloneDepth = 123,
gitSubmodulesConfig = list(
fetchSubmodules = TRUE|FALSE
),
buildspec = "string",
auth = list(
type = "OAUTH",
resource = "string"
),
reportBuildStatus = TRUE|FALSE,
buildStatusConfig = list(
context = "string",
targetUrl = "string"
),
insecureSsl = TRUE|FALSE,
sourceIdentifier = "string"
)
),
secondarySourcesVersionOverride = list(
list(
sourceIdentifier = "string",
sourceVersion = "string"
)
),
sourceVersion = "string",
artifactsOverride = list(
type = "CODEPIPELINE"|"S3"|"NO_ARTIFACTS",
location = "string",
path = "string",
namespaceType = "NONE"|"BUILD_ID",
name = "string",
packaging = "NONE"|"ZIP",
overrideArtifactName = TRUE|FALSE,
encryptionDisabled = TRUE|FALSE,
artifactIdentifier = "string"
),
secondaryArtifactsOverride = list(
list(
type = "CODEPIPELINE"|"S3"|"NO_ARTIFACTS",
location = "string",
path = "string",
namespaceType = "NONE"|"BUILD_ID",
name = "string",
packaging = "NONE"|"ZIP",
overrideArtifactName = TRUE|FALSE,
encryptionDisabled = TRUE|FALSE,
artifactIdentifier = "string"
)
),
environmentVariablesOverride = list(
list(
name = "string",
value = "string",
type = "PLAINTEXT"|"PARAMETER_STORE"|"SECRETS_MANAGER"
)
),
sourceTypeOverride = "CODECOMMIT"|"CODEPIPELINE"|"GITHUB"|"S3"|"BITBUCKET"|"GITHUB_ENTERPRISE"|"NO_SOURCE",
sourceLocationOverride = "string",
sourceAuthOverride = list(
type = "OAUTH",
resource = "string"
),
gitCloneDepthOverride = 123,
gitSubmodulesConfigOverride = list(
fetchSubmodules = TRUE|FALSE
),
buildspecOverride = "string",
insecureSslOverride = TRUE|FALSE,
reportBuildStatusOverride = TRUE|FALSE,
buildStatusConfigOverride = list(
context = "string",
targetUrl = "string"
),
environmentTypeOverride = "WINDOWS_CONTAINER"|"LINUX_CONTAINER"|"LINUX_GPU_CONTAINER"|"ARM_CONTAINER"|"WINDOWS_SERVER_2019_CONTAINER",
imageOverride = "string",
computeTypeOverride = "BUILD_GENERAL1_SMALL"|"BUILD_GENERAL1_MEDIUM"|"BUILD_GENERAL1_LARGE"|"BUILD_GENERAL1_2XLARGE",
certificateOverride = "string",
cacheOverride = list(
type = "NO_CACHE"|"S3"|"LOCAL",
location = "string",
modes = list(
"LOCAL_DOCKER_LAYER_CACHE"|"LOCAL_SOURCE_CACHE"|"LOCAL_CUSTOM_CACHE"
)
),
serviceRoleOverride = "string",
privilegedModeOverride = TRUE|FALSE,
timeoutInMinutesOverride = 123,
queuedTimeoutInMinutesOverride = 123,
encryptionKeyOverride = "string",
idempotencyToken = "string",
logsConfigOverride = list(
cloudWatchLogs = list(
status = "ENABLED"|"DISABLED",
groupName = "string",
streamName = "string"
),
s3Logs = list(
status = "ENABLED"|"DISABLED",
location = "string",
encryptionDisabled = TRUE|FALSE
)
),
registryCredentialOverride = list(
credential = "string",
credentialProvider = "SECRETS_MANAGER"
),
imagePullCredentialsTypeOverride = "CODEBUILD"|"SERVICE_ROLE",
debugSessionEnabled = TRUE|FALSE
)
}
}
\keyword{internal}
|
# Copyright (C) 2009 - 2012 Dirk Eddelbuettel and Romain Francois
# Copyright (C) 2013 Romain Francois
#
# This file is part of Rcpp11.
#
# Rcpp11 is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rcpp11 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rcpp11. If not, see <http://www.gnu.org/licenses/>.
# Create the skeleton of a new R package that uses Rcpp11.
#
# Delegates the basic layout to utils::package.skeleton() and then adapts
# the result: DESCRIPTION gains Depends/LinkingTo entries on Rcpp11 (plus
# RcppModules when `module` is TRUE), NAMESPACE gains a useDynLib()
# directive, and example C++/R sources are installed under src/ and R/.
#
# Arguments mirror package.skeleton(); the extras are:
#   cpp_files    - additional C++ files copied into src/
#   example_code - install the rcpp_hello_world example (forced to TRUE
#                  when `attributes` is TRUE)
#   attributes   - use Rcpp attributes for the example code
#   module       - also install the Rcpp modules example files
#   author, maintainer, email, license - metadata substituted into
#                  DESCRIPTION and the generated help pages
#
# Returns invisible(NULL); called for its side effects on disk.
Rcpp.package.skeleton <- function(
    name = "anRpackage", list = character(), environment = .GlobalEnv,
    path = ".", force = FALSE,
    code_files = character(), cpp_files = character(),
    example_code = TRUE,
    attributes = TRUE,
    module = FALSE,
    author = "Who wrote it",
    maintainer = if (missing(author)) "Who to complain to" else author,
    email = "yourfault@somewhere.net",
    license = "What Licence is it under ?"
){
    if (!is.character(cpp_files))
        stop("'cpp_files' must be a character vector")
    # the attributes example is the code that gets compiled, so
    # `attributes` implies `example_code`
    if (isTRUE(attributes))
        example_code <- TRUE
    env <- parent.frame(1)
    # Capture the call up front: both branches below may amend its `list`
    # argument before it is forwarded to package.skeleton(). (The original
    # code referenced `call` before match.call() had run, which errored for
    # any non-empty `list` lacking "rcpp_hello_world", and the later
    # match.call() silently discarded the amendment anyway.)
    call <- match.call()
    if (!length(list)) {
        # nothing to document yet: register a throw-away function so that
        # package.skeleton() has content to work with; it is removed below
        fake <- TRUE
        assign("Rcpp.fake.fun", function() {}, envir = env)
        if (example_code && !isTRUE(attributes)) {
            assign("rcpp_hello_world", function() {}, envir = env)
            remove_hello_world <- TRUE
        } else {
            remove_hello_world <- FALSE
        }
    } else {
        fake <- FALSE
        if (!"rcpp_hello_world" %in% list) {
            call[["list"]] <- c("rcpp_hello_world", call[["list"]])
            remove_hello_world <- TRUE
        } else {
            remove_hello_world <- FALSE
        }
    }
    # first let the traditional package.skeleton() do its business,
    # forwarding only the arguments it understands
    call[[1]] <- as.name("package.skeleton")
    call <- call[c(1L, which(names(call) %in% names(formals(package.skeleton))))]
    if (fake) {
        call[["list"]] <- c(
            if (isTRUE(example_code) && !isTRUE(attributes)) "rcpp_hello_world",
            "Rcpp.fake.fun")
    }
    tryCatch(eval(call, envir = env), error = function(e) {
        stop(sprintf("error while calling `package.skeleton` : %s",
                     conditionMessage(e)))
    })
    message("\nAdding Rcpp11 settings")
    # now pick things up
    root <- file.path(path, name)
    # DESCRIPTION: declare the dependency and linking on Rcpp11 ------------
    DESCRIPTION <- file.path(root, "DESCRIPTION")
    if (file.exists(DESCRIPTION)) {
        depends <- c(
            if (isTRUE(module)) "methods",
            sprintf("Rcpp11 (>= %s)", packageDescription("Rcpp11")[["Version"]])
        )
        x <- cbind(read.dcf(DESCRIPTION),
                   "Depends" = paste(depends, collapse = ", "),
                   "LinkingTo" = "Rcpp11")
        if (isTRUE(module)) {
            x <- cbind(x, "RcppModules" = "yada, stdVector, NumEx")
            message(" >> added RcppModules: yada")
        }
        x[, "Author"] <- author
        x[, "Maintainer"] <- sprintf("%s <%s>", maintainer, email)
        x[, "License"] <- license
        message(" >> added Depends: Rcpp11")
        message(" >> added LinkingTo: Rcpp11")
        write.dcf(x, file = DESCRIPTION)
    }
    # NAMESPACE: make sure the package's shared library gets loaded --------
    NAMESPACE <- file.path(root, "NAMESPACE")
    if (file.exists(NAMESPACE)) {
        lines <- readLines(NAMESPACE)
        # any(): readLines() returns one element per line, and a vector
        # condition in if() is an error in modern R
        if (!any(grepl("useDynLib", lines))) {
            lines <- c(sprintf("useDynLib(%s)", name), lines)
            message(" >> added useDynLib directive to NAMESPACE")
        }
        ns <- file(NAMESPACE, open = "w")
        # always write the directives back: opening with "w" truncated the
        # file, so skipping this write would destroy the NAMESPACE
        writeLines(lines, con = ns)
        if (isTRUE(module)) {
            writeLines('import( Rcpp11 )', ns)
        }
        close(ns)
    }
    # package help page: substitute the user-supplied metadata -------------
    package_help_page <- file.path(root, "man", sprintf("%s-package.Rd", name))
    if (file.exists(package_help_page)) {
        lines <- readLines(package_help_page)
        lines <- gsub("What license is it under?", license, lines, fixed = TRUE)
        lines <- gsub("Who to complain to <yourfault@somewhere.net>",
                      sprintf("%s <%s>", maintainer, email),
                      lines,
                      fixed = TRUE)
        lines <- gsub("Who wrote it", author, lines, fixed = TRUE)
        writeLines(lines, package_help_page)
    }
    # lay things out in the src directory ----------------------------------
    src <- file.path(root, "src")
    if (!file.exists(src)) {
        dir.create(src)
    }
    skeleton <- system.file("skeleton", package = "Rcpp11")
    if (length(cpp_files) > 0L) {
        for (file in cpp_files) {
            file.copy(file, src)
            message(" >> copied ", file, " to src directory")
        }
        compileAttributes(root)
    }
    if (example_code) {
        if (isTRUE(attributes)) {
            file.copy(file.path(skeleton, "rcpp_hello_world_attributes.cpp"),
                      file.path(src, "rcpp_hello_world.cpp"))
            message(" >> added example src file using Rcpp attributes")
            compileAttributes(root)
            message(" >> compiled Rcpp attributes")
        } else {
            # classic (non-attributes) example: templated header, source and
            # R wrapper with @PKG@ replaced by the package name
            header <- readLines(file.path(skeleton, "rcpp_hello_world.h"))
            header <- gsub("@PKG@", name, header, fixed = TRUE)
            writeLines(header, file.path(src, "rcpp_hello_world.h"))
            message(" >> added example header file using Rcpp classes")
            file.copy(file.path(skeleton, "rcpp_hello_world.cpp"), src)
            message(" >> added example src file using Rcpp classes")
            rcode <- readLines(file.path(skeleton, "rcpp_hello_world.R"))
            rcode <- gsub("@PKG@", name, rcode, fixed = TRUE)
            writeLines(rcode, file.path(root, "R", "rcpp_hello_world.R"))
            message(" >> added example R file calling the C++ example")
        }
        hello.Rd <- file.path(root, "man", "rcpp_hello_world.Rd")
        unlink(hello.Rd)
        file.copy(
            system.file("skeleton", "rcpp_hello_world.Rd", package = "Rcpp11"),
            hello.Rd
        )
        message(" >> added Rd file for rcpp_hello_world")
    }
    if (isTRUE(module)) {
        file.copy(system.file("skeleton", "rcpp_module.cpp", package = "Rcpp11"), file.path(root, "src"))
        file.copy(system.file("skeleton", "Num.cpp", package = "Rcpp11"), file.path(root, "src"))
        file.copy(system.file("skeleton", "stdVector.cpp", package = "Rcpp11"), file.path(root, "src"))
        file.copy(system.file("skeleton", "zzz.R", package = "Rcpp11"), file.path(root, "R"))
        message(" >> copied the example module file ")
    }
    # final clean-up of the generated -package.Rd placeholders -------------
    package.doc <- file.path(root, "man", sprintf("%s-package.Rd", name))
    lines <- readLines(package.doc)
    lines <- sub("~~ simple examples", "%% ~~ simple examples", lines)
    lines <- lines[!grepl("~~ package title", lines)]
    lines <- lines[!grepl("~~ The author and", lines)]
    lines <- sub("Who wrote it", author, lines)
    lines <- sub("Who to complain to.*", sprintf("%s <%s>", maintainer, email), lines)
    writeLines(lines, package.doc)
    if (fake) {
        # drop the placeholder function and the files generated for it
        rm("Rcpp.fake.fun", envir = env)
        unlink(file.path(root, "R", "Rcpp.fake.fun.R"))
        unlink(file.path(root, "man", "Rcpp.fake.fun.Rd"))
    }
    if (isTRUE(remove_hello_world)) {
        rm("rcpp_hello_world", envir = env)
    }
    invisible(NULL)
}
|
/R/Rcpp.package.skeleton.R
|
no_license
|
abelxie/Rcpp11
|
R
| false
| false
| 7,025
|
r
|
# Copyright (C) 2009 - 2012 Dirk Eddelbuettel and Romain Francois
# Copyright (C) 2013 Romain Francois
#
# This file is part of Rcpp11.
#
# Rcpp11 is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rcpp11 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rcpp11. If not, see <http://www.gnu.org/licenses/>.
# Create the skeleton of a new R package that uses Rcpp11.
#
# Delegates the basic layout to utils::package.skeleton() and then adapts
# the result: DESCRIPTION gains Depends/LinkingTo entries on Rcpp11 (plus
# RcppModules when `module` is TRUE), NAMESPACE gains a useDynLib()
# directive, and example C++/R sources are installed under src/ and R/.
#
# Arguments mirror package.skeleton(); the extras are:
#   cpp_files    - additional C++ files copied into src/
#   example_code - install the rcpp_hello_world example (forced to TRUE
#                  when `attributes` is TRUE)
#   attributes   - use Rcpp attributes for the example code
#   module       - also install the Rcpp modules example files
#   author, maintainer, email, license - metadata substituted into
#                  DESCRIPTION and the generated help pages
#
# Returns invisible(NULL); called for its side effects on disk.
Rcpp.package.skeleton <- function(
    name = "anRpackage", list = character(), environment = .GlobalEnv,
    path = ".", force = FALSE,
    code_files = character(), cpp_files = character(),
    example_code = TRUE,
    attributes = TRUE,
    module = FALSE,
    author = "Who wrote it",
    maintainer = if (missing(author)) "Who to complain to" else author,
    email = "yourfault@somewhere.net",
    license = "What Licence is it under ?"
){
    if (!is.character(cpp_files))
        stop("'cpp_files' must be a character vector")
    # the attributes example is the code that gets compiled, so
    # `attributes` implies `example_code`
    if (isTRUE(attributes))
        example_code <- TRUE
    env <- parent.frame(1)
    # Capture the call up front: both branches below may amend its `list`
    # argument before it is forwarded to package.skeleton(). (The original
    # code referenced `call` before match.call() had run, which errored for
    # any non-empty `list` lacking "rcpp_hello_world", and the later
    # match.call() silently discarded the amendment anyway.)
    call <- match.call()
    if (!length(list)) {
        # nothing to document yet: register a throw-away function so that
        # package.skeleton() has content to work with; it is removed below
        fake <- TRUE
        assign("Rcpp.fake.fun", function() {}, envir = env)
        if (example_code && !isTRUE(attributes)) {
            assign("rcpp_hello_world", function() {}, envir = env)
            remove_hello_world <- TRUE
        } else {
            remove_hello_world <- FALSE
        }
    } else {
        fake <- FALSE
        if (!"rcpp_hello_world" %in% list) {
            call[["list"]] <- c("rcpp_hello_world", call[["list"]])
            remove_hello_world <- TRUE
        } else {
            remove_hello_world <- FALSE
        }
    }
    # first let the traditional package.skeleton() do its business,
    # forwarding only the arguments it understands
    call[[1]] <- as.name("package.skeleton")
    call <- call[c(1L, which(names(call) %in% names(formals(package.skeleton))))]
    if (fake) {
        call[["list"]] <- c(
            if (isTRUE(example_code) && !isTRUE(attributes)) "rcpp_hello_world",
            "Rcpp.fake.fun")
    }
    tryCatch(eval(call, envir = env), error = function(e) {
        stop(sprintf("error while calling `package.skeleton` : %s",
                     conditionMessage(e)))
    })
    message("\nAdding Rcpp11 settings")
    # now pick things up
    root <- file.path(path, name)
    # DESCRIPTION: declare the dependency and linking on Rcpp11 ------------
    DESCRIPTION <- file.path(root, "DESCRIPTION")
    if (file.exists(DESCRIPTION)) {
        depends <- c(
            if (isTRUE(module)) "methods",
            sprintf("Rcpp11 (>= %s)", packageDescription("Rcpp11")[["Version"]])
        )
        x <- cbind(read.dcf(DESCRIPTION),
                   "Depends" = paste(depends, collapse = ", "),
                   "LinkingTo" = "Rcpp11")
        if (isTRUE(module)) {
            x <- cbind(x, "RcppModules" = "yada, stdVector, NumEx")
            message(" >> added RcppModules: yada")
        }
        x[, "Author"] <- author
        x[, "Maintainer"] <- sprintf("%s <%s>", maintainer, email)
        x[, "License"] <- license
        message(" >> added Depends: Rcpp11")
        message(" >> added LinkingTo: Rcpp11")
        write.dcf(x, file = DESCRIPTION)
    }
    # NAMESPACE: make sure the package's shared library gets loaded --------
    NAMESPACE <- file.path(root, "NAMESPACE")
    if (file.exists(NAMESPACE)) {
        lines <- readLines(NAMESPACE)
        # any(): readLines() returns one element per line, and a vector
        # condition in if() is an error in modern R
        if (!any(grepl("useDynLib", lines))) {
            lines <- c(sprintf("useDynLib(%s)", name), lines)
            message(" >> added useDynLib directive to NAMESPACE")
        }
        ns <- file(NAMESPACE, open = "w")
        # always write the directives back: opening with "w" truncated the
        # file, so skipping this write would destroy the NAMESPACE
        writeLines(lines, con = ns)
        if (isTRUE(module)) {
            writeLines('import( Rcpp11 )', ns)
        }
        close(ns)
    }
    # package help page: substitute the user-supplied metadata -------------
    package_help_page <- file.path(root, "man", sprintf("%s-package.Rd", name))
    if (file.exists(package_help_page)) {
        lines <- readLines(package_help_page)
        lines <- gsub("What license is it under?", license, lines, fixed = TRUE)
        lines <- gsub("Who to complain to <yourfault@somewhere.net>",
                      sprintf("%s <%s>", maintainer, email),
                      lines,
                      fixed = TRUE)
        lines <- gsub("Who wrote it", author, lines, fixed = TRUE)
        writeLines(lines, package_help_page)
    }
    # lay things out in the src directory ----------------------------------
    src <- file.path(root, "src")
    if (!file.exists(src)) {
        dir.create(src)
    }
    skeleton <- system.file("skeleton", package = "Rcpp11")
    if (length(cpp_files) > 0L) {
        for (file in cpp_files) {
            file.copy(file, src)
            message(" >> copied ", file, " to src directory")
        }
        compileAttributes(root)
    }
    if (example_code) {
        if (isTRUE(attributes)) {
            file.copy(file.path(skeleton, "rcpp_hello_world_attributes.cpp"),
                      file.path(src, "rcpp_hello_world.cpp"))
            message(" >> added example src file using Rcpp attributes")
            compileAttributes(root)
            message(" >> compiled Rcpp attributes")
        } else {
            # classic (non-attributes) example: templated header, source and
            # R wrapper with @PKG@ replaced by the package name
            header <- readLines(file.path(skeleton, "rcpp_hello_world.h"))
            header <- gsub("@PKG@", name, header, fixed = TRUE)
            writeLines(header, file.path(src, "rcpp_hello_world.h"))
            message(" >> added example header file using Rcpp classes")
            file.copy(file.path(skeleton, "rcpp_hello_world.cpp"), src)
            message(" >> added example src file using Rcpp classes")
            rcode <- readLines(file.path(skeleton, "rcpp_hello_world.R"))
            rcode <- gsub("@PKG@", name, rcode, fixed = TRUE)
            writeLines(rcode, file.path(root, "R", "rcpp_hello_world.R"))
            message(" >> added example R file calling the C++ example")
        }
        hello.Rd <- file.path(root, "man", "rcpp_hello_world.Rd")
        unlink(hello.Rd)
        file.copy(
            system.file("skeleton", "rcpp_hello_world.Rd", package = "Rcpp11"),
            hello.Rd
        )
        message(" >> added Rd file for rcpp_hello_world")
    }
    if (isTRUE(module)) {
        file.copy(system.file("skeleton", "rcpp_module.cpp", package = "Rcpp11"), file.path(root, "src"))
        file.copy(system.file("skeleton", "Num.cpp", package = "Rcpp11"), file.path(root, "src"))
        file.copy(system.file("skeleton", "stdVector.cpp", package = "Rcpp11"), file.path(root, "src"))
        file.copy(system.file("skeleton", "zzz.R", package = "Rcpp11"), file.path(root, "R"))
        message(" >> copied the example module file ")
    }
    # final clean-up of the generated -package.Rd placeholders -------------
    package.doc <- file.path(root, "man", sprintf("%s-package.Rd", name))
    lines <- readLines(package.doc)
    lines <- sub("~~ simple examples", "%% ~~ simple examples", lines)
    lines <- lines[!grepl("~~ package title", lines)]
    lines <- lines[!grepl("~~ The author and", lines)]
    lines <- sub("Who wrote it", author, lines)
    lines <- sub("Who to complain to.*", sprintf("%s <%s>", maintainer, email), lines)
    writeLines(lines, package.doc)
    if (fake) {
        # drop the placeholder function and the files generated for it
        rm("Rcpp.fake.fun", envir = env)
        unlink(file.path(root, "R", "Rcpp.fake.fun.R"))
        unlink(file.path(root, "man", "Rcpp.fake.fun.Rd"))
    }
    if (isTRUE(remove_hello_world)) {
        rm("rcpp_hello_world", envir = env)
    }
    invisible(NULL)
}
|
# Analysis of Li/Na transport across membranes 16.9a/16.9b (transmem package):
# calibration, aliquot quantification, transport fractions, non-linear trend
# models and separation factors.
library(ggplot2)
library(ggformula)
library(transmem)
# Master switch: when TRUE, every plot is captured into a PDF file.
PDF <- FALSE
if (PDF) pdf("Perfiles23-09-19.pdf", height = 7/1.8, width = 9/1.8)
#-----STOCK SOLUTIONS--------------------------------------------------------
# Stock concentrations computed from weighed mass, assay purity and serial
# dilution factors (units not stated in this file -- presumably mg/L; TODO
# confirm against the lab notebook).
StockLi.200_2 <- 130.3 * 0.187872 * 0.99 / 0.1205105
StockNa.11000 <- 1.1693 * 0.996 /41.5065 * 0.393372 * 1000000
StockLi.5_6 <- StockLi.200_2 * 1.2650 / 50.0864
StockNa.600_2 <- StockNa.11000 * 1.6605 / 30.0755
StockNa.10_3 <- StockNa.600_2 * 0.6065 / 30.0068
#-----CALIBRATION CURVES-----------------------------------------------------
# Lithium.P: data for a planar calibration (Signal ~ Conc + Conc.S), where
# Conc.S is the sodium background concentration; Sodium.1: ordinary curve.
CalCurves <- list(
  Lithium.P = data.frame(Conc = c(0.0000, 0.0566, 0.0573, 0.1302, 0.1222, 0.1264, 0.2505, 0.2676, 0.6035, 0.6022,
                                  1.2167, 1.2060, 1.2341, 2.4143, 2.4166, 2.6897, 2.6934, 2.6938) *
                           StockLi.5_6 / c(6.0000, 6.1509, 6.0088, 6.0399, 6.0856, 6.0786, 6.0121, 6.0258,
                                           6.0866, 6.0290, 6.0289, 6.0364, 6.0655, 6.0202, 6.0293, 6.0689,
                                           6.0541, 6.1592),
                         Signal = c(0.000, 0.007, 0.007, 0.015, 0.016, 0.017, 0.032, 0.035, 0.075, 0.076,
                                    0.142, 0.147, 0.154, 0.293, 0.296, 0.316, 0.323, 0.310),
                         Conc.S = c(0.0000, 0.2770, 1.5102, 0.0000, 0.5191, 2.0127, 0.5132, 1.5121, 0.5081, 1.6378,
                                    0.0000, 0.9990, 2.0486, 0.2315, 1.5022, 0.0000, 0.5067, 2.0409) *
                           StockNa.600_2 / c(6.0000, 6.1509, 6.0088, 6.0399, 6.0856, 6.0786, 6.0121, 6.0258,
                                             6.0866, 6.0290, 6.0289, 6.0364, 6.0655, 6.0202, 6.0293, 6.0689,
                                             6.0541, 6.1592)),
  Sodium.1 = data.frame(Conc = c(0.0000, 0.0672, 0.1321, 0.3215, 0.6450, 1.5131, 3.0879, 4.1388) *
                          StockNa.10_3 / c(6.0000, 6.0089, 6.3138, 6.1288, 6.3744, 6.0450, 6.0895, 6.3559),
                        Signal = c(0.000, 0.028, 0.048, 0.099, 0.169, 0.389, 0.751, 0.921))
)
## for a cleaner workspace
#rm(list = ls()[grep("Stock", ls())])
#-----CALIBRATION MODELS-----------------------------------------------------
# Planar model for lithium (corrects the sodium interference), quadratic
# curve for sodium.
CalModels <- list(
  Lithium.P = calibPlane(plane = CalCurves$Lithium.P),
  Sodium.1 = calibCurve(curve = CalCurves$Sodium.1, order = 2)
)
anova(CalModels$Lithium.P$model)
summary(CalModels$Lithium.P$model)
#-----BLIND SAMPLES----------------------------------------------------------
# Method check: known lithium (LiRe) vs instrument-determined lithium (LiIn),
# compared against the 1:1 line and with a paired t-test.
BlindeP <- data.frame(LiRe = c(1.0245, 0.4836) * StockLi.5_6 /
                        c(6.1086, 6.1369),
                      LiSg = c(0.126, 0.060),
                      NaRe = c(1.0255, 0.2008) * StockNa.600_2 /
                        c(6.1086, 6.1369))
BlindeP$LiIn <- signal2conc(signal = BlindeP$LiSg, model = CalModels$Lithium.P, planar = TRUE,
                            Conc.S = BlindeP$NaRe)
plot(x = BlindeP$LiRe, y = BlindeP$LiIn)
abline(a = 0, b = 1, col = 2, lty = 3)
abline(lm(BlindeP$LiIn ~ BlindeP$LiRe))
summary(lm(BlindeP$LiIn ~ BlindeP$LiRe))
t.test(x = BlindeP$LiIn, y = BlindeP$LiRe, paired = TRUE)
#-----ALIQUOT SAMPLING TIMES-------------------------------------------------
# Sampling times for each membrane run (hours; the plots below use
# xlab = 'Tiempo (h)').
AliTimes <- list (
  T.16.9a = c(0, 1, 2, 3, 4, 5),
  T.16.9b = c(0, 1, 2, 3, 4, 5)
)
# Indices of the aliquots at which sodium was actually measured
# (3 measurements out of the 6 time points).
ts <- c(1, 3, 6)
#-----SAMPLE DILUTION FACTORS------------------------------------------------
# Gravimetric dilution factors (total mass / aliquot mass) per phase and run.
dilutions <- list(
  Feed.16.9a = c(2.0335/0.0503, 2.0315/0.0497, 2.0061/0.0509),
  Strip.16.9a = c(1.0352/0.3222, 1.0493/0.3237, 1.0363/0.3233),
  Feed.16.9b = c(2.0350/0.0493, 2.0232/0.0495, 2.0237/0.0502),
  Strip.16.9b = c(1.0320/0.3211, 1.0383/0.3224, 1.0384/0.3235)
)
#-----ALIQUOT ABSORBANCES----------------------------------------------------
AliAbs <- list(
  Feed.16.9.Li.a = c(0.318, 0.170, 0.103, 0.064, 0.041, 0.027),
  Strip.16.9.Li.a = c(0.000, 0.138, 0.202, 0.243, 0.263, 0.277),
  Feed.16.9.Na.a = c(0.317, 0.301, 0.306),
  Strip.16.9.Na.a = c(0.026, 0.058, 0.091),
  Feed.16.9.Li.b = c(0.314, 0.184, 0.124, 0.082, 0.057, 0.040),
  Strip.16.9.Li.b = c(0.002, 0.117, 0.180, 0.221, 0.246, 0.260),
  Feed.16.9.Na.b = c(0.324, 0.298, 0.309),
  Strip.16.9.Na.b = c(0.074, 0.058, 0.106)
)
#-----SPECIES CONCENTRATIONS IN THE ALIQUOTS---------------------------------
# Index layout per membrane i: [[4i-3]] feed Li, [[4i-2]] strip Li,
# [[4i-1]] feed Na, [[4i]] strip Na. Sodium is quantified first because the
# planar lithium calibration needs the (interpolated) Na background.
AliConc <- vector(mode = "list", length = length(AliAbs))
names(AliConc) <- names(AliAbs)
for (i in 1:(length(AliConc)/4)) {
  # Feed sodium
  AliConc[[4*i-1]] <- signal2conc(signal = AliAbs[[4*i-1]], model = CalModels$Sodium.1,
                                  dilution = dilutions[[2*i-1]])
  # Strip sodium
  AliConc[[4*i]] <- signal2conc(signal = AliAbs[[4*i]], model = CalModels$Sodium.1,
                                dilution = dilutions[[2*i]])
  # Feed lithium (Na background interpolated to all 6 time points)
  AliConc[[4*i-3]] <- signal2conc(signal = AliAbs[[4*i-3]], model = CalModels$Lithium.P, planar = TRUE,
                                  Conc.S = fixSecondary(conc = AliConc[[4*i-1]],
                                                        time = AliTimes[[i]][ts], compTime = AliTimes[[i]],
                                                        order = 2))
  # Strip lithium
  AliConc[[4*i-2]] <- signal2conc(signal = AliAbs[[4*i-2]], model = CalModels$Lithium.P, planar = TRUE,
                                  Conc.S = fixSecondary(conc = AliConc[[4*i]],
                                                        time = AliTimes[[i]][ts], compTime = AliTimes[[i]],
                                                        order = 2))
}
#-----CONCENTRATIONS TO FRACTIONS--------------------------------------------
# Convert feed/strip concentrations into transported fractions; resulting
# order: Lithium.0a, Sodium.0a, Lithium.0b, Sodium.0b.
TransFrac <- vector(mode = "list", length = length(AliConc)/2)
names(TransFrac) <- paste0(rep(c("Lithium.", "Sodium."), length(TransFrac)/2),
                           rep(c("0a", "0b"), each = 2))
for (i in 1:(length(TransFrac)/2)) {
  # Lithium
  TransFrac[[i*2-1]] <- conc2frac(feed = AliConc[[4*i-3]], strip = AliConc[[4*i-2]], time = AliTimes[[i]])
  # Sodium (only the ts time points were measured)
  TransFrac[[i*2]] <- conc2frac(feed = AliConc[[4*i-1]], strip = AliConc[[4*i]], time = AliTimes[[i]][ts])
}
#-----NON-LINEAR REGRESSION MODELS-------------------------------------------
# Fit the lithium profiles with two transport models ('paredes' and
# 'rodriguez') and compare their residual sums of squares pairwise.
TransNLS <- vector(mode = "list", length = length(TransFrac)/2)
names(TransNLS) <- names(TransFrac)[seq(from = 1, to = length(TransFrac), by = 2)]
SS_par <- vector()
for (i in 1:length(TransNLS)) {
  TransNLS[[i]] <- transTrend(TransFrac[[2*i-1]], model = 'paredes', eccen = 1)
  SS_par <- c(SS_par, sum(resid(TransNLS[[i]]$feed)^2), sum(resid(TransNLS[[i]]$strip)^2))
}
TransNLSXot <- vector(mode = "list", length = length(TransFrac)/2)
names(TransNLSXot) <- names(TransFrac)[seq(from = 1, to = length(TransFrac), by = 2)]
SS_xot <- vector()
for (i in 1:length(TransNLSXot)) {
  TransNLSXot[[i]] <- transTrend(TransFrac[[2*i-1]], model = 'rodriguez')
  SS_xot <- c(SS_xot, sum(resid(TransNLSXot[[i]]$feed)^2), sum(resid(TransNLSXot[[i]]$strip)^2))
}
t.test(x = SS_par, y = SS_xot, paired = TRUE)
plot(SS_par, SS_xot)
abline(lm(SS_xot~SS_par))
lm(SS_xot~SS_par)
#-----SEPARATION FACTORS-----------------------------------------------------
# Separation factor per time point: (strip Li / strip Na) normalised by the
# initial feed Li/Na ratio; the t = 0 row is dropped (undefined at time 0).
sepFactor <- vector(mode = "list", length = length(TransFrac)/2)
names(sepFactor) <- names(TransNLS)
for (i in 1:length(sepFactor)) {
  sec <- fixSecondary(conc = AliConc[[4*i]], time = AliTimes[[i]][ts], compTime = AliTimes[[i]], order = 2)
  X <- data.frame(time = AliTimes[[i]],
                  factor = (AliConc[[i*4-2]]/sec) / (AliConc[[i*4-3]][1]/AliConc[[i*4-1]][1]))
  #X$factor[1] <- 1
  X <- X[-1, ]
  sepFactor[[i]] <- X
}
# Stack both membranes for plotting (5 rows each after dropping t = 0).
ssepFactor <- data.frame()
for (i in 1:length(sepFactor)) ssepFactor <- rbind(ssepFactor, sepFactor[[i]])
ssepFactor$Membrana <- as.factor(paste0("Mem.", rep(c("0a", "0b"), each = 5)))
ggplot(data = ssepFactor, aes(x = time, y = factor, colour = Membrana)) + geom_point() + theme_bw() +
  ggsci::scale_color_npg() + stat_smooth(method = "lm", formula = y ~ poly(x, 2), se = FALSE, size = 0.4) +
  xlab(label = "Tiempo (horas)") + ylab(label = "Factor de separación")
# Mean separation factor per membrane, used in the parameters table below.
sF <- vector()
for (i in 1:length(sepFactor)) sF <- c(sF, mean(sepFactor[[i]][, 2]))
# Reproduce ggplot2's default discrete colour palette: n equally spaced
# hues around the HCL colour wheel at luminance 65 and chroma 100.
gg_color_hue <- function(n) {
  hue_angles <- seq(15, 375, length.out = n + 1)[seq_len(n)]
  hcl(h = hue_angles, l = 65, c = 100)
}
#-----TRANSPORT PROFILES-----------------------------------------------------
# One combined plot: lithium fractions for both membranes (TransFrac[[1]],
# TransFrac[[3]]) with the sodium fractions as secondary series.
for (i in 1:1) {
  (p <- transPlotWR(trans = list(TransFrac[[4*i-3]], TransFrac[[4*i-1]]),
                    trend = list(TransNLS[[2*i-1]], TransNLS[[2*i]]),
                    secondary = list(TransFrac[[4*i-2]], TransFrac[[4*i]]),
                    lin.secon = TRUE, xlim = c(0, 5.2), ylim = c(-0.01, 1.01),
                    ybreaks = c(0, 0.20, 0.40, 0.60, 0.80, 1), xbreaks = 1:5, xlab = 'Tiempo (h)', bw = TRUE, srs = 0.5))
}
# invisible(readline(prompt="Press [enter] to continue"))
#-----PERFORMANCE PARAMETERS-------------------------------------------------
# One row per membrane: fitted model parameters, mean separation factor and
# the final strip fraction (row 12, column 3 of the fraction table).
Parameters <- data.frame()
# NOTE(review): `j` is assigned but never used below.
j = 0
for (i in 1:2) {
  Parameters <- rbind(Parameters, c(TransNLS[[i]]$Result, sF[i], TransFrac[[2*i-1]][12, 3]))
}
# NOTE(review): one more column is bound above (the final strip fraction)
# than names supplied here -- verify the intended column names.
colnames(Parameters) <- c(names(TransNLS[[1]]$Result), "sF")
round(Parameters, 3)
if (PDF) dev.off()
|
/19-09-Simplex-2/19-09-23-LiNa-Mem16_9.R
|
no_license
|
Crparedes/master-data-treatment
|
R
| false
| false
| 9,092
|
r
|
# Analysis of Li/Na transport across membranes 16.9a/16.9b (transmem package):
# calibration, aliquot quantification, transport fractions, non-linear trend
# models and separation factors.
library(ggplot2)
library(ggformula)
library(transmem)
# Master switch: when TRUE, every plot is captured into a PDF file.
PDF <- FALSE
if (PDF) pdf("Perfiles23-09-19.pdf", height = 7/1.8, width = 9/1.8)
#-----STOCK SOLUTIONS--------------------------------------------------------
# Stock concentrations computed from weighed mass, assay purity and serial
# dilution factors (units not stated in this file -- presumably mg/L; TODO
# confirm against the lab notebook).
StockLi.200_2 <- 130.3 * 0.187872 * 0.99 / 0.1205105
StockNa.11000 <- 1.1693 * 0.996 /41.5065 * 0.393372 * 1000000
StockLi.5_6 <- StockLi.200_2 * 1.2650 / 50.0864
StockNa.600_2 <- StockNa.11000 * 1.6605 / 30.0755
StockNa.10_3 <- StockNa.600_2 * 0.6065 / 30.0068
#-----CALIBRATION CURVES-----------------------------------------------------
# Lithium.P: data for a planar calibration (Signal ~ Conc + Conc.S), where
# Conc.S is the sodium background concentration; Sodium.1: ordinary curve.
CalCurves <- list(
  Lithium.P = data.frame(Conc = c(0.0000, 0.0566, 0.0573, 0.1302, 0.1222, 0.1264, 0.2505, 0.2676, 0.6035, 0.6022,
                                  1.2167, 1.2060, 1.2341, 2.4143, 2.4166, 2.6897, 2.6934, 2.6938) *
                           StockLi.5_6 / c(6.0000, 6.1509, 6.0088, 6.0399, 6.0856, 6.0786, 6.0121, 6.0258,
                                           6.0866, 6.0290, 6.0289, 6.0364, 6.0655, 6.0202, 6.0293, 6.0689,
                                           6.0541, 6.1592),
                         Signal = c(0.000, 0.007, 0.007, 0.015, 0.016, 0.017, 0.032, 0.035, 0.075, 0.076,
                                    0.142, 0.147, 0.154, 0.293, 0.296, 0.316, 0.323, 0.310),
                         Conc.S = c(0.0000, 0.2770, 1.5102, 0.0000, 0.5191, 2.0127, 0.5132, 1.5121, 0.5081, 1.6378,
                                    0.0000, 0.9990, 2.0486, 0.2315, 1.5022, 0.0000, 0.5067, 2.0409) *
                           StockNa.600_2 / c(6.0000, 6.1509, 6.0088, 6.0399, 6.0856, 6.0786, 6.0121, 6.0258,
                                             6.0866, 6.0290, 6.0289, 6.0364, 6.0655, 6.0202, 6.0293, 6.0689,
                                             6.0541, 6.1592)),
  Sodium.1 = data.frame(Conc = c(0.0000, 0.0672, 0.1321, 0.3215, 0.6450, 1.5131, 3.0879, 4.1388) *
                          StockNa.10_3 / c(6.0000, 6.0089, 6.3138, 6.1288, 6.3744, 6.0450, 6.0895, 6.3559),
                        Signal = c(0.000, 0.028, 0.048, 0.099, 0.169, 0.389, 0.751, 0.921))
)
## for a cleaner workspace
#rm(list = ls()[grep("Stock", ls())])
#-----CALIBRATION MODELS-----------------------------------------------------
# Planar model for lithium (corrects the sodium interference), quadratic
# curve for sodium.
CalModels <- list(
  Lithium.P = calibPlane(plane = CalCurves$Lithium.P),
  Sodium.1 = calibCurve(curve = CalCurves$Sodium.1, order = 2)
)
anova(CalModels$Lithium.P$model)
summary(CalModels$Lithium.P$model)
#-----BLIND SAMPLES----------------------------------------------------------
# Method check: known lithium (LiRe) vs instrument-determined lithium (LiIn),
# compared against the 1:1 line and with a paired t-test.
BlindeP <- data.frame(LiRe = c(1.0245, 0.4836) * StockLi.5_6 /
                        c(6.1086, 6.1369),
                      LiSg = c(0.126, 0.060),
                      NaRe = c(1.0255, 0.2008) * StockNa.600_2 /
                        c(6.1086, 6.1369))
BlindeP$LiIn <- signal2conc(signal = BlindeP$LiSg, model = CalModels$Lithium.P, planar = TRUE,
                            Conc.S = BlindeP$NaRe)
plot(x = BlindeP$LiRe, y = BlindeP$LiIn)
abline(a = 0, b = 1, col = 2, lty = 3)
abline(lm(BlindeP$LiIn ~ BlindeP$LiRe))
summary(lm(BlindeP$LiIn ~ BlindeP$LiRe))
t.test(x = BlindeP$LiIn, y = BlindeP$LiRe, paired = TRUE)
#-----ALIQUOT SAMPLING TIMES-------------------------------------------------
# Sampling times for each membrane run (hours; the plots below use
# xlab = 'Tiempo (h)').
AliTimes <- list (
  T.16.9a = c(0, 1, 2, 3, 4, 5),
  T.16.9b = c(0, 1, 2, 3, 4, 5)
)
# Indices of the aliquots at which sodium was actually measured
# (3 measurements out of the 6 time points).
ts <- c(1, 3, 6)
#-----SAMPLE DILUTION FACTORS------------------------------------------------
# Gravimetric dilution factors (total mass / aliquot mass) per phase and run.
dilutions <- list(
  Feed.16.9a = c(2.0335/0.0503, 2.0315/0.0497, 2.0061/0.0509),
  Strip.16.9a = c(1.0352/0.3222, 1.0493/0.3237, 1.0363/0.3233),
  Feed.16.9b = c(2.0350/0.0493, 2.0232/0.0495, 2.0237/0.0502),
  Strip.16.9b = c(1.0320/0.3211, 1.0383/0.3224, 1.0384/0.3235)
)
#-----ALIQUOT ABSORBANCES----------------------------------------------------
AliAbs <- list(
  Feed.16.9.Li.a = c(0.318, 0.170, 0.103, 0.064, 0.041, 0.027),
  Strip.16.9.Li.a = c(0.000, 0.138, 0.202, 0.243, 0.263, 0.277),
  Feed.16.9.Na.a = c(0.317, 0.301, 0.306),
  Strip.16.9.Na.a = c(0.026, 0.058, 0.091),
  Feed.16.9.Li.b = c(0.314, 0.184, 0.124, 0.082, 0.057, 0.040),
  Strip.16.9.Li.b = c(0.002, 0.117, 0.180, 0.221, 0.246, 0.260),
  Feed.16.9.Na.b = c(0.324, 0.298, 0.309),
  Strip.16.9.Na.b = c(0.074, 0.058, 0.106)
)
#-----SPECIES CONCENTRATIONS IN THE ALIQUOTS---------------------------------
# Index layout per membrane i: [[4i-3]] feed Li, [[4i-2]] strip Li,
# [[4i-1]] feed Na, [[4i]] strip Na. Sodium is quantified first because the
# planar lithium calibration needs the (interpolated) Na background.
AliConc <- vector(mode = "list", length = length(AliAbs))
names(AliConc) <- names(AliAbs)
for (i in 1:(length(AliConc)/4)) {
  # Feed sodium
  AliConc[[4*i-1]] <- signal2conc(signal = AliAbs[[4*i-1]], model = CalModels$Sodium.1,
                                  dilution = dilutions[[2*i-1]])
  # Strip sodium
  AliConc[[4*i]] <- signal2conc(signal = AliAbs[[4*i]], model = CalModels$Sodium.1,
                                dilution = dilutions[[2*i]])
  # Feed lithium (Na background interpolated to all 6 time points)
  AliConc[[4*i-3]] <- signal2conc(signal = AliAbs[[4*i-3]], model = CalModels$Lithium.P, planar = TRUE,
                                  Conc.S = fixSecondary(conc = AliConc[[4*i-1]],
                                                        time = AliTimes[[i]][ts], compTime = AliTimes[[i]],
                                                        order = 2))
  # Strip lithium
  AliConc[[4*i-2]] <- signal2conc(signal = AliAbs[[4*i-2]], model = CalModels$Lithium.P, planar = TRUE,
                                  Conc.S = fixSecondary(conc = AliConc[[4*i]],
                                                        time = AliTimes[[i]][ts], compTime = AliTimes[[i]],
                                                        order = 2))
}
#-----CONCENTRATIONS TO FRACTIONS--------------------------------------------
# Convert feed/strip concentrations into transported fractions; resulting
# order: Lithium.0a, Sodium.0a, Lithium.0b, Sodium.0b.
TransFrac <- vector(mode = "list", length = length(AliConc)/2)
names(TransFrac) <- paste0(rep(c("Lithium.", "Sodium."), length(TransFrac)/2),
                           rep(c("0a", "0b"), each = 2))
for (i in 1:(length(TransFrac)/2)) {
  # Lithium
  TransFrac[[i*2-1]] <- conc2frac(feed = AliConc[[4*i-3]], strip = AliConc[[4*i-2]], time = AliTimes[[i]])
  # Sodium (only the ts time points were measured)
  TransFrac[[i*2]] <- conc2frac(feed = AliConc[[4*i-1]], strip = AliConc[[4*i]], time = AliTimes[[i]][ts])
}
#-----NON-LINEAR REGRESSION MODELS-------------------------------------------
# Fit the lithium profiles with two transport models ('paredes' and
# 'rodriguez') and compare their residual sums of squares pairwise.
TransNLS <- vector(mode = "list", length = length(TransFrac)/2)
names(TransNLS) <- names(TransFrac)[seq(from = 1, to = length(TransFrac), by = 2)]
SS_par <- vector()
for (i in 1:length(TransNLS)) {
  TransNLS[[i]] <- transTrend(TransFrac[[2*i-1]], model = 'paredes', eccen = 1)
  SS_par <- c(SS_par, sum(resid(TransNLS[[i]]$feed)^2), sum(resid(TransNLS[[i]]$strip)^2))
}
TransNLSXot <- vector(mode = "list", length = length(TransFrac)/2)
names(TransNLSXot) <- names(TransFrac)[seq(from = 1, to = length(TransFrac), by = 2)]
SS_xot <- vector()
for (i in 1:length(TransNLSXot)) {
  TransNLSXot[[i]] <- transTrend(TransFrac[[2*i-1]], model = 'rodriguez')
  SS_xot <- c(SS_xot, sum(resid(TransNLSXot[[i]]$feed)^2), sum(resid(TransNLSXot[[i]]$strip)^2))
}
t.test(x = SS_par, y = SS_xot, paired = TRUE)
plot(SS_par, SS_xot)
abline(lm(SS_xot~SS_par))
lm(SS_xot~SS_par)
#-----SEPARATION FACTORS-----------------------------------------------------
# Separation factor per time point: (strip Li / strip Na) normalised by the
# initial feed Li/Na ratio; the t = 0 row is dropped (undefined at time 0).
sepFactor <- vector(mode = "list", length = length(TransFrac)/2)
names(sepFactor) <- names(TransNLS)
for (i in 1:length(sepFactor)) {
  sec <- fixSecondary(conc = AliConc[[4*i]], time = AliTimes[[i]][ts], compTime = AliTimes[[i]], order = 2)
  X <- data.frame(time = AliTimes[[i]],
                  factor = (AliConc[[i*4-2]]/sec) / (AliConc[[i*4-3]][1]/AliConc[[i*4-1]][1]))
  #X$factor[1] <- 1
  X <- X[-1, ]
  sepFactor[[i]] <- X
}
# Stack both membranes for plotting (5 rows each after dropping t = 0).
ssepFactor <- data.frame()
for (i in 1:length(sepFactor)) ssepFactor <- rbind(ssepFactor, sepFactor[[i]])
ssepFactor$Membrana <- as.factor(paste0("Mem.", rep(c("0a", "0b"), each = 5)))
ggplot(data = ssepFactor, aes(x = time, y = factor, colour = Membrana)) + geom_point() + theme_bw() +
  ggsci::scale_color_npg() + stat_smooth(method = "lm", formula = y ~ poly(x, 2), se = FALSE, size = 0.4) +
  xlab(label = "Tiempo (horas)") + ylab(label = "Factor de separación")
# Mean separation factor per membrane, used in the parameters table below.
sF <- vector()
for (i in 1:length(sepFactor)) sF <- c(sF, mean(sepFactor[[i]][, 2]))
#' Generate n evenly spaced hues around the HCL colour wheel.
#'
#' NOTE(review): this is the well-known snippet that replicates ggplot2's
#' default discrete colour scale -- confirm that is the intent here.
#'
#' @param n Number of colours to generate.
#' @return Character vector of n hex colour strings.
gg_color_hue <- function(n) {
  # n + 1 points are generated because the first and last hue coincide
  # (15 and 375 degrees); only the first n are kept.
  # length.out spelled out (the original partially matched `length`);
  # `<-` used instead of `=` for assignment.
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 65, c = 100)[1:n]
}
#-----TRANSPORT PROFILES ----------------------------------------------------
# Plot the lithium (primary) and sodium (secondary) transport profiles with
# the fitted trends overlaid, one figure per iteration (only i = 1 here).
for (i in 1:1) {
  (p <- transPlotWR(trans = list(TransFrac[[4*i-3]], TransFrac[[4*i-1]]),
trend = list(TransNLS[[2*i-1]], TransNLS[[2*i]]),
secondary = list(TransFrac[[4*i-2]], TransFrac[[4*i]]),
lin.secon = TRUE, xlim = c(0, 5.2), ylim = c(-0.01, 1.01),
ybreaks = c(0, 0.20, 0.40, 0.60, 0.80, 1), xbreaks = 1:5, xlab = 'Tiempo (h)', bw = TRUE, srs = 0.5))
}
# invisible(readline(prompt="Press [enter] to continue"))
#-----PERFORMANCE PARAMETERS-------------------------------------------------
# One row per membrane: nonlinear-fit results, mean separation factor, and
# a final transported fraction.
# NOTE(review): row 12 / column 3 of TransFrac is assumed to be the last
# sampled strip fraction -- confirm.
Parameters <- data.frame()
j = 0
# NOTE(review): j is assigned but never used below.
for (i in 1:2) {
  Parameters <- rbind(Parameters, c(TransNLS[[i]]$Result, sF[i], TransFrac[[2*i-1]][12, 3]))
}
# NOTE(review): two extra values are bound per row (sF[i] and the final
# fraction) but only one extra name ("sF") is supplied -- check the column
# count of Parameters.
colnames(Parameters) <- c(names(TransNLS[[1]]$Result), "sF")
round(Parameters, 3)
if (PDF) dev.off()
|
# Test fixture: one visible function plus several dot-prefixed ("hidden")
# objects.  NOTE(review): presumably used by the import package's test suite
# to verify how hidden names and import::from formals are handled when
# importing from a script -- confirm against the tests.
fun1 <- function() {"fun1"}
.script_version <- "v1.0"
# formals from import::from
.from <- "a script"
.into <- "an env"
.directory <- "my_dir"
|
/tests/test_import/module_hidden_objects.R
|
no_license
|
cran/import
|
R
| false
| false
| 144
|
r
|
# Test fixture: one visible function plus several dot-prefixed ("hidden")
# objects.  NOTE(review): presumably used by the import package's test suite
# to verify how hidden names and import::from formals are handled when
# importing from a script -- confirm against the tests.
fun1 <- function() {"fun1"}
.script_version <- "v1.0"
# formals from import::from
.from <- "a script"
.into <- "an env"
.directory <- "my_dir"
|
# Draw points at (x, y) coloured by the value of z, and add a colour legend.
# `col` is the palette used both to map z to point colours and to label the
# legend strip.  Extra arguments are forwarded to points().
# NOTE(review): tim.colors/color.scale/image.plot presumably come from the
# `fields` package -- an existing plot must already be open.
bubblePlot <- function(x, y = NULL, z, col = tim.colors(256), ...) {
  point_cols <- color.scale(z, col)
  points(x, y, col = point_cols, pch = 16, ...)
  # legend.only: the scatter itself was drawn above, only add the strip
  image.plot(legend.only = TRUE, add = TRUE,
             col = col, zlim = range(z, na.rm = TRUE))
}
|
/bubblePlot.R
|
no_license
|
dnychka/UrbanTypology
|
R
| false
| false
| 221
|
r
|
# Draw points at (x, y) coloured by the value of z, and add a colour legend.
# `col` is the palette used both to map z to point colours and to label the
# legend strip.  Extra arguments are forwarded to points().
# NOTE(review): tim.colors/color.scale/image.plot presumably come from the
# `fields` package -- an existing plot must already be open.
bubblePlot <- function(x, y = NULL, z, col = tim.colors(256), ...) {
  point_cols <- color.scale(z, col)
  points(x, y, col = point_cols, pch = 16, ...)
  # legend.only: the scatter itself was drawn above, only add the strip
  image.plot(legend.only = TRUE, add = TRUE,
             col = col, zlim = range(z, na.rm = TRUE))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ft_serialize.R
\name{ft_serialize}
\alias{ft_serialize}
\alias{ft_get_keys}
\title{Serialize raw text to other formats, including to disk}
\usage{
ft_serialize(x, to = "xml", from = NULL, ...)
ft_get_keys(x)
}
\arguments{
\item{x}{Input object, output from a call to \code{ft_get}. Required.}
\item{to}{(character) Format to serialize to. One of list,
xml, or json. Required. Output to xml returns object of
class XMLInternalDocument.}
\item{from}{(character) Format \code{x} is currently in. Function attempts
to use metadata provided, or guess from data itself. Optional.
CURRENTLY IGNORED.}
\item{...}{Further args passed on to \code{xml2::read_xml()} or
\code{jsonlite::toJSON()}}
}
\value{
An object of class \code{ft_parsed}
}
\description{
\code{ft_serialize} helps you convert to various data formats. If
your data is in unparsed XML (i.e., character class), you can convert to
parsed XML. If in XML, you can convert to (ugly-ish) JSON, or a list.
}
\examples{
\dontrun{
res <- ft_get('10.7717/peerj.228')
# if articles in xml format, parse the XML
(out <- ft_serialize(ft_collect(res), to='xml'))
out$peerj$data$data[[1]] # the xml
# From XML to JSON
(out <- ft_serialize(ft_collect(res), to='json'))
out$peerj$data$data$`10.7717/peerj.228` # the json
jsonlite::fromJSON(out$peerj$data$data$`10.7717/peerj.228`)
# To a list
out <- ft_serialize(ft_collect(res), to='list')
out$peerj$data$data
out$peerj$data$data[[1]]$body$sec$title
}
}
|
/man/ft_serialize.Rd
|
no_license
|
cran/fulltext
|
R
| false
| true
| 1,530
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ft_serialize.R
\name{ft_serialize}
\alias{ft_serialize}
\alias{ft_get_keys}
\title{Serialize raw text to other formats, including to disk}
\usage{
ft_serialize(x, to = "xml", from = NULL, ...)
ft_get_keys(x)
}
\arguments{
\item{x}{Input object, output from a call to \code{ft_get}. Required.}
\item{to}{(character) Format to serialize to. One of list,
xml, or json. Required. Output to xml returns object of
class XMLInternalDocument.}
\item{from}{(character) Format \code{x} is currently in. Function attempts
to use metadata provided, or guess from data itself. Optional.
CURRENTLY IGNORED.}
\item{...}{Further args passed on to \code{xml2::read_xml()} or
\code{jsonlite::toJSON()}}
}
\value{
An object of class \code{ft_parsed}
}
\description{
\code{ft_serialize} helps you convert to various data formats. If
your data is in unparsed XML (i.e., character class), you can convert to
parsed XML. If in XML, you can convert to (ugly-ish) JSON, or a list.
}
\examples{
\dontrun{
res <- ft_get('10.7717/peerj.228')
# if articles in xml format, parse the XML
(out <- ft_serialize(ft_collect(res), to='xml'))
out$peerj$data$data[[1]] # the xml
# From XML to JSON
(out <- ft_serialize(ft_collect(res), to='json'))
out$peerj$data$data$`10.7717/peerj.228` # the json
jsonlite::fromJSON(out$peerj$data$data$`10.7717/peerj.228`)
# To a list
out <- ft_serialize(ft_collect(res), to='list')
out$peerj$data$data
out$peerj$data$data[[1]]$body$sec$title
}
}
|
# Fit a 10-fold cross-validated elastic-net model (alpha = 0.15, MAE loss)
# on the soft-tissue training set and append the fitted-path summary to a
# log file.
library(glmnet)
# header = TRUE spelled out: the original `head=T` relied on partial
# argument matching and the reassignable alias T.
mydata <- read.table("./TrainingSet/LassoBIC/soft_tissue.csv", header = TRUE, sep = ",")
# Column 1 is the response; predictor columns start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.15, family = "gaussian", standardize = TRUE)
sink('./Model/EN/Lasso/soft_tissue/soft_tissue_031.txt', append = TRUE)
print(glm$glmnet.fit)
sink()  # restore output to the console
|
/Model/EN/Lasso/soft_tissue/soft_tissue_031.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 366
|
r
|
# Fit a 10-fold cross-validated elastic-net model (alpha = 0.15, MAE loss)
# on the soft-tissue training set and append the fitted-path summary to a
# log file.
library(glmnet)
# header = TRUE spelled out: the original `head=T` relied on partial
# argument matching and the reassignable alias T.
mydata <- read.table("./TrainingSet/LassoBIC/soft_tissue.csv", header = TRUE, sep = ",")
# Column 1 is the response; predictor columns start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.15, family = "gaussian", standardize = TRUE)
sink('./Model/EN/Lasso/soft_tissue/soft_tissue_031.txt', append = TRUE)
print(glm$glmnet.fit)
sink()  # restore output to the console
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{tp_accnames}
\alias{tp_accnames}
\title{Return all accepted names for a taxon name with a given id.}
\usage{
tp_accnames(id, key = NULL, callopts = list())
}
\arguments{
\item{id}{the taxon identifier code}
\item{key}{Your Tropicos API key; loads from .Rprofile.}
\item{callopts}{Further args passed on to httr::GET}
}
\value{
List or dataframe.
}
\description{
Return all accepted names for a taxon name with a given id.
}
\examples{
\dontrun{
tp_accnames(id = 25503923)
tp_accnames(id = 25538750)
# No accepted names found
tp_accnames(id = 25509881)
}
}
|
/man/tp_accnames.Rd
|
permissive
|
fmichonneau/taxize
|
R
| false
| false
| 619
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{tp_accnames}
\alias{tp_accnames}
\title{Return all accepted names for a taxon name with a given id.}
\usage{
tp_accnames(id, key = NULL, callopts = list())
}
\arguments{
\item{id}{the taxon identifier code}
\item{key}{Your Tropicos API key; loads from .Rprofile.}
\item{callopts}{Further args passed on to httr::GET}
}
\value{
List or dataframe.
}
\description{
Return all accepted names for a taxon name with a given id.
}
\examples{
\dontrun{
tp_accnames(id = 25503923)
tp_accnames(id = 25538750)
# No accepted names found
tp_accnames(id = 25509881)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cog_spec.R
\name{cog_spec}
\alias{cog_spec}
\alias{as_cog_specs}
\title{Cognostic Specification}
\usage{
cog_spec(bivariate_continuous = TRUE, bivariate_counts = TRUE,
bivariate_step = TRUE, boxplot = TRUE,
density_2d_continuous = TRUE, density_continuous = TRUE,
grouped_counts = TRUE, grouped_testing = TRUE, hex_counts = TRUE,
histogram_counts = TRUE, linear_model = TRUE, loess_model = TRUE,
pairwise_counts = TRUE, quantile_quantile = TRUE,
scagnostics = TRUE, smooth_line = TRUE, square_counts = TRUE,
univariate_continuous = TRUE, univariate_counts = TRUE,
univariate_discrete = TRUE, ..., .keep_layer = TRUE)
as_cog_specs(p, specs)
}
\arguments{
\item{bivariate_continuous, bivariate_counts, bivariate_step, boxplot, density_2d_continuous, density_continuous, grouped_counts, grouped_testing, hex_counts, histogram_counts, linear_model, loess_model, pairwise_counts, quantile_quantile, scagnostics, smooth_line, square_counts, univariate_continuous, univariate_counts, univariate_discrete}{names of cognostic groups to calculate. The boolean value (TRUE) supplied to each argument determines if the value should be displayed if possible or removed if possible.}
\item{...}{ignored. Will cause error if any are supplied}
\item{.keep_layer}{boolean (TRUE) that determines if the layer should be kept at all}
\item{p}{plot object in question}
\item{specs}{list of cog_spec outputs for each layer of the plot object}
}
\value{
cognostic specification that determines which cogs are added or removed if possible
}
\description{
Cognostic Specification
}
\examples{
# example cog specifications
# display like normal
cog_spec(); TRUE
# remove scagnostics
cog_spec(scagnostics = FALSE)
# remove layer
cog_spec(.keep_layer = FALSE); FALSE
# set up data
p <- ggplot2::qplot(Sepal.Length, Sepal.Width, data = iris, geom = c("point", "smooth"))
dt <- tibble::data_frame(panel = list(p))
# compute cognostics like normal
add_panel_cogs(dt)
# do not compute scagnostics for geom_point cognostics
# compute geom_smooth cognostics
add_panel_cogs(dt, spec = list(cog_spec(scagnostics = FALSE), TRUE))
# do not compute scagnostics for geom_point cognostics
# do not compute geom_smooth cognostics
add_panel_cogs(dt, spec = list(cog_spec(scagnostics = FALSE), FALSE))
}
|
/man/cog_spec.Rd
|
no_license
|
hafen/autocogs
|
R
| false
| true
| 2,365
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cog_spec.R
\name{cog_spec}
\alias{cog_spec}
\alias{as_cog_specs}
\title{Cognostic Specification}
\usage{
cog_spec(bivariate_continuous = TRUE, bivariate_counts = TRUE,
bivariate_step = TRUE, boxplot = TRUE,
density_2d_continuous = TRUE, density_continuous = TRUE,
grouped_counts = TRUE, grouped_testing = TRUE, hex_counts = TRUE,
histogram_counts = TRUE, linear_model = TRUE, loess_model = TRUE,
pairwise_counts = TRUE, quantile_quantile = TRUE,
scagnostics = TRUE, smooth_line = TRUE, square_counts = TRUE,
univariate_continuous = TRUE, univariate_counts = TRUE,
univariate_discrete = TRUE, ..., .keep_layer = TRUE)
as_cog_specs(p, specs)
}
\arguments{
\item{bivariate_continuous, bivariate_counts, bivariate_step, boxplot, density_2d_continuous, density_continuous, grouped_counts, grouped_testing, hex_counts, histogram_counts, linear_model, loess_model, pairwise_counts, quantile_quantile, scagnostics, smooth_line, square_counts, univariate_continuous, univariate_counts, univariate_discrete}{names of cognostic groups to calculate. The boolean value (TRUE) supplied to each argument determines if the value should be displayed if possible or removed if possible.}
\item{...}{ignored. Will cause error if any are supplied}
\item{.keep_layer}{boolean (TRUE) that determines if the layer should be kept at all}
\item{p}{plot object in question}
\item{specs}{list of cog_spec outputs for each layer of the plot object}
}
\value{
cognostic specification that determines which cogs are added or removed if possible
}
\description{
Cognostic Specification
}
\examples{
# example cog specifications
# display like normal
cog_spec(); TRUE
# remove scagnostics
cog_spec(scagnostics = FALSE)
# remove layer
cog_spec(.keep_layer = FALSE); FALSE
# set up data
p <- ggplot2::qplot(Sepal.Length, Sepal.Width, data = iris, geom = c("point", "smooth"))
dt <- tibble::data_frame(panel = list(p))
# compute cognostics like normal
add_panel_cogs(dt)
# do not compute scagnostics for geom_point cognostics
# compute geom_smooth cognostics
add_panel_cogs(dt, spec = list(cog_spec(scagnostics = FALSE), TRUE))
# do not compute scagnostics for geom_point cognostics
# do not compute geom_smooth cognostics
add_panel_cogs(dt, spec = list(cog_spec(scagnostics = FALSE), FALSE))
}
|
## Read the power-consumption data; "?" marks missing values.
## header = TRUE spelled out (original used the reassignable alias T).
hpc <- read.table('./household_power_consumption.txt', header = TRUE, sep = ";", na.strings = "?")
## Convert the Date column from d/m/Y strings to Date
hpc$Date <- as.Date(hpc$Date, format="%d/%m/%Y")
## Keep only the two days of interest; %in% replaces the == | == chain
## (equivalent result: NA dates are excluded either way)
hpc <- hpc[hpc$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
## Combine date and time into a single timestamp for the x-axis
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), format="%Y-%m-%d %H:%M:%S")
## Plot 4: 2x2 panel of time series
par(mfrow = c(2, 2))
plot(hpc$DateTime, hpc$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(hpc$DateTime, hpc$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(hpc$DateTime, hpc$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(hpc$DateTime, hpc$Sub_metering_2, col="red")
lines(hpc$DateTime, hpc$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c('black', 'red', 'blue'), cex=0.8, bty="n")
plot(hpc$DateTime, hpc$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
## Copy the screen device to a PNG file for plot 4
dev.copy(png, file="plot4.png", width=628, height=529)
dev.off()
|
/plot4.R
|
no_license
|
brandiloper/ExData_Plotting1
|
R
| false
| false
| 1,137
|
r
|
## Read the power-consumption data; "?" marks missing values.
## header = TRUE spelled out (original used the reassignable alias T).
hpc <- read.table('./household_power_consumption.txt', header = TRUE, sep = ";", na.strings = "?")
## Convert the Date column from d/m/Y strings to Date
hpc$Date <- as.Date(hpc$Date, format="%d/%m/%Y")
## Keep only the two days of interest; %in% replaces the == | == chain
## (equivalent result: NA dates are excluded either way)
hpc <- hpc[hpc$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
## Combine date and time into a single timestamp for the x-axis
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), format="%Y-%m-%d %H:%M:%S")
## Plot 4: 2x2 panel of time series
par(mfrow = c(2, 2))
plot(hpc$DateTime, hpc$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(hpc$DateTime, hpc$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(hpc$DateTime, hpc$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(hpc$DateTime, hpc$Sub_metering_2, col="red")
lines(hpc$DateTime, hpc$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c('black', 'red', 'blue'), cex=0.8, bty="n")
plot(hpc$DateTime, hpc$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
## Copy the screen device to a PNG file for plot 4
dev.copy(png, file="plot4.png", width=628, height=529)
dev.off()
|
#' scRNA seq matrix of pancreas cells
#'
#' Data from Tabula Muris project. A subset of the single cell RNA-seq data
#' from the pancreas to be used as an example for GeneFishing. The rows are
#' genes and columns are cells.
#'
#' @docType data
#'
#' @usage data(pancreas)
#'
#' @keywords datasets
#'
#' @references Schaum, N. et al. (2018) Nature 562, 367–372
#' (\href{https://www.nature.com/articles/s41586-018-0590-4}{Nature})
#'
#' @source \href{https://tabula-muris.ds.czbiohub.org/}{Tabula Muris database}
#'
#' @examples
#' data(pancreas)
#' pancreas[1:4, 1:4]
"pancreas"
|
/R/pancreas-data.R
|
no_license
|
zoevernon/scGeneFishing
|
R
| false
| false
| 585
|
r
|
#' scRNA seq matrix of pancreas cells
#'
#' Data from Tabula Muris project. A subset of the single cell RNA-seq data
#' from the pancreas to be used as an example for GeneFishing. The rows are
#' genes and columns are cells.
#'
#' @docType data
#'
#' @usage data(pancreas)
#'
#' @keywords datasets
#'
#' @references Schaum, N. et al. (2018) Nature 562, 367–372
#' (\href{https://www.nature.com/articles/s41586-018-0590-4}{Nature})
#'
#' @source \href{https://tabula-muris.ds.czbiohub.org/}{Tabula Muris database}
#'
#' @examples
#' data(pancreas)
#' pancreas[1:4, 1:4]
"pancreas"
|
# Copyright (c) 2012-2020 Broad Institute, Inc., Massachusetts Institute of Technology, and Regents of the University of California. All rights reserved.
# ssGSEA
# Processes the command line for ssGSEA.project.dataset.
#
# Arguments arrive as "-Xvalue" tokens (two-character flag immediately
# followed by its value):
#   -l libdir      -i input gct          -o output prefix
#   -D gene-set DB list file             -c gene symbol column
#   -s gene-set selection (comma list)   -n sample normalization method
#   -w weighting exponent                -v minimum overlap
#   -C combine mode
ssGSEA.cmdline <- function(...)
{
   # Defaults used when a flag is absent or its value is empty
   input.gct.filename <- NA
   output.prefix <- NA
   gene.sets.db.list.filename <- NA
   gene.symbol.column <- "Name"
   gene.set.selection <- "ALL"
   sample.normalization.method <- "none"
   weighting.exponent <- 0.75
   min.overlap <- 1
   combine.mode <- "combine.all"

   args <- list(...)
   for (i in 1:length(args[[1]]))
   {
      arg <- args[[1]][i]
      flag <- substring(arg, 1, 2)
      value <- substring(arg, 3, nchar(arg))
      if (value == '')
      {
         # Empty value: keep the default for this flag
         next
      }
      else if (flag == '-l')
      {
         libdir <- value
      }
      else if (flag == '-i')
      {
         input.gct.filename <- value
      }
      else if (flag == '-o')
      {
         output.prefix <- value
      }
      else if (flag == '-D')
      {
         gene.sets.db.list.filename <- value
      }
      else if (flag == '-c')
      {
         gene.symbol.column <- value
      }
      else if (flag == '-s')
      {
         # Comma-separated list of gene-set names
         gene.set.selection <- unlist(strsplit(value, ','))
      }
      else if (flag == '-n')
      {
         sample.normalization.method <- value
      }
      else if (flag == '-w')
      {
         weighting.exponent <- as.numeric(value)
      }
      else if (flag == '-v')
      {
         min.overlap <- as.integer(value)
      }
      else if (flag == '-C')
         combine.mode <- value
      else
         stop("Unknown option ", flag)
   }

   if (is.na(input.gct.filename))
      stop("Missing input.gct.filename")

   if (is.na(output.prefix))
   {
      # Derive the output prefix from the input file name: strip the
      # directory part and the .gct extension, then append ".PROJ".
      temp <- strsplit(input.gct.filename, split = "/")
      s <- length(temp[[1]])
      input.file.name <- temp[[1]][s]
      # fixed = TRUE: ".gct" must be matched literally; as a regex the
      # leading "." matches any character (e.g. "axgct"), which could
      # truncate the name at the wrong place.
      temp <- strsplit(input.file.name, split = ".gct", fixed = TRUE)
      output.prefix <- paste0(temp[[1]][1], ".PROJ")
   }

   gene.sets.dbfile.list <- NA
   if (!is.na(gene.sets.db.list.filename)) {
      # One gene-set database file path per line
      gene.sets.dbfile.list <- readLines(gene.sets.db.list.filename)
   }
   else {
      stop("No Gene Set DB files provided")
   }

   # NOTE(review): libdir is only set when -l is supplied; setup() errors
   # otherwise, matching the original behavior.
   setup(libdir)
   source(file.path(libdir, "ssGSEA.Library.R"))

   suppressWarnings(ssGSEA.project.dataset(input.gct.filename,
      paste0(output.prefix, ".gct"),
      gene.sets.dbfile.list = gene.sets.dbfile.list,
      gene.symbol.column = gene.symbol.column,
      gene.set.selection = gene.set.selection,
      sample.norm.type = sample.normalization.method,
      weight = weighting.exponent,
      min.overlap = min.overlap,
      combine.mode = combine.mode))
}
# Prepare the execution environment: load the module's shared helpers from
# libdir, point the library path at libdir, and install required packages.
setup <- function(libdir)
{
   source(file.path(libdir,"common.R"))
   setLibPath(libdir)
   install.required.packages(libdir)
}
# Install non-base packages needed by the module.  info() (defined in
# common.R) logs environment details; no extra packages are required.
install.required.packages <- function(libdir)
{
   info(libdir)
   # no non-base packages required by this module
}
# Call the command-line function, passing the args from GenePattern
# (TRUE spelled out instead of the reassignable alias T)
ssGSEA.cmdline(commandArgs(trailingOnly=TRUE))
|
/src/ssGSEA.R
|
permissive
|
mirabellechen/ssGSEA-gpmodule
|
R
| false
| false
| 3,655
|
r
|
# Copyright (c) 2012-2020 Broad Institute, Inc., Massachusetts Institute of Technology, and Regents of the University of California. All rights reserved.
# ssGSEA
# processes the cmd line for the ssGSEA.project.dataset
ssGSEA.cmdline <- function(...)
{
input.gct.filename <- NA
output.prefix <- NA
gene.sets.db.list.filename <- NA
gene.symbol.column <- "Name"
gene.set.selection <- "ALL"
sample.normalization.method <- "none"
weighting.exponent <- 0.75
min.overlap <- 1
combine.mode <- "combine.all"
args <- list(...)
for (i in 1:length(args[[1]]))
{
arg <- args[[1]][i]
flag <- substring(arg, 1, 2)
value <- substring(arg, 3, nchar(arg))
if (value == '')
{
next
}
else if (flag == '-l')
{
libdir <- value
}
else if (flag == '-i')
{
input.gct.filename <- value
}
else if (flag == '-o')
{
output.prefix <- value
}
else if (flag == '-D')
{
gene.sets.db.list.filename <- value
}
else if (flag == '-c')
{
gene.symbol.column <- value
}
else if (flag == '-s')
{
gene.set.selection <- unlist(strsplit(value,','))
}
else if (flag == '-n')
{
sample.normalization.method <- value
}
else if (flag == '-w')
{
weighting.exponent <- as.numeric(value)
}
else if (flag == '-v')
{
min.overlap <- as.integer(value)
}
else if (flag == '-C')
combine.mode <- value
else
stop("Unknown option", flag)
}
if (is.na(input.gct.filename))
stop("Missing input.gct.filename")
if (is.na(output.prefix))
{
temp <- strsplit(input.gct.filename, split="/") # Extract input file name
s <- length(temp[[1]])
input.file.name <- temp[[1]][s]
temp <- strsplit(input.file.name, split=".gct")
output.prefix <- paste(temp[[1]][1],".PROJ", sep="")
}
gene.sets.dbfile.list <- NA
if (!is.na(gene.sets.db.list.filename)) {
gene.sets.dbfile.list <- readLines(gene.sets.db.list.filename)
}
else {
stop("No Gene Set DB files provided")
}
setup(libdir)
source(file.path(libdir,"ssGSEA.Library.R"))
suppressWarnings(ssGSEA.project.dataset(input.gct.filename,
paste(output.prefix, ".gct", sep=""),
gene.sets.dbfile.list = gene.sets.dbfile.list,
gene.symbol.column = gene.symbol.column,
gene.set.selection = gene.set.selection,
sample.norm.type = sample.normalization.method,
weight = weighting.exponent,
min.overlap = min.overlap,
combine.mode = combine.mode))
}
# Prepare the execution environment: load the module's shared helpers from
# libdir, point the library path at libdir, and install required packages.
setup <- function(libdir)
{
   source(file.path(libdir,"common.R"))
   setLibPath(libdir)
   install.required.packages(libdir)
}
# Install non-base packages needed by the module.  info() (defined in
# common.R) logs environment details; no extra packages are required.
install.required.packages <- function(libdir)
{
   info(libdir)
   # no non-base packages required by this module
}
# Call the command-line function, passing the args from GenePattern
# (TRUE spelled out instead of the reassignable alias T)
ssGSEA.cmdline(commandArgs(trailingOnly=TRUE))
|
# Basic setup: packages and data load
library(doBy)
library(dplyr)
library(psych)
library(Hmisc)
library(skimr)
library(fBasics)
library(ggplot2)
Sys.setlocale("LC_ALL","korean")# required when the OS locale is not Korean
x_data <- read.csv("C:/Users/seokm/OneDrive/Documents/project_data/X_train.csv",header = TRUE, sep = ',', stringsAsFactors = FALSE,encoding = "CP949")
y_data <- read.csv("C:/Users/seokm/OneDrive/Documents/project_data/y_train.csv",header = TRUE, sep = ',',stringsAsFactors = FALSE,encoding = "CP949")
# Join transaction features (x) with the customer labels (y) on customer id
data <- merge(x = y_data, y = x_data, by = 'custid')
#---------dc_rate--------------------------------
#--------------------------------
# Discount rate per transaction, rounded to whole percent, then binned into
# three levels: 0 stays 0, 1-5 -> 5, 6-65 -> 10 (labelled 0% / 5% / 10%).
dc_rate <- round((data$dis_amt/ data$tot_amt)*100, 0)
dc_rate
unique(dc_rate)
from <- list(0, c(1:5), c(6:65))
to <- list(0, 5, 10)
library(doBy)
dc_rate <- recodeVar(dc_rate , from , to)
dc_rate_f <- factor(dc_rate, levels = c(0,5,10), labels = c('0%','5%','10%'))
data$dc_rate <- dc_rate
data$dc_rate_f <- dc_rate_f
dc_rate
summary(data$dc_rate)
#-------------------------------------------------------------
# inst_tot: 1 = interest-free installment, 2 = interest-bearing installment,
# 3 = lump-sum payment
str(data)
data_tmp <- data
# All observed (inst_mon, inst_fee) combinations
tmp <- as.data.frame(table(data_tmp$inst_mon, data_tmp$inst_fee))
tmp
names(tmp) <- c('inst_mon', 'inst_fee', 'inst_tot')
tmp
# Installment factor: hand-coded label for each (inst_mon, inst_fee) row above
tmp$inst_tot <- c(3, 1, 1, 1, 1, 1 ,1, 1 ,1 ,1 ,1, 1, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 )
tmp
data_tmp <- merge(data_tmp, tmp, by = c('inst_mon', 'inst_fee'))
data_inst <- data_tmp
data_inst
str(data_inst)
# Keep only non-negative net amounts (negative rows presumably refunds)
data_pos <- data_inst[data_inst$net_amt>=0,]
str(data_pos)
data_pos$inst_tot_f <- as.factor(data_pos$inst_tot)
data_pos$inst_tot_f <- factor(data_pos$inst_tot_f, levels= c(1:3), labels = c('무이자할부', '유이자할부','일시불'))
#------------------------------------------------buyer_nm_f------------------------------------------------
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
length(unique(data_pos$buyer_nm))
# Encode buyer_nm (35 categories) as a factor ordered by first appearance;
# the double factor() call keeps the original Korean labels
data_pos$buyer_nm_f <- factor(data_pos$buyer_nm, levels= unique(data_pos$buyer_nm),labels = c(0:34))
data_pos$buyer_nm_f <- factor(data_pos$buyer_nm_f, levels= c(0:34),labels = unique(data_pos$buyer_nm))
table(data_pos$buyer_nm_f)
#-------------------------------------buyer_nm / dc_rate / count / proportion-------------------------------------------------------
library(doBy)
# Share (%) of transactions per discount level within each buyer category
tmp <- table(data_pos$dc_rate_f, data_pos$buyer_nm_f)
tmp_prop <- prop.table(tmp, 2)
tmp_prop <- round(tmp_prop, 4) *100
tmp_prop
tmp_prop <- as.data.frame(tmp_prop)
names(tmp_prop) <- c('할인율', '카테고리별', '건수')
tmp_prop
# Drop rows 79-105 (the trailing categories; see the comment below)
tmp_prop<-tmp_prop[-c(79:105),]
tmp_prop
table(data_pos$buyer_nm_f) # dropped categories: 행사장, 조리식품, 청과곡물, 점외
# Stacked bar chart: per-category breakdown of sales counts by discount rate
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=건수, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매건수 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=건수, label = paste(건수,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, face = "bold", vjust=0, color="black", size=12),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#-------------------------------------buyer_nm /dc_rate / count/ real-------------------------------------------------------
library(doBy)
tmp <- table(data_pos$dc_rate_f, data_pos$buyer_nm_f)
tmp <- as.data.frame(tmp)
names(tmp) <- c('할인율', '카테고리별', '건수')
tmp
tmp<-tmp[-c(79:105),]
tmp
table(data_pos$buyer_nm_f) ###############################행사장, 조리식품, 청과곡물,점외
ggplot(as.data.frame(tmp), aes(x=카테고리별, y=건수, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매건수 비교(real, NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=건수, label = paste(건수)),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, face = "bold", vjust=0, color="black", size=14),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#---------------------------------------buyer_nm /dc_rate / net_amt /Proposition-----------------------------------------------
tmp <- aggregate(net_amt ~ buyer_nm_f + dc_rate_f, data_pos, sum, drop = FALSE)
tmp[is.na(tmp)]<-0
tmp
temp <-matrix(as.numeric(tmp$net_amt), ncol = length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <-levels(tmp$buyer_nm_f)
rownames(temp)<-levels(tmp$dc_rate_f)
temp <- as.table(temp)
temp
tmp_prop <- prop.table(temp,2)
tmp_prop <-round(tmp_prop ,4)*100
tmp_prop <-as.data.frame(tmp_prop)
tmp_prop
tmp_prop<-tmp_prop[-c(79:105),]
tmp_prop
names(tmp_prop) <-c('할인율','카테고리별','금액')
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=금액, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매금액 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 1, face = "bold", vjust=0, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#---------------------------------------buyer_nm /dc_rate / net_amt / Real-----------------------------------------------
tmp <- aggregate(net_amt ~ buyer_nm_f + dc_rate_f, data_pos, sum, drop = FALSE)
tmp[is.na(tmp)]<-0
tmp
temp <-matrix(as.numeric(tmp$net_amt), ncol = length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <-levels(tmp$buyer_nm_f)
rownames(temp)<-levels(tmp$dc_rate_f)
temp <- as.table(temp)
temp
temp <-as.data.frame(temp)
temp
temp<-temp[-c(79:105),]
temp
names(temp) <-c('할인율','카테고리별','금액')
ggplot(as.data.frame(temp), aes(x=카테고리별, y=금액, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매금액 비교(real)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액)),position = position_stack(vjust = 0.5), color = "black", size=2)+
theme(axis.text.x = element_text(angle=90, hjust = 1, vjust=0, color="black", size=10),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#--------------------------------------------buyer_nm /inst_tot / count /Proposition---------------------------------------------------------
# inst_tot 팩터형 추가
# inst_tot / 무이자 할부 = 1/ 유이자 할부 = 2/ 일시불 = 3
#data_pos$inst_tot_f <- factor(data_pos$inst_tot, levels = c(1:3), labels = c("무이자 할부", "유이자 할부", "일시불"))
#-------------------------------------------------------------
tmp <- table(data_pos$inst_tot_f, data_pos$buyer_nm_f)
tmp_prop <-prop.table(tmp,2)
tmp_prop <- round(tmp_prop,4)*100
tmp_prop <-as.data.frame(tmp_prop)
names(tmp_prop) <- c('할부요인','카테고리별', '건수')
tmp_prop
tmp_prop<-tmp_prop[-c(79:105),]
tmp_prop
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=건수, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매건수 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=건수, label = paste(건수,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 1, vjust=0, color="black", size=10),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#--------------------------------------------buyer_nm /inst_tot / count /real---------------------------------------------------------
# inst_tot 팩터형 추가
# inst_tot / 무이자 할부 = 1/ 유이자 할부 = 2/ 일시불 = 3
#data_pos$inst_tot_f <- factor(data_pos$inst_tot, levels = c(1:3), labels = c("무이자 할부", "유이자 할부", "일시불"))
#-------------------------------------------------------------
tmp <- table(data_pos$inst_tot_f, data_pos$buyer_nm_f)
tmp <-as.data.frame(tmp)
names(tmp) <- c('할부요인','카테고리별', '건수')
tmp
tmp<-tmp[-c(79:105),]
tmp
ggplot(as.data.frame(tmp), aes(x=카테고리별, y=건수, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매건수 비교(real, NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=건수, label = paste(건수)),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 1, face = "bold", vjust=0, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#--------------------------------------------buyer_nm /inst_tot / net_amt / Proposition-------------------------------------------------------------
tmp <- aggregate(net_amt ~ buyer_nm_f + inst_tot_f, data_pos, sum, drop=FALSE)
tmp[is.na(tmp)] <- 0
tmp
temp <- matrix(as.numeric(tmp$net_amt), ncol=length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <- levels(tmp$buyer_nm_f)
rownames(temp) <- levels(tmp$inst_tot_f)
temp
temp <- as.table(temp)
tmp_prop <- prop.table(temp, 2)
tmp_prop <- round(tmp_prop, 4)*100
tmp_prop <- as.data.frame(tmp_prop)
tmp_prop
names(tmp_prop) <- c('할부요인', '카테고리별', '금액')
tmp_prop
tmp_prop <-tmp_prop[-c(79:105),]
tmp_prop
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=금액, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매금액 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 0.5, face= 'bold', vjust=0.5, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#--------------------------------------------buyer_nm /inst_tot / net_amt / Real-------------------------------------------------------------
tmp <- aggregate(net_amt ~ buyer_nm_f + inst_tot_f, data_pos, sum, drop=FALSE)
tmp[is.na(tmp)] <- 0
tmp
temp <- matrix(as.numeric(tmp$net_amt), ncol=length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <- levels(tmp$buyer_nm_f)
rownames(temp) <- levels(tmp$inst_tot_f)
temp
temp <- as.table(temp)
temp <- as.data.frame(temp)
temp
names(temp) <- c('할부요인', '카테고리별', '금액')
temp
temp <-temp[-c(79:105),]
temp
ggplot(as.data.frame(temp), aes(x=카테고리별, y=금액, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매금액 비교(real, NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액)),position = position_stack(vjust = 0.5), color = "black", size=2)+
theme(axis.text.x = element_text(angle=90, face = "bold", hjust = 0.5, vjust=0.5, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
|
/1_Code/3_Chung/R/department_Chung/buyer_nm.R
|
no_license
|
horaeng1/Asiae_AI
|
R
| false
| false
| 11,027
|
r
|
#기본
library(doBy)
library(dplyr)
library(psych)
library(Hmisc)
library(skimr)
library(fBasics)
library(ggplot2)
Sys.setlocale("LC_ALL","korean")#os가 한글이 아닐시에 꼭 써야함
x_data <- read.csv("C:/Users/seokm/OneDrive/Documents/project_data/X_train.csv",header = TRUE, sep = ',', stringsAsFactors = FALSE,encoding = "CP949")
y_data <- read.csv("C:/Users/seokm/OneDrive/Documents/project_data/y_train.csv",header = TRUE, sep = ',',stringsAsFactors = FALSE,encoding = "CP949")
data <- merge(x = y_data, y = x_data, by = 'custid')
#---------dc_rate--------------------------------
#--------------------------------
# Discount rate as a percentage of the total amount, rounded to integers.
# NOTE(review): tot_amt == 0 would yield NaN/Inf here -- confirm upstream data.
dc_rate <- round((data$dis_amt/ data$tot_amt)*100, 0)
dc_rate
unique(dc_rate)
# Bucket the raw rates: 0 stays 0, 1-5% -> 5, 6-65% -> 10.
# NOTE(review): values above 65 (if any) would pass through unrecoded.
from <- list(0, c(1:5), c(6:65))
to <- list(0, 5, 10)
library(doBy)
dc_rate <- recodeVar(dc_rate , from , to)
dc_rate_f <- factor(dc_rate, levels = c(0,5,10), labels = c('0%','5%','10%'))
data$dc_rate <- dc_rate
data$dc_rate_f <- dc_rate_f
dc_rate
summary(data$dc_rate)
#-------------------------------------------------------------
# inst_tot coding: 무이자 할부 (interest-free installment) = 1 /
# 유이자 할부 (interest-bearing installment) = 2 / 일시불 (lump sum) = 3
str(data)
data_tmp <- data
# All observed (inst_mon, inst_fee) combinations; Freq column is discarded
# below and replaced by the manual inst_tot coding.
tmp <- as.data.frame(table(data_tmp$inst_mon, data_tmp$inst_fee))
tmp
names(tmp) <- c('inst_mon', 'inst_fee', 'inst_tot')
tmp
# installment factor
# NOTE(review): this hand-coded vector depends on the exact row order of
# table() above (24 combinations) -- it will silently miscode if the data
# gains or loses combinations. Verify against tmp printed above.
tmp$inst_tot <- c(3, 1, 1, 1, 1, 1 ,1, 1 ,1 ,1 ,1, 1, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 )
tmp
data_tmp <- merge(data_tmp, tmp, by = c('inst_mon', 'inst_fee'))
data_inst <- data_tmp
data_inst
str(data_inst)
# Keep only non-negative net amounts (exclude refunds).
data_pos <- data_inst[data_inst$net_amt>=0,]
str(data_pos)
data_pos$inst_tot_f <- as.factor(data_pos$inst_tot)
data_pos$inst_tot_f <- factor(data_pos$inst_tot_f, levels= c(1:3), labels = c('무이자할부', '유이자할부','일시불'))
#------------------------------------------------buyer_nm_f------------------------------------------------
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
# Product-category factor ordered by first appearance in the data.
# The original converted to labels 0:34 and immediately back to the category
# names -- a no-op round trip that also hard-coded 35 levels. A single
# factor() call with levels = unique(...) is exactly equivalent and works for
# any number of categories.
length(unique(data_pos$buyer_nm))
data_pos$buyer_nm_f <- factor(data_pos$buyer_nm, levels = unique(data_pos$buyer_nm))
table(data_pos$buyer_nm_f)
#-------------------------------------buyer_nm /dc_rate / count/ proposition-------------------------------------------------------
# Stacked bars of the within-category *percentage* of transactions per
# discount-rate bucket (columns of prop.table sum to 100).
library(doBy)
tmp <- table(data_pos$dc_rate_f, data_pos$buyer_nm_f)
tmp_prop <- prop.table(tmp, 2)
tmp_prop <- round(tmp_prop, 4) *100
tmp_prop
tmp_prop <- as.data.frame(tmp_prop)
# 할인율 = discount rate, 카테고리별 = category, 건수 = count.
names(tmp_prop) <- c('할인율', '카테고리별', '건수')
tmp_prop
# Drop rows 79-105 (sparse trailing categories; see note on next line).
tmp_prop<-tmp_prop[-c(79:105),]
tmp_prop
table(data_pos$buyer_nm_f) # sparse categories: 행사장, 조리식품, 청과곡물, 점외
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=건수, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매건수 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=건수, label = paste(건수,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, face = "bold", vjust=0, color="black", size=12),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#---- buyer_nm x dc_rate: raw transaction counts per category ----
# Cross-tabulate discount-rate bucket against product category and draw the
# raw counts as stacked bars (one bar per category).
library(doBy)
dc_cnt <- as.data.frame(table(data_pos$dc_rate_f, data_pos$buyer_nm_f))
names(dc_cnt) <- c('할인율', '카테고리별', '건수')
dc_cnt
# Drop rows 79-105, mirroring the other panels (sparse trailing categories).
dc_cnt <- dc_cnt[-(79:105), ]
dc_cnt
table(data_pos$buyer_nm_f) # sparse categories: 행사장, 조리식품, 청과곡물, 점외
dc_cnt_plot <- ggplot(dc_cnt, aes(x=카테고리별, y=건수, fill=할인율)) +
  ggtitle("할인율에 따른 카테고리별 판매건수 비교(real, NO SCALE)") +
  geom_bar(stat="identity") +
  geom_text(aes(y=건수, label = paste(건수)),
            position = position_stack(vjust = 0.5), color = "black", size=3) +
  theme(axis.text.x = element_text(angle=90, face = "bold", vjust=0, color="black", size=14),
        plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
dc_cnt_plot
#---------------------------------------buyer_nm /dc_rate / net_amt /Proposition-----------------------------------------------
# Stacked bars of the within-category *percentage* of sales amount per
# discount-rate bucket.
tmp <- aggregate(net_amt ~ buyer_nm_f + dc_rate_f, data_pos, sum, drop = FALSE)
# drop = FALSE keeps empty combinations (as NA); zero them out.
tmp[is.na(tmp)]<-0
tmp
# Reshape long -> discount x category matrix.
# NOTE(review): assumes aggregate() orders rows by dc_rate_f then buyer_nm_f.
temp <-matrix(as.numeric(tmp$net_amt), ncol = length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <-levels(tmp$buyer_nm_f)
rownames(temp)<-levels(tmp$dc_rate_f)
temp <- as.table(temp)
temp
# Column-wise proportions -> percentage with 2 decimal places.
tmp_prop <- prop.table(temp,2)
tmp_prop <-round(tmp_prop ,4)*100
tmp_prop <-as.data.frame(tmp_prop)
tmp_prop
# Drop rows 79-105 (sparse trailing categories).
tmp_prop<-tmp_prop[-c(79:105),]
tmp_prop
# 할인율 = discount rate, 카테고리별 = category, 금액 = amount.
names(tmp_prop) <-c('할인율','카테고리별','금액')
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=금액, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매금액 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 1, face = "bold", vjust=0, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#---------------------------------------buyer_nm /dc_rate / net_amt / Real-----------------------------------------------
# Stacked bars of the raw sales amount per category, split by discount bucket.
tmp <- aggregate(net_amt ~ buyer_nm_f + dc_rate_f, data_pos, sum, drop = FALSE)
# drop = FALSE keeps empty combinations (as NA); zero them out.
tmp[is.na(tmp)]<-0
tmp
# Reshape long -> discount x category matrix.
# NOTE(review): assumes aggregate() orders rows by dc_rate_f then buyer_nm_f.
temp <-matrix(as.numeric(tmp$net_amt), ncol = length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <-levels(tmp$buyer_nm_f)
rownames(temp)<-levels(tmp$dc_rate_f)
temp <- as.table(temp)
temp
temp <-as.data.frame(temp)
temp
# Drop rows 79-105 (sparse trailing categories).
temp<-temp[-c(79:105),]
temp
# 할인율 = discount rate, 카테고리별 = category, 금액 = amount.
names(temp) <-c('할인율','카테고리별','금액')
ggplot(as.data.frame(temp), aes(x=카테고리별, y=금액, fill=할인율)) +
ggtitle("할인율에 따른 카테고리별 판매금액 비교(real)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액)),position = position_stack(vjust = 0.5), color = "black", size=2)+
theme(axis.text.x = element_text(angle=90, hjust = 1, vjust=0, color="black", size=10),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#--------------------------------------------buyer_nm /inst_tot / count /Proposition---------------------------------------------------------
# inst_tot factor version added upstream.
# inst_tot coding: 무이자 할부 (interest-free) = 1 / 유이자 할부 (interest-bearing) = 2 / 일시불 (lump sum) = 3
#data_pos$inst_tot_f <- factor(data_pos$inst_tot, levels = c(1:3), labels = c("무이자 할부", "유이자 할부", "일시불"))
#-------------------------------------------------------------
# Stacked bars of the within-category *percentage* of transactions per
# installment factor.
tmp <- table(data_pos$inst_tot_f, data_pos$buyer_nm_f)
tmp_prop <-prop.table(tmp,2)
tmp_prop <- round(tmp_prop,4)*100
tmp_prop <-as.data.frame(tmp_prop)
# 할부요인 = installment factor, 카테고리별 = category, 건수 = count.
names(tmp_prop) <- c('할부요인','카테고리별', '건수')
tmp_prop
# Drop rows 79-105 (sparse trailing categories).
tmp_prop<-tmp_prop[-c(79:105),]
tmp_prop
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=건수, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매건수 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=건수, label = paste(건수,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 1, vjust=0, color="black", size=10),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#---- buyer_nm x inst_tot: raw transaction counts per category ----
# inst_tot coding: 무이자 할부 (interest-free) = 1 / 유이자 할부 (interest-bearing) = 2 / 일시불 (lump sum) = 3
#data_pos$inst_tot_f <- factor(data_pos$inst_tot, levels = c(1:3), labels = c("무이자 할부", "유이자 할부", "일시불"))
# Cross-tabulate installment factor against product category and draw the
# raw counts as stacked bars (one bar per category).
inst_cnt <- as.data.frame(table(data_pos$inst_tot_f, data_pos$buyer_nm_f))
names(inst_cnt) <- c('할부요인','카테고리별', '건수')
inst_cnt
# Drop rows 79-105, mirroring the other panels (sparse trailing categories).
inst_cnt <- inst_cnt[-(79:105), ]
inst_cnt
inst_cnt_plot <- ggplot(inst_cnt, aes(x=카테고리별, y=건수, fill=할부요인)) +
  ggtitle("할부요인에 따른 카테고리별 판매건수 비교(real, NO SCALE)") +
  geom_bar(stat="identity") +
  geom_text(aes(y=건수, label = paste(건수)),
            position = position_stack(vjust = 0.5), color = "black", size=3) +
  theme(axis.text.x = element_text(angle=90, hjust = 1, face = "bold", vjust=0, color="black", size=13),
        plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
inst_cnt_plot
#--------------------------------------------buyer_nm /inst_tot / net_amt / Proposition-------------------------------------------------------------
# Stacked bars of the within-category *percentage* of sales amount per
# installment factor.
tmp <- aggregate(net_amt ~ buyer_nm_f + inst_tot_f, data_pos, sum, drop=FALSE)
# drop=FALSE keeps empty combinations (as NA); zero them out.
tmp[is.na(tmp)] <- 0
tmp
# Reshape long -> installment x category matrix.
# NOTE(review): assumes aggregate() orders rows by inst_tot_f then buyer_nm_f.
temp <- matrix(as.numeric(tmp$net_amt), ncol=length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <- levels(tmp$buyer_nm_f)
rownames(temp) <- levels(tmp$inst_tot_f)
temp
temp <- as.table(temp)
# Column-wise proportions -> percentage with 2 decimal places.
tmp_prop <- prop.table(temp, 2)
tmp_prop <- round(tmp_prop, 4)*100
tmp_prop <- as.data.frame(tmp_prop)
tmp_prop
# 할부요인 = installment factor, 카테고리별 = category, 금액 = amount.
names(tmp_prop) <- c('할부요인', '카테고리별', '금액')
tmp_prop
# Drop rows 79-105 (sparse trailing categories).
tmp_prop <-tmp_prop[-c(79:105),]
tmp_prop
ggplot(as.data.frame(tmp_prop), aes(x=카테고리별, y=금액, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매금액 비교(NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액,"%")),position = position_stack(vjust = 0.5), color = "black", size=3)+
theme(axis.text.x = element_text(angle=90, hjust = 0.5, face= 'bold', vjust=0.5, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
#--------------------------------------------buyer_nm /inst_tot / net_amt / Real-------------------------------------------------------------
# Stacked bars of the raw sales amount per category, split by installment
# factor. (Same pipeline as the proportion panel above, without prop.table.)
tmp <- aggregate(net_amt ~ buyer_nm_f + inst_tot_f, data_pos, sum, drop=FALSE)
# drop=FALSE keeps empty combinations (as NA); zero them out.
tmp[is.na(tmp)] <- 0
tmp
# Reshape long -> installment x category matrix.
# NOTE(review): assumes aggregate() orders rows by inst_tot_f then buyer_nm_f.
temp <- matrix(as.numeric(tmp$net_amt), ncol=length(unique(tmp$buyer_nm_f)), byrow=TRUE)
colnames(temp) <- levels(tmp$buyer_nm_f)
rownames(temp) <- levels(tmp$inst_tot_f)
temp
temp <- as.table(temp)
temp <- as.data.frame(temp)
temp
# 할부요인 = installment factor, 카테고리별 = category, 금액 = amount.
names(temp) <- c('할부요인', '카테고리별', '금액')
temp
# Drop rows 79-105 (sparse trailing categories).
temp <-temp[-c(79:105),]
temp
ggplot(as.data.frame(temp), aes(x=카테고리별, y=금액, fill=할부요인)) +
ggtitle("할부요인에 따른 카테고리별 판매금액 비교(real, NO SCALE)")+
geom_bar(stat="identity")+
geom_text(aes(y=금액, label = paste(금액)),position = position_stack(vjust = 0.5), color = "black", size=2)+
theme(axis.text.x = element_text(angle=90, face = "bold", hjust = 0.5, vjust=0.5, color="black", size=13),
plot.title = element_text(family="serif", face = "bold", hjust= 0.5, size=20))
|
library(ggplot2)
# Correlation plots between video-coded measurement quality and questionnaire
# scores (Q1/Q4) for items M201/M301/M305. Each panel shows a linear trend,
# a Spearman correlation annotation, and points sized by overlap count.
#
# Fixes vs. the original:
#  * The three Q4 panels plotted M*_MessungKorrekt on x although cor.test and
#    the x-axis label refer to M*_Messwiederholung -- x now matches.
#  * "+ ggsave(...)" inside the plot chain saved last_plot(), i.e. the
#    previously rendered plot. Plots are now printed, then saved explicitly
#    via ggsave(plot = ...).
#  * aes() now references columns directly instead of d$... vectors.
d <- read.csv(file="./CodierungVideo.csv", head=TRUE, sep=",",stringsAsFactors=FALSE)
theme_set(theme_grey(base_size = 18))

# --- Q1 vs. korrekte Messung, item M201 ---
cor.test(d$M201_MessungKorrekt, d$M201_Q1,method="spearm")
p <- ggplot(d, aes(x=M201_MessungKorrekt, y=M201_Q1)) +
  geom_smooth(method=lm) + # Add linear regression line
  geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = - 0.13"))+
  geom_text(data = data.frame(), aes(2.71, 1.4, label = "p-value = 0.76"))+
  stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
  ylab("Q1") +
  xlab("Video korrekte Messung")
print(p)
ggsave(file="corVideoQ1201.png", plot = p)

# --- Q1 vs. korrekte Messung, item M301 ---
cor.test(d$M301_MessungKorrekt, d$M301_Q1,method="spearm")
p <- ggplot(d, aes(x=M301_MessungKorrekt, y=M301_Q1)) +
  geom_smooth(method=lm) + # Add linear regression line
  geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.00"))+
  geom_text(data = data.frame(), aes(2.71, 1.4, label = "p-value = 1.00"))+
  stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
  ylab("Q1") +
  xlab("Video korrekte Messung")
print(p)
ggsave(file="corVideoQ1301.png", plot = p)

# --- Q1 vs. korrekte Messung, item M305 ---
cor.test(d$M305_MessungKorrekt, d$M305_Q1,method="spearm")
p <- ggplot(d, aes(x=M305_MessungKorrekt, y=M305_Q1)) +
  geom_smooth(method=lm) + # Add linear regression line
  geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.26"))+
  geom_text(data = data.frame(), aes(2.7, 1.4, label = "p-value = 0.53"))+
  stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
  ylab("Q1") +
  xlab("Video korrekte Messung")
print(p)
ggsave(file="corVideoQ1305.png", plot = p)

# --- Q4 vs. Messwiederholung, item M201 (x variable corrected) ---
cor.test(d$M201_Messwiederholung, d$M201_Q4,method="spearm")
p <- ggplot(d, aes(x=M201_Messwiederholung, y=M201_Q4)) +
  geom_smooth(method=lm) + # Add linear regression line
  geom_text(data = data.frame(), aes(2.6, 0.055, label = "Spearman-rho = 0.00"))+
  geom_text(data = data.frame(), aes(2.7, 0.052, label = "p-value = 1.00"))+
  stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
  ylab("Q4") +
  xlab("Video Messwiederholung")
print(p)
ggsave(file="corVideoQ4201.png", plot = p)

# --- Q4 vs. Messwiederholung, item M301 (x variable corrected) ---
cor.test(d$M301_Messwiederholung, d$M301_Q4,method="spearm")
p <- ggplot(d, aes(x=M301_Messwiederholung, y=M301_Q4)) +
  geom_smooth(method=lm) + # Add linear regression line
  geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.00"))+
  geom_text(data = data.frame(), aes(2.71, 1.4, label = "p-value = 1.00"))+
  stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
  ylab("Q4") +
  xlab("Video Messwiederholung")
print(p)
ggsave(file="corVideoQ4301.png", plot = p)

# --- Q4 vs. Messwiederholung, item M305 (x variable corrected) ---
cor.test(d$M305_Messwiederholung, d$M305_Q4,method="spearm")
p <- ggplot(d, aes(x=M305_Messwiederholung, y=M305_Q4)) +
  geom_smooth(method=lm) + # Add linear regression line
  geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.07"))+
  geom_text(data = data.frame(), aes(2.7, 1.4, label = "p-value = 0.87"))+
  stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
  ylab("Q4") +
  xlab("Video Messwiederholung")
print(p)
ggsave(file="corVideoQ4305.png", plot = p)
|
/Auswertung/VideoCodierung.R
|
no_license
|
DavidSichau/masterarbeit-phzh
|
R
| false
| false
| 3,297
|
r
|
library(ggplot2)
# Correlation plots between video-coded measurement quality and questionnaire
# scores (Q1/Q4) for items M201/M301/M305.
# NOTE(review): chaining "+ ggsave(...)" onto a plot saves last_plot(), i.e.
# the previously rendered plot, not the one being built -- verify the output
# PNGs; prefer print(p); ggsave(file, plot = p).
d <- read.csv(file="./CodierungVideo.csv", head=TRUE, sep=",",stringsAsFactors=FALSE)
cor.test(d$M201_MessungKorrekt, d$M201_Q1,method="spearm")
theme_set(theme_grey(base_size = 18))
ggplot(d, aes(x=d$M201_MessungKorrekt, y=d$M201_Q1)) +
geom_smooth(method=lm) + # Add linear regression line
geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = - 0.13"))+
geom_text(data = data.frame(), aes(2.71, 1.4, label = "p-value = 0.76"))+
stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
ylab("Q1") +
xlab("Video korrekte Messung") +
ggsave(file="corVideoQ1201.png")
cor.test(d$M301_MessungKorrekt, d$M301_Q1,method="spearm")
theme_set(theme_grey(base_size = 18))
ggplot(d, aes(x=d$M301_MessungKorrekt, y=d$M301_Q1)) +
geom_smooth(method=lm) + # Add linear regression line
geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.00"))+
geom_text(data = data.frame(), aes(2.71, 1.4, label = "p-value = 1.00"))+
stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
ylab("Q1") +
xlab("Video korrekte Messung") +
ggsave(file="corVideoQ1301.png")
cor.test(d$M305_MessungKorrekt, d$M305_Q1,method="spearm")
theme_set(theme_grey(base_size = 18))
ggplot(d, aes(x=d$M305_MessungKorrekt, y=d$M305_Q1)) +
geom_smooth(method=lm) + # Add linear regression line
geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.26"))+
geom_text(data = data.frame(), aes(2.7, 1.4, label = "p-value = 0.53"))+
stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
ylab("Q1") +
xlab("Video korrekte Messung") +
ggsave(file="corVideoQ1305.png")
# NOTE(review): the three Q4 panels below test M*_Messwiederholung with
# cor.test and label the x axis "Video Messwiederholung", but plot
# M*_MessungKorrekt on x -- likely a copy-paste error; confirm intent.
cor.test(d$M201_Messwiederholung, d$M201_Q4,method="spearm")
theme_set(theme_grey(base_size = 18))
ggplot(d, aes(x=d$M201_MessungKorrekt, y=d$M201_Q4)) +
geom_smooth(method=lm) + # Add linear regression line
geom_text(data = data.frame(), aes(2.6, 0.055, label = "Spearman-rho = 0.00"))+
geom_text(data = data.frame(), aes(2.7, 0.052, label = "p-value = 1.00"))+
stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
ylab("Q4") +
xlab("Video Messwiederholung") +
ggsave(file="corVideoQ4201.png")
cor.test(d$M301_Messwiederholung, d$M301_Q4,method="spearm")
theme_set(theme_grey(base_size = 18))
ggplot(d, aes(x=d$M301_MessungKorrekt, y=d$M301_Q4)) +
geom_smooth(method=lm) + # Add linear regression line
geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.00"))+
geom_text(data = data.frame(), aes(2.71, 1.4, label = "p-value = 1.00"))+
stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
ylab("Q4") +
xlab("Video Messwiederholung") +
ggsave(file="corVideoQ4301.png")
cor.test(d$M305_Messwiederholung, d$M305_Q4,method="spearm")
theme_set(theme_grey(base_size = 18))
ggplot(d, aes(x=d$M305_MessungKorrekt, y=d$M305_Q4)) +
geom_smooth(method=lm) + # Add linear regression line
geom_text(data = data.frame(), aes(2.6, 1.5, label = "Spearman-rho = 0.07"))+
geom_text(data = data.frame(), aes(2.7, 1.4, label = "p-value = 0.87"))+
stat_sum( geom = "point", aes(size = ..n..))+ scale_size(range = c(2, 10))+
ylab("Q4") +
xlab("Video Messwiederholung") +
ggsave(file="corVideoQ4305.png")
|
###############################################
# Code for creating Figure 4 for core-transient manuscript
# Setup: modeling (lme4/merTools/MuMIn/piecewiseSEM), data wrangling
# (plyr/dplyr/tidyr) and spatial (sp/rgdal/raster) packages.
# NOTE: plyr is loaded before dplyr deliberately -- reversing the order
# would mask dplyr verbs.
library(lme4)
library(plyr) # for core-transient functions
library(ggplot2)
library(merTools)
library(tidyr)
library(maps)
library(gridExtra)
library(RColorBrewer)
library(sp)
library(rgdal)
library(raster)
library(dplyr)
library(digest)
library(Hmisc)
library(piecewiseSEM)
library(MuMIn)
source('scripts/R-scripts/core-transient_functions.R')
# Specify here the datasetIDs and then run the code below.
dataformattingtable = read.csv('data_formatting_table.csv', header = T)
# Keep only datasets flagged as fully formatted.
datasetIDs = dataformattingtable$dataset_ID[dataformattingtable$format_flag == 1]
# BBS (dataset 1) will be analyzed separately for now.
datasetIDs = datasetIDs[!datasetIDs %in% c(1)]
#################### FIG 4 #########################
# Site-level occupancy summaries per dataset/taxon.
occ_taxa=read.csv("output/tabular_data/occ_taxa.csv",header=TRUE)
# One color per taxonomic group (order matches the trailing comments).
colors7 = c(colors()[552], # plankton
rgb(29/255, 106/255, 155/255), #bird
colors()[144], # invert
colors()[139], # plant
colors()[551], #mammal
colors()[17], #benthos
colors()[637]) #fish
# Plotting symbols in the same taxon order as colors7.
symbols7 = c(16, 18, 167, 15, 17, 1, 3)
taxcolors = read.csv("output/tabular_data/taxcolors.csv", header = TRUE)
# Datasets with a variable spatial scale and finished formatting.
scaleIDs = filter(dataformattingtable, spatial_scale_variable == 'Y',
format_flag == 1)$dataset_ID
# subsetting to only count ids
scaleIDs = scaleIDs[! scaleIDs %in% c(207, 210, 217, 218, 222, 223, 225, 238, 241,258, 282, 322, 280,317)]
bbs_abun = read.csv("data/BBS/bbs_allscales33.csv", header=TRUE)
# Harmonize BBS column naming with occ_taxa (pctTrans).
bbs_abun$pctTrans = bbs_abun$propTrans
# convert km2 to m2
bbs_abun$area = bbs_abun$area * 1000000
#### Fig 4a Area #####
# Merge site-level % transients with sampled areas, then append BBS so all
# datasets share the columns datasetID/site/taxa/pctTrans/area.
area = read.csv("output/tabular_data/scaled_areas_3_2.csv", header = TRUE)
# NOTE: merge() has no na.rm argument -- the original passed na.rm = TRUE,
# which was silently ignored, and also contained a no-op self-assignment of
# $area; both removed.
areamerge.5 = merge(occ_taxa[, c("datasetID", "site", "pctTrans")], area,
                    by = c("datasetID", "site"))
areamerge1 = areamerge.5[, c("datasetID", "site", "taxa", "pctTrans", "area")]
# read in bbs abundance data (area already converted to m^2 above)
bbs_area = bbs_abun[, c("datasetID", "site", "taxa", "pctTrans", "area")]
areamerge = rbind(bbs_area, areamerge1)
# write.csv(areamerge, "output/tabular_data/areamerge.csv", row.names = FALSE)
#### Figures 4a-4c panel plot #####
# Recompute variable-scale dataset IDs with a slightly different exclusion
# list than above (includes 248/254/279/291, drops 238/317).
scaleIDs = filter(dataformattingtable, spatial_scale_variable == 'Y',
format_flag == 1)$dataset_ID
scaleIDs = scaleIDs[! scaleIDs %in% c(207, 210, 217, 218, 222, 223, 225, 241,258, 282, 322, 280, 248, 254, 279, 291)] # waiting on data for 248
# Stack BBS and non-BBS site summaries on common columns.
bbs_abun$pctCore = bbs_abun$propCore
bbs_spRich = bbs_abun[,c("datasetID","site","taxa", "meanAbundance", "pctTrans","pctCore")]
occ_merge = occ_taxa[,c("datasetID", "site","taxa", "meanAbundance", "pctTrans","pctCore")]
bbs_occ = rbind(bbs_spRich,occ_merge)
# Drop two specific problem sites -- reason not documented here; TODO confirm.
bbs_occ = bbs_occ[!bbs_occ$site %in% c("53800-5-6", "53800-25-2"),]
#### Fig 4c/d predicted model ####
# Mixed model: % transients ~ community size x taxon, random slope/intercept
# by dataset.
bbs_occ_pred = bbs_occ[!bbs_occ$datasetID %in% c(207, 210, 217, 218, 222, 223, 225, 238, 241, 248, 258, 282, 322, 280,317),]
mod4c = lmer(pctTrans ~ log10(meanAbundance) * taxa + (log10(meanAbundance)|datasetID), data = bbs_occ_pred)
summary(mod4c)
# Prediction grid: one row per taxon at the median community size; the fake
# datasetID 999 forces population-level prediction.
occ_sub_pred = data.frame(datasetID = 999, taxa = unique(bbs_occ_pred$taxa), meanAbundance = 102) # 102 is median abun for data frame (median(bbs_occ_pred$meanAbundance))
# to test: test = filter(occ_sub_pred, taxa == "Invertebrate")
predmod4c = merTools::predictInterval(mod4c, occ_sub_pred, n.sims=1000)
# matching by predicted output vals based on occ_sub_pred
# NOTE(review): this hard-coded taxa vector must match the row order of
# unique(bbs_occ_pred$taxa) above -- verify whenever the input data change.
predmod4c$taxa = c("Bird","Invertebrate", "Plant", "Mammal","Fish", "Plankton", "Benthos")
# write.csv(predmod4c, "output/tabular_data/predmod4c.csv", row.names = FALSE)
predmod = merge(predmod4c, taxcolors, by = "taxa")
lm.hsd = lm(fit ~ taxa, data= predmod) #Tukeys HSD
summary(aov(fit ~ taxa, data= predmod), test = "Chisq")
# agricolae::HSD.test(lm.hsd, "taxa")
# Display order of taxa bars in panel C.
predmod$order = c(1,4,3,6,7,5,2)
# 4d: same model but with ecosystem type (terrestrial/freshwater/marine)
# instead of taxon.
ecosys = merge(bbs_occ_pred, dataformattingtable[,c("dataset_ID", "system")], by.y = "dataset_ID", by.x = "datasetID")
mod4d = lmer(pctTrans ~ log10(meanAbundance) * system + (log10(meanAbundance)|datasetID), data=ecosys)
summary(mod4d)
# Predict each system at the median community size (datasetID 999 = new group).
occ_pred_4d = data.frame(datasetID = 999, system = unique(ecosys$system), meanAbundance = 102) # 102 is median abun for data frame (median(bbs_occ_pred$meanAbundance))
predmod4d = merTools::predictInterval(mod4d, occ_pred_4d, n.sims=1000)
# Display order of system bars in panel D.
predmod4d$order = c(1:3)
# pseudo r2 area
bbs_occ_area = merge(bbs_occ_pred, areamerge[,c("datasetID", "site", "area")], by = c("datasetID", "site"))
mod4a = lmer(pctTrans ~ log10(area) * taxa + (log10(area)|datasetID), data=bbs_occ_area)
r.squaredGLMM(mod4a)
# Wald-style p-values from the t statistics (lmer reports no p-values).
coefs <- data.frame(coef(summary(mod4a)))
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
# R2 area -- simple (non-mixed) fits for comparison.
modar = lm(pctTrans~log10(area), data=bbs_occ_area)
summary(modar)
mod6 = lm(pctTrans~log10(meanAbundance), data=bbs_occ_area)
summary(mod6)
# pseudo r2 abun
mod4b = lmer(pctTrans ~ log10(meanAbundance) * taxa + (log10(meanAbundance)|datasetID), data = bbs_occ_area)
rsquared(mod4b, aicc = FALSE)
# NOTE: coefs is intentionally overwritten with mod4b's coefficients here.
coefs <- data.frame(coef(summary(mod4b)))
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
# https://ecologyforacrowdedplanet.wordpress.com/2013/08/27/r-squared-in-mixed-models-the-easy-way/
#### panel plot ####
# area_plot accumulates per-dataset fit summaries inside the plotting loop.
area_plot = data.frame()
areafig = read.csv("output/tabular_data/areafig.csv", header = TRUE)
# NOTE(review): merge() has no na.rm argument; na.rm = TRUE is ignored here.
area.5 = merge(occ_taxa[,c("datasetID", "site", "pctTrans")], areafig, by = c("datasetID", "site"), na.rm = TRUE)
area.5 = area.5 [, c("datasetID", "site", "taxa", "pctTrans", "area")]
areamerge.5 = rbind(bbs_area,area.5)
# Restrict the figure to variable-scale datasets only.
areamerge_fig = subset(areamerge.5, datasetID %in% scaleIDs)
# Open the 2x2 multi-panel PDF for Figures 4A-4D.
pdf('output/plots/4a_4d.pdf', height = 10, width = 14)
par(mfrow = c(2, 2), mar = c(5,5,1,1), cex = 1, oma = c(0,0,0,0), las = 1)
palette(colors7)
# Pooled fit of % transients vs. log10(area) across all datasets.
all = lm(areamerge_fig$pctTrans ~ log10(areamerge_fig$area))
xnew = range(log10(areamerge_fig$area))
# NOTE(review): the newdata column is named "X.xnew." and does not match the
# model term, so predict() falls back to the fitted values (with a warning).
# Only range(xhat) is used below, which coincides for a monotone linear fit,
# but this should be rewritten with a properly named newdata data frame.
xhat <- predict(all, newdata = data.frame((xnew)))
xhats = range(xhat)
lower = range(xhat)[1]
upper = range(xhat)[2]
# Empty panel A canvas; per-dataset lines are overlaid in the loop below.
plot(NA, xlim = c(-2, 8), ylim = c(0,1), xlab = expression("log"[10]*" Area (m"^2*")"),
ylab = "% Transients", cex.lab = 2, frame.plot=FALSE, xaxt = "n", yaxt = "n",
mgp = c(3.25,1,0))
axis(1, cex.axis = 1.5)
axis(2, cex.axis = 1.5)
# One regression line per dataset, colored by taxon; fit summaries are
# appended to area_plot (grown row-by-row via rbind, so columns come out
# as character and are re-typed after the device closes).
b1 = for(id in scaleIDs){
print(id)
plotsub = subset(areamerge_fig,datasetID == id)
taxa = as.character(unique(plotsub$taxa))
mod4 = lm(plotsub$pctTrans ~ log10(plotsub$area))
mod4.slope = summary(mod4)$coef[2,"Estimate"]
# NOTE(review): summary(mod4$coef[1])[3] summarizes a single number (the
# intercept) -- and mod4.coef1 is never used afterwards.
mod4.coef1 = summary(mod4$coef[1])[3]
xnew = range(log10(plotsub$area))
# Same predict()/newdata naming issue as above; only the range is used.
xhat <- predict(mod4, newdata = data.frame((xnew)))
xhats = range(xhat)
lower = range(xhat)[1]
upper = range(xhat)[2]
print(xhats)
taxcolor = subset(taxcolors, taxa == as.character(plotsub$taxa)[1])
y= summary(mod4)$coef[1]+ (xhats)*summary(mod4)$coef[2]
area_plot = rbind(area_plot , c(id, lower,upper, mod4.slope,taxa))
lines(log10(plotsub$area), fitted(mod4), col=as.character(taxcolor$color),lwd=4)
# points(log10(plotsub$area), plotsub$pctTrans)
par(new=TRUE)
}
# Overlay the pooled fit in black and label panel A.
lines(log10(areamerge_fig$area), fitted(all), col="black", lwd=4)
title(outer=FALSE,adj=0.02,main="A",cex.main=2,col="black",font=2,line=-1)
par(new= FALSE)
# Panel B: % transients vs. log10(community size), same structure as panel A.
bbs_occ_plot = subset(bbs_occ, datasetID %in% scaleIDs)
occ_all = lm(bbs_occ_plot$pctTrans ~ log10(bbs_occ_plot$meanAbundance))
xnew = range(log10(bbs_occ_plot$meanAbundance))
# NOTE(review): same predict()/newdata column-name mismatch as in panel A;
# xhat equals the fitted values and only its range is used.
xhat <- predict(occ_all, newdata = data.frame((xnew)))
xhats = range(xhat)
plot(NA, xlim = c(0, 7), ylim = c(0,1), col = as.character(taxcolor$color), xlab = expression("log"[10]*" Community Size"), ylab = "% Transients", cex.lab = 2,frame.plot=FALSE, yaxt = "n", xaxt = "n", mgp = c(3.25,1,0))
axis(1, cex.axis = 1.5)
axis(2, cex.axis = 1.5)
# One regression line per dataset, colored by taxon.
b2 = for(id in scaleIDs){
print(id)
plotsub = subset(bbs_occ_plot,datasetID == id)
mod4 = lm(plotsub$pctTrans ~ log10(plotsub$meanAbundance))
xnew = range(log10(plotsub$meanAbundance))
xhat <- predict(mod4, newdata = data.frame((xnew)))
xhats = range(xhat)
print(xhats)
taxcolor = subset(taxcolors, taxa == as.character(plotsub$taxa)[1])
y=summary(mod4)$coef[1] + (xhats)*summary(mod4)$coef[2]
lines(log10(plotsub$meanAbundance), fitted(mod4), col=as.character(taxcolor$color),lwd=4)
# points(log10(plotsub$meanAbundance), plotsub$pctTrans)
par(new=TRUE)
}
# Dotted reference line at the median community size used for panels C/D.
abline(v = log10(102), lty = 'dotted', lwd = 2)
par(new=TRUE)
title(outer=FALSE,adj=0.02,main="B",cex.main=2,col="black",font=2,line=-1)
lines(log10(bbs_occ_plot$meanAbundance), fitted(occ_all), col="black",lwd=4)
legend('topright', legend = as.character(taxcolors$taxa), lty=1,lwd=3,col = as.character(taxcolors$color), cex = 1.5, bty = "n")
par(new = FALSE)
# Panel C: predicted % transients per taxon at median community size.
# NOTE(review): the hard-coded color vector must correspond to the taxa order
# produced by predmod$order -- verify against predmod before editing.
b4 = barplot(predmod$fit[predmod$order], cex.names = 2,col = c(colors()[17],"gold2", "turquoise2","red","forestgreen","purple4","#1D6A9B"), ylim = c(0, 1.1), yaxt = "n")
axis(2, cex.axis = 1.5)
# Prediction-interval whiskers; x positions match default barplot centers.
Hmisc::errbar(c(0.7, 1.9, 3.1, 4.3, 5.5, 6.7, 7.9), predmod$fit[predmod$order], predmod$upr[predmod$order], predmod$lwr[predmod$order], add= TRUE, lwd = 1.25, pch = 3)
mtext("% Transients", 2, cex = 2, las = 0, line = 3, mgp = c(3.25,1,0))
title(outer=FALSE,adj=0.02,main="C",cex.main=2,col="black",font=2,line=-1)
# Panel D: predicted % transients per ecosystem type.
b4 = barplot(predmod4d$fit[predmod4d$order], cex.names = 1.5,col = c('burlywood','skyblue','navy'), ylim = c(0, 0.9), yaxt = "n")
axis(2, cex.axis = 1.5)
Hmisc::errbar(c(0.7, 1.9, 3.1), predmod4d$fit[predmod4d$order], predmod4d$upr[predmod4d$order], predmod4d$lwr[predmod4d$order], add= TRUE, lwd = 1.25, pch = 3)
mtext("% Transients", 2, cex = 2, las = 0, line = 3, mgp = c(3.25,1,0))
title(outer=FALSE,adj=0.02,main="D",cex.main=2,col="black",font=2,line=-1)
# Close the PDF device. (The original called dev.off() twice; the second call
# would only error or close an unrelated device, so it was removed.)
dev.off()
# area_plot was grown with rbind(c(...)) inside the panel-A loop, so every
# column is character; name and re-type the numeric columns before saving.
colnames(area_plot) = c("id","xlow","xhigh","slope", "taxa")
area_plot = data.frame(area_plot)
area_plot$datasetID = as.numeric(area_plot$id)
area_plot$xlow = as.numeric(area_plot$xlow)
area_plot$xhigh = as.numeric(area_plot$xhigh)
area_plot$slope = as.numeric(area_plot$slope)
write.csv(area_plot, "output/tabular_data/fig_4a_output.csv", row.names =FALSE)
####### elev heterogeneity model ################
# Combine non-BBS and BBS site coordinates into one table with the columns
# datasetID/taxa/site/Lat/Lon.
latlongs = read.csv("data/latlongs/latlongs.csv", header =TRUE)
latlongs = filter(latlongs, datasetID != 1)
latlongs = filter(latlongs, taxa != "Fish")
bbs_latlong = read.csv("data/latlongs/bbs_2000_2014_latlongs.csv", header = TRUE)
bbs_latlong$datasetID = 1
bbs_latlong$taxa = "Bird"
bbs_latlong$Lon = bbs_latlong$Longi
bbs_latlong$Lat = bbs_latlong$Lati
bbs_latlong$site = as.factor(bbs_latlong$stateroute)
bbs_latlong = bbs_latlong[, c("datasetID", "taxa", "site", "Lat", "Lon")]
all_latlongs = rbind(latlongs, bbs_latlong)
all_latlongs = na.omit(all_latlongs)
# Makes routes into a spatialPointsDataframe
coordinates(all_latlongs)=c('Lon','Lat')
projection(all_latlongs) = CRS("+proj=longlat +ellps=WGS84")
# Lambert azimuthal equal-area projection, km units (used for buffers below).
prj.string <- CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km")
# "+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"
# Transforms routes to an equal-area projection - see previously defined prj.string
routes.laea = spTransform(all_latlongs, CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"))
##### extracting elevation data ####
# A function that draws a circle of radius r around a point: p (x,y)
# Buffer radius in km (projection units of routes.laea).
RADIUS = 5
# make.cir: return a closed sp::Polygon approximating a circle of radius r
# centered on p = c(x, y), using 360 one-degree steps. Vectorized over theta
# instead of growing the coordinate matrix with rbind() inside a loop
# (identical coordinates, O(n) instead of O(n^2)).
make.cir = function(p,r){
  theta = (1:360) * 2 * pi / 360
  points = cbind(p[1] + r * sin(theta),   # x column
                 p[2] + r * cos(theta))   # y column
  # Repeat the first vertex so the ring is explicitly closed.
  points = rbind(points, points[1, , drop = FALSE])
  circle = Polygon(points, hole = F)
  circle
}
# Unique identifiers for each point: "datasetID_site" plus a running index
# used as the Polygons ID.
routes.laea@data$dId_site = paste(routes.laea@data$datasetID, routes.laea@data$site, sep = "_")
# Generalized from the hard-coded 1:1077 so the index always matches the data.
routes.laea@data$unique = seq_len(nrow(routes.laea@data))
#Draw circles around all routes
# lapply (not sapply) guarantees a list result, which SpatialPolygons()
# expects, regardless of element types.
circs = lapply(seq_len(nrow(routes.laea@data)), function(x){
  circ = make.cir(routes.laea@coords[x, ], RADIUS)
  Polygons(list(circ), ID = routes.laea$unique[x])
})
circs.sp = SpatialPolygons(circs, proj4string=CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"))
# Check that circle locations look right
# plot(circs.sp, add = TRUE)
# read in elevation raster at 1 km resolution
elev <- raster("Z:/GIS/DEM/sdat_10003_1_20170424_102000103.tif")
NorthAm = readOGR("Z:/GIS/geography", "continent")
NorthAm2 = spTransform(NorthAm, CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"))
elevNA2 = projectRaster(elev, crs = prj.string) #UNMASKED!
elevNA3 <- raster::mask(elev, NorthAm2)
elev.point = raster::extract(elevNA3, routes.laea)
elev.mean = raster::extract(elevNA3, circs.sp, fun = mean, na.rm=T)
elev.var = raster::extract(elevNA3, circs.sp, fun = var, na.rm=T)
env_elev = data.frame(unique = routes.laea@data$unique, elev.point = elev.point, elev.mean = elev.mean, elev.var = elev.var)
lat_scale_elev = merge(routes.laea, env_elev, by = c("unique")) # checked to make sure order lined up, d/n seem to be another way to merge since DID keeps getting lost
lat_scale_elev = data.frame(lat_scale_elev)
lat_scale_rich = merge(lat_scale_elev, summ[,c("datasetID","site", "meanAbundance")], by = c("datasetID", "site"), all.x = TRUE)
# write.csv(lat_scale_rich, "output/tabular_data/lat_scale_rich_3_30.csv", row.names = F)
lat_scale = read.csv("output/tabular_data/lat_scale_rich_5km.csv", header = TRUE, stringsAsFactors = FALSE)
lat_scale_rich_taxa = filter(lat_scale, datasetID == 1) %>% separate(., site, c("stateroute", "level", "number"), sep = "-") %>% filter(., level == 50)
lat_scale_rich_taxa$site = lat_scale_rich_taxa$stateroute
lat_scale_rich_taxa = lat_scale_rich_taxa[,c("datasetID","site", "unique", "taxa", "propTrans" , "dId_site", "elev.point", "elev.mean" , "elev.var" ,"Lon","Lat", "optional","stateroute", "meanAbundance")]
lat_scale_final.5 = filter(lat_scale, datasetID != 1) %>% filter(., taxa != "Fish") %>% filter(., taxa != "Plankton") %>% filter(., taxa != "Benthos")
lat_scale_final = lat_scale_final.5[,c("datasetID","site", "unique", "taxa", "propTrans" , "dId_site", "elev.point", "elev.mean" , "elev.var" ,"Lon","Lat", "optional","stateroute", "meanAbundance")]
lat_scale_rich = rbind(lat_scale_final, lat_scale_rich_taxa)
# Model - sampled at 5 km radius
# same model structure (but only terrestrial datasets, not necessarily hierarchically scaled datasets) as used in
# core-transient-figure-4.R, but adding an elevational variance term
mod1 = lmer(propTrans ~ log10(meanAbundance) * taxa + log10(elev.var) + (log10(meanAbundance)|datasetID) , data=lat_scale_rich)
summary(mod1)
coefs <- data.frame(coef(summary(mod1)))
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
|
/scripts/R-scripts/core-transient-figure-4.R
|
no_license
|
hurlbertlab/core-transient
|
R
| false
| false
| 14,509
|
r
|
###############################################
# Code for creating Figure 4 for core-transient manuscript
library(lme4)
library(plyr) # for core-transient functions
library(ggplot2)
library(merTools)
library(tidyr)
library(maps)
library(gridExtra)
library(RColorBrewer)
library(sp)
library(rgdal)
library(raster)
library(dplyr)
library(digest)
library(Hmisc)
library(piecewiseSEM)
library(MuMIn)
source('scripts/R-scripts/core-transient_functions.R')
# Specify here the datasetIDs and then run the code below.
dataformattingtable = read.csv('data_formatting_table.csv', header = T)
# Keep only datasets flagged as fully formatted (format_flag == 1).
datasetIDs = dataformattingtable$dataset_ID[dataformattingtable$format_flag == 1]
# BBS (dataset 1) will be analyzed separately for now.
datasetIDs = datasetIDs[!datasetIDs %in% c(1)]
#################### FIG 4 #########################
# Site-level occupancy summaries per taxon for non-BBS datasets.
occ_taxa=read.csv("output/tabular_data/occ_taxa.csv",header=TRUE)
# Fixed palette, one entry per taxon (taxon noted inline).
colors7 = c(colors()[552], # plankton
rgb(29/255, 106/255, 155/255), #bird
colors()[144], # invert
colors()[139], # plant
colors()[551], #mammal
colors()[17], #benthos
colors()[637]) #fish
# Plotting symbols, same taxon order as colors7.
symbols7 = c(16, 18, 167, 15, 17, 1, 3)
taxcolors = read.csv("output/tabular_data/taxcolors.csv", header = TRUE)
# Datasets sampled at multiple spatial scales; some ids excluded by hand.
scaleIDs = filter(dataformattingtable, spatial_scale_variable == 'Y',
format_flag == 1)$dataset_ID
# subsetting to only count ids
scaleIDs = scaleIDs[! scaleIDs %in% c(207, 210, 217, 218, 222, 223, 225, 238, 241,258, 282, 322, 280,317)]
# BBS abundance/transience across nested spatial scales.
bbs_abun = read.csv("data/BBS/bbs_allscales33.csv", header=TRUE)
bbs_abun$pctTrans = bbs_abun$propTrans
# convert km2 to m2
bbs_abun$area = bbs_abun$area * 1000000
#### Fig 4a Area #####
area = read.csv("output/tabular_data/scaled_areas_3_2.csv", header = TRUE)
areamerge.5 = merge(occ_taxa[,c("datasetID", "site", "pctTrans")], area, by = c("datasetID", "site"), na.rm = TRUE)
areamerge.5$area = areamerge.5$area
areamerge1 = areamerge.5 [, c("datasetID", "site", "taxa", "pctTrans", "area")]
# read in bbs abundance data
bbs_area = bbs_abun[, c("datasetID", "site", "taxa", "pctTrans", "area")]
# Combined area table: BBS plus the scaled non-BBS datasets.
areamerge = rbind(bbs_area,areamerge1)
# write.csv(areamerge, "output/tabular_data/areamerge.csv", row.names = FALSE)
#### Figures 4a-4c panel plot #####
# Recompute scaleIDs with a different exclusion list for the panel plot.
scaleIDs = filter(dataformattingtable, spatial_scale_variable == 'Y',
format_flag == 1)$dataset_ID
scaleIDs = scaleIDs[! scaleIDs %in% c(207, 210, 217, 218, 222, 223, 225, 241,258, 282, 322, 280, 248, 254, 279, 291)] # waiting on data for 248
bbs_abun$pctCore = bbs_abun$propCore
# Combine BBS and non-BBS site summaries into one table for occupancy models.
bbs_spRich = bbs_abun[,c("datasetID","site","taxa", "meanAbundance", "pctTrans","pctCore")]
occ_merge = occ_taxa[,c("datasetID", "site","taxa", "meanAbundance", "pctTrans","pctCore")]
bbs_occ = rbind(bbs_spRich,occ_merge)
# Drop two problem sites.
bbs_occ = bbs_occ[!bbs_occ$site %in% c("53800-5-6", "53800-25-2"),]
#### Fig 4c/d predicted model ####
# Exclude hand-picked datasets from the prediction models.
bbs_occ_pred = bbs_occ[!bbs_occ$datasetID %in% c(207, 210, 217, 218, 222, 223, 225, 238, 241, 248, 258, 282, 322, 280,317),]
# Mixed model: % transients ~ community size x taxon, with a random
# slope/intercept for community size per dataset.
mod4c = lmer(pctTrans ~ log10(meanAbundance) * taxa + (log10(meanAbundance)|datasetID), data = bbs_occ_pred)
summary(mod4c)
occ_sub_pred = data.frame(datasetID = 999, taxa = unique(bbs_occ_pred$taxa), meanAbundance = 102) # 102 is median abun for data frame (median(bbs_occ_pred$meanAbundance))
# to test: test = filter(occ_sub_pred, taxa == "Invertebrate")
# Simulation-based prediction intervals at the median community size.
predmod4c = merTools::predictInterval(mod4c, occ_sub_pred, n.sims=1000)
# matching by predicted output vals based on occ_sub_pred
# NOTE(review): taxon labels assigned by position — confirm they match the
# row order of occ_sub_pred.
predmod4c$taxa = c("Bird","Invertebrate", "Plant", "Mammal","Fish", "Plankton", "Benthos")
# write.csv(predmod4c, "output/tabular_data/predmod4c.csv", row.names = FALSE)
predmod = merge(predmod4c, taxcolors, by = "taxa")
lm.hsd = lm(fit ~ taxa, data= predmod) #Tukeys HSD
summary(aov(fit ~ taxa, data= predmod), test = "Chisq")
# agricolae::HSD.test(lm.hsd, "taxa")
# Display order of taxa bars in panel C.
predmod$order = c(1,4,3,6,7,5,2)
# 4d
# Same model but by ecosystem type (system) instead of taxon.
ecosys = merge(bbs_occ_pred, dataformattingtable[,c("dataset_ID", "system")], by.y = "dataset_ID", by.x = "datasetID")
mod4d = lmer(pctTrans ~ log10(meanAbundance) * system + (log10(meanAbundance)|datasetID), data=ecosys)
summary(mod4d)
occ_pred_4d = data.frame(datasetID = 999, system = unique(ecosys$system), meanAbundance = 102) # 102 is median abun for data frame (median(bbs_occ_pred$meanAbundance))
predmod4d = merTools::predictInterval(mod4d, occ_pred_4d, n.sims=1000)
predmod4d$order = c(1:3)
# pseudo r2 area
bbs_occ_area = merge(bbs_occ_pred, areamerge[,c("datasetID", "site", "area")], by = c("datasetID", "site"))
mod4a = lmer(pctTrans ~ log10(area) * taxa + (log10(area)|datasetID), data=bbs_occ_area)
r.squaredGLMM(mod4a)
# Wald z-style p-values from the lmer t statistics.
coefs <- data.frame(coef(summary(mod4a)))
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
# R2 area
modar = lm(pctTrans~log10(area), data=bbs_occ_area)
summary(modar)
mod6 = lm(pctTrans~log10(meanAbundance), data=bbs_occ_area)
summary(mod6)
# pseudo r2 abun
mod4b = lmer(pctTrans ~ log10(meanAbundance) * taxa + (log10(meanAbundance)|datasetID), data = bbs_occ_area)
rsquared(mod4b, aicc = FALSE)
coefs <- data.frame(coef(summary(mod4b)))
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
# https://ecologyforacrowdedplanet.wordpress.com/2013/08/27/r-squared-in-mixed-models-the-easy-way/
#### panel plot ####
# area_plot accumulates one row per dataset (id, fitted range, slope, taxon)
# from the panel A loop below; written to csv after the plot.
area_plot = data.frame()
areafig = read.csv("output/tabular_data/areafig.csv", header = TRUE)
area.5 = merge(occ_taxa[,c("datasetID", "site", "pctTrans")], areafig, by = c("datasetID", "site"), na.rm = TRUE)
area.5 = area.5 [, c("datasetID", "site", "taxa", "pctTrans", "area")]
areamerge.5 = rbind(bbs_area,area.5)
areamerge_fig = subset(areamerge.5, datasetID %in% scaleIDs)
# 2x2 panel figure: A area, B community size, C taxa bars, D system bars.
pdf('output/plots/4a_4d.pdf', height = 10, width = 14)
par(mfrow = c(2, 2), mar = c(5,5,1,1), cex = 1, oma = c(0,0,0,0), las = 1)
palette(colors7)
# Overall (pooled) regression of % transients on log10 area.
# NOTE(review): `all` shadows base::all for the rest of the script.
all = lm(areamerge_fig$pctTrans ~ log10(areamerge_fig$area))
xnew = range(log10(areamerge_fig$area))
xhat <- predict(all, newdata = data.frame((xnew)))
xhats = range(xhat)
lower = range(xhat)[1]
upper = range(xhat)[2]
# Panel A: empty frame, then one fitted line per dataset, colored by taxon.
plot(NA, xlim = c(-2, 8), ylim = c(0,1), xlab = expression("log"[10]*" Area (m"^2*")"),
ylab = "% Transients", cex.lab = 2, frame.plot=FALSE, xaxt = "n", yaxt = "n",
mgp = c(3.25,1,0))
axis(1, cex.axis = 1.5)
axis(2, cex.axis = 1.5)
b1 = for(id in scaleIDs){
print(id)
plotsub = subset(areamerge_fig,datasetID == id)
taxa = as.character(unique(plotsub$taxa))
mod4 = lm(plotsub$pctTrans ~ log10(plotsub$area))
mod4.slope = summary(mod4)$coef[2,"Estimate"]
# NOTE(review): mod4.coef1 is never used, and summary(mod4$coef[1]) summarizes
# a single number — confirm this line is vestigial.
mod4.coef1 = summary(mod4$coef[1])[3]
xnew = range(log10(plotsub$area))
xhat <- predict(mod4, newdata = data.frame((xnew)))
xhats = range(xhat)
lower = range(xhat)[1]
upper = range(xhat)[2]
print(xhats)
taxcolor = subset(taxcolors, taxa == as.character(plotsub$taxa)[1])
y= summary(mod4)$coef[1]+ (xhats)*summary(mod4)$coef[2]
# Row appended to area_plot for the csv output written after the figure.
area_plot = rbind(area_plot , c(id, lower,upper, mod4.slope,taxa))
lines(log10(plotsub$area), fitted(mod4), col=as.character(taxcolor$color),lwd=4)
# points(log10(plotsub$area), plotsub$pctTrans)
par(new=TRUE)
}
# Pooled fit drawn on top in black.
lines(log10(areamerge_fig$area), fitted(all), col="black", lwd=4)
title(outer=FALSE,adj=0.02,main="A",cex.main=2,col="black",font=2,line=-1)
par(new= FALSE)
# Panel B: same structure but against log10 community size (meanAbundance).
bbs_occ_plot = subset(bbs_occ, datasetID %in% scaleIDs)
occ_all = lm(bbs_occ_plot$pctTrans ~ log10(bbs_occ_plot$meanAbundance))
xnew = range(log10(bbs_occ_plot$meanAbundance))
xhat <- predict(occ_all, newdata = data.frame((xnew)))
xhats = range(xhat)
plot(NA, xlim = c(0, 7), ylim = c(0,1), col = as.character(taxcolor$color), xlab = expression("log"[10]*" Community Size"), ylab = "% Transients", cex.lab = 2,frame.plot=FALSE, yaxt = "n", xaxt = "n", mgp = c(3.25,1,0))
axis(1, cex.axis = 1.5)
axis(2, cex.axis = 1.5)
b2 = for(id in scaleIDs){
print(id)
plotsub = subset(bbs_occ_plot,datasetID == id)
mod4 = lm(plotsub$pctTrans ~ log10(plotsub$meanAbundance))
xnew = range(log10(plotsub$meanAbundance))
xhat <- predict(mod4, newdata = data.frame((xnew)))
xhats = range(xhat)
print(xhats)
taxcolor = subset(taxcolors, taxa == as.character(plotsub$taxa)[1])
y=summary(mod4)$coef[1] + (xhats)*summary(mod4)$coef[2]
lines(log10(plotsub$meanAbundance), fitted(mod4), col=as.character(taxcolor$color),lwd=4)
# points(log10(plotsub$meanAbundance), plotsub$pctTrans)
par(new=TRUE)
}
# Vertical reference at the median community size used for model predictions.
abline(v = log10(102), lty = 'dotted', lwd = 2)
par(new=TRUE)
title(outer=FALSE,adj=0.02,main="B",cex.main=2,col="black",font=2,line=-1)
lines(log10(bbs_occ_plot$meanAbundance), fitted(occ_all), col="black",lwd=4)
legend('topright', legend = as.character(taxcolors$taxa), lty=1,lwd=3,col = as.character(taxcolors$color), cex = 1.5, bty = "n")
par(new = FALSE)
# Panel C: predicted % transients per taxon with prediction-interval error bars.
b4 = barplot(predmod$fit[predmod$order], cex.names = 2,col = c(colors()[17],"gold2", "turquoise2","red","forestgreen","purple4","#1D6A9B"), ylim = c(0, 1.1), yaxt = "n")
axis(2, cex.axis = 1.5)
Hmisc::errbar(c(0.7, 1.9, 3.1, 4.3, 5.5, 6.7, 7.9), predmod$fit[predmod$order], predmod$upr[predmod$order], predmod$lwr[predmod$order], add= TRUE, lwd = 1.25, pch = 3)
mtext("% Transients", 2, cex = 2, las = 0, line = 3, mgp = c(3.25,1,0))
title(outer=FALSE,adj=0.02,main="C",cex.main=2,col="black",font=2,line=-1)
# Panel D: predicted % transients per ecosystem type.
b4 = barplot(predmod4d$fit[predmod4d$order], cex.names = 1.5,col = c('burlywood','skyblue','navy'), ylim = c(0, 0.9), yaxt = "n")
axis(2, cex.axis = 1.5)
Hmisc::errbar(c(0.7, 1.9, 3.1), predmod4d$fit[predmod4d$order], predmod4d$upr[predmod4d$order], predmod4d$lwr[predmod4d$order], add= TRUE, lwd = 1.25, pch = 3)
mtext("% Transients", 2, cex = 2, las = 0, line = 3, mgp = c(3.25,1,0))
title(outer=FALSE,adj=0.02,main="D",cex.main=2,col="black",font=2,line=-1)
# NOTE(review): two consecutive dev.off() calls — confirm a second device is
# actually open here.
dev.off()
dev.off()
# Assemble the per-dataset regression summary accumulated in the Fig 4a loop
# (id, fitted range, slope, taxon) into a typed data frame and save it.
colnames(area_plot) = c("id","xlow","xhigh","slope", "taxa")
# stringsAsFactors = FALSE guards against the classic coercion bug: under
# R < 4.0 data.frame() turned character columns into factors, and as.numeric()
# on a factor silently returns level codes rather than the values.
area_plot = data.frame(area_plot, stringsAsFactors = FALSE)
# as.character() before as.numeric() is a no-op on character columns but keeps
# the conversion correct even if a column arrives as a factor.
area_plot$datasetID = as.numeric(as.character(area_plot$id))
area_plot$xlow = as.numeric(as.character(area_plot$xlow))
area_plot$xhigh = as.numeric(as.character(area_plot$xhigh))
area_plot$slope = as.numeric(as.character(area_plot$slope))
write.csv(area_plot, "output/tabular_data/fig_4a_output.csv", row.names = FALSE)
####### elev heterogeneity model ################
# Build one table of site coordinates: non-BBS datasets from latlongs.csv
# (excluding BBS, datasetID 1, and Fish), plus BBS routes loaded below.
latlongs = read.csv("data/latlongs/latlongs.csv", header =TRUE)
latlongs = filter(latlongs, datasetID != 1)
latlongs = filter(latlongs, taxa != "Fish")
# BBS routes: harmonize ids and column names so they row-bind onto latlongs.
bbs_latlong = read.csv("data/latlongs/bbs_2000_2014_latlongs.csv", header = TRUE)
bbs_latlong$datasetID = 1
bbs_latlong$taxa = "Bird"
bbs_latlong$Lon = bbs_latlong$Longi
bbs_latlong$Lat = bbs_latlong$Lati
bbs_latlong$site = as.factor(bbs_latlong$stateroute)
bbs_latlong = bbs_latlong[, c("datasetID", "taxa", "site", "Lat", "Lon")]
all_latlongs = rbind(latlongs, bbs_latlong)
all_latlongs = na.omit(all_latlongs)
# Makes routes into a spatialPointsDataframe
coordinates(all_latlongs)=c('Lon','Lat')
projection(all_latlongs) = CRS("+proj=longlat +ellps=WGS84")
# Lambert azimuthal equal-area projection centered on the study region,
# with km units (so later circle radii are in km).
prj.string <- CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km")
# "+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"
# Transforms routes to an equal-area projection - see previously defined prj.string
routes.laea = spTransform(all_latlongs, CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"))
##### extracting elevation data ####
# Radius (km, in the projected coordinate system) of the sampling circle
# drawn around each route centroid.
RADIUS = 5
# Construct a closed 360-vertex ring approximating a circle of radius r
# around center p = (x, y), returned as an sp::Polygon.
make.cir = function(p, r) {
  angles = seq_len(360) * 2 * pi / 360
  ring = matrix(c(p[1] + r * sin(angles), p[2] + r * cos(angles)), ncol = 2)
  # Repeat the first vertex so the ring is closed.
  ring = rbind(ring, ring[1, ])
  Polygon(ring, hole = F)
}
# Tag each route with a dataset_site ID and a simple integer key that links
# circle polygons back to attribute rows.
routes.laea@data$dId_site = paste(routes.laea@data$datasetID, routes.laea@data$site, sep = "_")
# seq_len(nrow(...)) replaces the previous hard-coded 1:1077 so the key stays
# correct if the number of routes ever changes.
routes.laea@data$unique = seq_len(nrow(routes.laea@data))
#Draw circles around all routes
circs = sapply(seq_len(nrow(routes.laea@data)), function(x){
circ = make.cir(routes.laea@coords[x,],RADIUS)
circ = Polygons(list(circ),ID=routes.laea$unique[x])
}
)
circs.sp = SpatialPolygons(circs, proj4string=CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"))
# Check that circle locations look right
# plot(circs.sp, add = TRUE)
# read in elevation raster at 1 km resolution
elev <- raster("Z:/GIS/DEM/sdat_10003_1_20170424_102000103.tif")
NorthAm = readOGR("Z:/GIS/geography", "continent")
NorthAm2 = spTransform(NorthAm, CRS("+proj=laea +lat_0=45.235 +lon_0=-106.675 +units=km"))
elevNA2 = projectRaster(elev, crs = prj.string) #UNMASKED!
# NOTE(review): elevNA2 is never used below, and mask() is applied to the
# unprojected elev with the projected NorthAm2 — confirm whether elevNA2 was
# intended here; left unchanged to preserve current behavior.
elevNA3 <- raster::mask(elev, NorthAm2)
# Point elevation at each route, plus mean and variance within each circle.
elev.point = raster::extract(elevNA3, routes.laea)
elev.mean = raster::extract(elevNA3, circs.sp, fun = mean, na.rm=T)
elev.var = raster::extract(elevNA3, circs.sp, fun = var, na.rm=T)
env_elev = data.frame(unique = routes.laea@data$unique, elev.point = elev.point, elev.mean = elev.mean, elev.var = elev.var)
lat_scale_elev = merge(routes.laea, env_elev, by = c("unique")) # checked to make sure order lined up, d/n seem to be another way to merge since DID keeps getting lost
lat_scale_elev = data.frame(lat_scale_elev)
# NOTE(review): `summ` is not defined anywhere in this script — presumably
# created by a companion script; verify before running standalone.
lat_scale_rich = merge(lat_scale_elev, summ[,c("datasetID","site", "meanAbundance")], by = c("datasetID", "site"), all.x = TRUE)
# write.csv(lat_scale_rich, "output/tabular_data/lat_scale_rich_3_30.csv", row.names = F)
# Reload the precomputed 5 km table; normalize BBS sites (datasetID 1) to
# their stateroute at scale level 50 and drop aquatic taxa from the rest.
lat_scale = read.csv("output/tabular_data/lat_scale_rich_5km.csv", header = TRUE, stringsAsFactors = FALSE)
lat_scale_rich_taxa = filter(lat_scale, datasetID == 1) %>% separate(., site, c("stateroute", "level", "number"), sep = "-") %>% filter(., level == 50)
lat_scale_rich_taxa$site = lat_scale_rich_taxa$stateroute
lat_scale_rich_taxa = lat_scale_rich_taxa[,c("datasetID","site", "unique", "taxa", "propTrans" , "dId_site", "elev.point", "elev.mean" , "elev.var" ,"Lon","Lat", "optional","stateroute", "meanAbundance")]
lat_scale_final.5 = filter(lat_scale, datasetID != 1) %>% filter(., taxa != "Fish") %>% filter(., taxa != "Plankton") %>% filter(., taxa != "Benthos")
lat_scale_final = lat_scale_final.5[,c("datasetID","site", "unique", "taxa", "propTrans" , "dId_site", "elev.point", "elev.mean" , "elev.var" ,"Lon","Lat", "optional","stateroute", "meanAbundance")]
lat_scale_rich = rbind(lat_scale_final, lat_scale_rich_taxa)
# Model - sampled at 5 km radius
# same model structure (but only terrestrial datasets, not necessarily hierarchically scaled datasets) as used in
# core-transient-figure-4.R, but adding an elevational variance term
mod1 = lmer(propTrans ~ log10(meanAbundance) * taxa + log10(elev.var) + (log10(meanAbundance)|datasetID) , data=lat_scale_rich)
summary(mod1)
# Wald z-style p-values computed from the lmer t statistics.
coefs <- data.frame(coef(summary(mod1)))
coefs$p.z <- 2 * (1 - pnorm(abs(coefs$t.value)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visEdges.R
\name{visEdges}
\alias{visEdges}
\title{Network visualization edges options}
\usage{
visEdges(graph, title = NULL, value = NULL, label = NULL, length = NULL,
width = NULL, dashes = NULL, hidden = NULL, hoverWidth = NULL,
id = NULL, physics = NULL, selectionWidth = NULL,
selfReferenceSize = NULL, labelHighlightBold = NULL, color = NULL,
font = NULL, arrows = NULL, arrowStrikethrough = NULL, smooth = NULL,
shadow = NULL, scaling = NULL, widthConstraint = NULL, chosen = NULL)
}
\arguments{
\item{graph}{: a visNetwork object}
\item{title}{: String. Default to undefined. The title is shown in a pop-up when the mouse moves over the edge.}
\item{value}{: Number. Default to undefined. When a value is set, the edges' width will be scaled using the options in the scaling object defined above.}
\item{label}{: String. Default to undefined. The label of the edge. HTML does not work in here because the network uses HTML5 Canvas.}
\item{length}{: Number. Default to undefined. The physics simulation gives edges a spring length. This value can override the length of the spring in rest.}
\item{width}{: Number. Default to 1. The width of the edge. If value is set, this is not used.}
\item{dashes}{: Array or Boolean. Default to false. When true, the edge will be drawn as a dashed line. You can customize the dashes by supplying an Array. Array format: Array of numbers, gap length, dash length, gap length, dash length, ... etc. The array is repeated until the distance is filled. When using dashed lines in IE versions older than 11, the line will be drawn straight, not smooth.}
\item{hidden}{: Boolean. Default to false. When true, the edge is not drawn. It is still part of the physics simulation, however!}
\item{hoverWidth}{: Number or Function. Default to 0.5. Assuming the hover behaviour is enabled in the interaction module, the hoverWidth determines the width of the edge when the user hovers over it with the mouse. If a number is supplied, this number will be added to the width. Because the width can be altered by the value and the scaling functions, a constant multiplier or added value may not give the best results. To solve this, you can supply a function.}
\item{id}{: String. Default to undefined. The id of the edge. The id is optional for edges. When not supplied, an UUID will be assigned to the edge.}
\item{physics}{: Boolean. Default to true. When true, the edge is part of the physics simulation. When false, it will not act as a spring.}
\item{selectionWidth}{: Number or Function. Default to 1. The selectionWidth determines the width of the edge when the edge is selected. If a number is supplied, this number will be added to the width. Because the width can be altered by the value and the scaling functions, a constant multiplier or added value may not give the best results. To solve this, you can supply a function.}
\item{selfReferenceSize}{: Number. Default to false. When the to and from nodes are the same, a circle is drawn. This is the radius of that circle.}
\item{labelHighlightBold}{: Boolean. Default to true. Determines whether or not the label becomes bold when the edge is selected.}
\item{color}{: Named list or String. Default to named list. Color information of the edge in every situation. Can be 'rgba(120,32,14,1)', '#97C2FC' or 'red'.
\itemize{
\item{"color"}{ : String. Default to '#848484'. The color of the edge when it is not selected or hovered over (assuming hover is enabled in the interaction module).}
\item{"highlight "}{ : String. Default to '#848484'. The color of the edge when it is selected.}
\item{"hover"}{ : String. Default to '#848484'. The color of the edge when the mouse hovers over it (assuming hover is enabled in the interaction module).}
\item{"inherit"}{ : String or Boolean. Default to 'from'. When color, highlight or hover are defined, inherit is set to false! Supported options are: true, false, 'from','to','both'.}
\item{"opacity"}{ : Number. Default to 1.0. It can be useful to set the opacity of an edge without manually changing all the colors. The allowed range of the opacity option is between 0 and 1.}
}}
\item{font}{: Named list or String. This object defines the details of the label. A shorthand is also supported in the form 'size face color' for example: '14px arial red'
\itemize{
\item{"color"}{ : String. Default to '#343434'. Color of the label text.}
\item{"size"}{ : Number. Default to 14. Size of the label text.}
\item{"face"}{ : String. Default to 'arial. Font face (or font family) of the label text.}
\item{"background"}{ : String. Default to undefined. When not undefined but a color string, a background rectangle will be drawn behind the label in the supplied color.}
\item{"strokeWidth"}{ : Number. Default to 2. As an alternative to the background rectangle, a stroke can be drawn around the text. When a value higher than 0 is supplied, the stroke will be drawn.}
\item{"strokeColor"}{ : String. Default to '#ffffff'. This is the color of the stroke assuming the value for stroke is higher than 0.}
\item{"align"}{ : String. Default to 'horizontal'. Possible options: 'horizontal','top','middle','bottom'. The alignment determines how the label is aligned over the edge. The default value horizontal aligns the label horizontally, regardless of the orientation of the edge. When an option other than horizontal is chosen, the label will align itself according to the edge.}
\item{"vadjust, multi, bold, ital, boldital, mono"}{See \link{visDocumentation}}
}}
\item{arrows}{: Named list or String. To draw an arrow with default settings a string can be supplied. For example: 'to, from,middle' or 'to;from', any combination with any separating symbol is fine. If you want to control the size of the arrowheads, you can supply an object.
\itemize{
\item{"to"}{ : Named list or Boolean. Default to Named list. When true, an arrowhead on the 'to' side of the edge is drawn, pointing to the 'to' node with default settings. To customize the size of the arrow, supply an object.
\itemize{
\item{"enabled"}{ : Boolean. Default to false. Toggle the arrow on or off. This option is optional, if undefined and the scaleFactor property is set, enabled will be set to true.}
\item{"scaleFactor"}{ : Number. Default to 1. The scale factor allows you to change the size of the arrowhead.}
\item{"type"}{ : Character. Default to 'arrow'. The type of endpoint. Also possible is 'circle'.}
}
}
\item{"middle"}{ : Named list or Boolean. Default to Named list. Exactly the same as the to object but with an arrowhead in the center node of the edge.}
\item{"from "}{ : Named list or Boolean. Default to Named list. Exactly the same as the to object but with an arrowhead at the from node of the edge.}
}}
\item{arrowStrikethrough}{: Boolean. Default to True. When false, the edge stops at the arrow. This can be useful if you have thick lines and you want the arrow to end in a point. Middle arrows are not affected by this.}
\item{smooth}{: Boolean | named list. Default to named list. When true, the edge is drawn as a dynamic quadratic bezier curve. The drawing of these curves takes longer than that of straight curves but it looks better.
\itemize{
\item{"enabled"}{ : Boolean. Default to true. Toggle smooth curves on and off. This is an optional option. If any of the other properties in this object are set, this option will be set to true.}
\item{"type"}{ : String. Default to 'dynamic'. Possible options: 'dynamic', 'continuous', 'discrete', 'diagonalCross', 'straightCross', 'horizontal', 'vertical', 'curvedCW', 'curvedCCW', 'cubicBezier'.}
\item{"roundness"}{ : Number. Default to 0.5. Accepted range: 0 .. 1.0. This parameter tweaks the roundness of the smooth curves for all types EXCEPT dynamic.}
\item{"forceDirection"}{ : String or Boolean. Default to false. Accepted options: ['horizontal', 'vertical', 'none']. This option is only used with the cubicBezier curves. When true, horizontal is chosen; when false, the direction that is larger (x distance between nodes vs y distance between nodes) is used. If the x distance is larger, horizontal. This is meant to be used with hierarchical layouts. }
}}
\item{shadow}{: Boolean | named list. Default to false. When true, the edges casts a shadow using the default settings. This can be further refined by supplying a list
\itemize{
\item{"enabled"}{ : Boolean. Default to false. Toggle the casting of shadows. If this option is not defined, it is set to true if any of the properties in this object are defined.}
\item{"color"}{ : String. Default to 'rgba(0,0,0,0.5)'. The color of the shadow as a string. Supported formats are 'rgb(255,255,255)', 'rgba(255,255,255,1)' and '#FFFFFF'.}
\item{"size"}{ : Number. Default to 10. The blur size of the shadow.}
\item{"x"}{ : Number. Default to 5. The x offset.}
\item{"y"}{ : Number. Default to 5. The y offset.}
}}
\item{scaling}{: Named list. If the value option is specified, the size of the edges will be scaled according to the properties in this object.
\itemize{
\item{"min"}{ : Number. Default to 10. If edges have a value, their sizes are determined by the value, the scaling function and the min max values.}
\item{"max"}{ : Number. Default to 30. This is the maximum allowed size when the edges are scaled using the value option.}
\item{"label"}{ : Named list or Boolean. Default to Named list. This can be false if the label is not allowed to scale with the node. If true it will scale using default settings. For further customization, you can supply an object.
\itemize{
\item{"enabled"}{ : Boolean. Default to false. Toggle the scaling of the label on or off. If this option is not defined, it is set to true if any of the properties in this object are defined.}
\item{"min"}{ : Number. Default to 14. The minimum font-size used for labels when scaling.}
\item{"max"}{ : Number. Default to 30. The maximum font-size used for labels when scaling.}
\item{"maxVisible"}{ : Number. Default to 30. When zooming in, the font is drawn larger as well. You can limit the perceived font size using this option. If set to 30, the font will never look larger than size 30 zoomed at 100\%.}
\item{"drawThreshold"}{ : Number. Default to 5. When zooming out, the font will be drawn smaller. This defines a lower limit for when the font is drawn. When using font scaling, you can use this together with the maxVisible to first show labels of important nodes when zoomed out and only show the rest when zooming in.}
}
}
\item{"customScalingFunction"}{ : Function. If nodes have value fields, this function determines how the size of the nodes are scaled based on their values.}
}}
\item{widthConstraint}{: Number, boolean or list. If false (default), no widthConstraint is applied. If a number is specified, the maximum width of the edge's label is set to the value. The edge's label's lines will be broken on spaces to stay below the maximum.
\itemize{
\item{"maximum"}{ : Boolean. If a number is specified, the maximum width of the edge's label is set to the value. The edge's label's lines will be broken on spaces to stay below the maximum.}
}}
\item{chosen}{: See \link{visDocumentation}}
}
\description{
Network visualization edges options. For full documentation, have a look at \link{visDocumentation}.
}
\examples{
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3))
# arrows
visNetwork(nodes, edges) \%>\% visEdges(arrows = 'from')
visNetwork(nodes, edges) \%>\% visEdges(arrows = 'to, from')
visNetwork(nodes, edges) \%>\%
visEdges(arrows = list(to = list(enabled = TRUE,
scaleFactor = 2, type = 'circle')))
# smooth
visNetwork(nodes, edges) \%>\% visEdges(smooth = FALSE)
visNetwork(nodes, edges) \%>\% visEdges(smooth = list(enabled = TRUE, type = "diagonalCross"))
# width
visNetwork(nodes, edges) \%>\% visEdges(width = 10)
# color
visNetwork(nodes, edges) \%>\% visEdges(color = list(hover = "green")) \%>\%
visInteraction(hover = TRUE)
visNetwork(nodes, edges) \%>\% visEdges(color = "red")
visNetwork(nodes, edges) \%>\% visEdges(color = list(color = "red", highlight = "yellow"))
# shadow
visNetwork(nodes, edges) \%>\% visEdges(shadow = TRUE)
visNetwork(nodes, edges) \%>\% visEdges(shadow = list(enabled = TRUE, size = 5))
# dashes
# globally
visNetwork(nodes, edges) \%>\% visEdges(dashes = TRUE)
# set configuration individualy
# have to use specific notation...
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3),
dashes = c("[10,10,2,2]", "false"))
visNetwork(nodes, edges)
edges <- data.frame(from = c(1,2), to = c(1,3),
dashes = c("[10,10,2,2]", "[2]"))
visNetwork(nodes, edges)
}
\references{
See online documentation \url{http://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
\link{visLegend} for adding legend, \link{visOptions} for custom option, \link{visLayout} & \link{visHierarchicalLayout} for layout,
\link{visPhysics} for control physics, \link{visInteraction} for interaction, \link{visNetworkProxy} & \link{visFocus} & \link{visFit} for animation within shiny,
\link{visDocumentation}, \link{visEvents}, \link{visConfigure} ...
}
|
/man/visEdges.Rd
|
no_license
|
ktargows/visNetwork
|
R
| false
| true
| 13,583
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visEdges.R
\name{visEdges}
\alias{visEdges}
\title{Network visualization edges options}
\usage{
visEdges(graph, title = NULL, value = NULL, label = NULL, length = NULL,
width = NULL, dashes = NULL, hidden = NULL, hoverWidth = NULL,
id = NULL, physics = NULL, selectionWidth = NULL,
selfReferenceSize = NULL, labelHighlightBold = NULL, color = NULL,
font = NULL, arrows = NULL, arrowStrikethrough = NULL, smooth = NULL,
shadow = NULL, scaling = NULL, widthConstraint = NULL, chosen = NULL)
}
\arguments{
\item{graph}{: a visNetwork object}
\item{title}{: String. Default to undefined. The title is shown in a pop-up when the mouse moves over the edge.}
\item{value}{: Number. Default to undefined. When a value is set, the edges' width will be scaled using the options in the scaling object defined above.}
\item{label}{: String. Default to undefined. The label of the edge. HTML does not work in here because the network uses HTML5 Canvas.}
\item{length}{: Number. Default to undefined. The physics simulation gives edges a spring length. This value can override the length of the spring in rest.}
\item{width}{: Number. Default to 1. The width of the edge. If value is set, this is not used.}
\item{dashes}{: Array or Boolean. Default to false. When true, the edge will be drawn as a dashed line. You can customize the dashes by supplying an Array. Array format: Array of numbers, gap length, dash length, gap length, dash length, ... etc. The array is repeated until the distance is filled. When using dashed lines in IE versions older than 11, the line will be drawn straight, not smooth.}
\item{hidden}{: Boolean. Default to false. When true, the edge is not drawn. It is still part of the physics simulation however!}
\item{hoverWidth}{: Number or Function. Default to 0.5. Assuming the hover behaviour is enabled in the interaction module, the hoverWidth determines the width of the edge when the user hovers over it with the mouse. If a number is supplied, this number will be added to the width. Because the width can be altered by the value and the scaling functions, a constant multiplier or added value may not give the best results. To solve this, you can supply a function.}
\item{id}{: String. Default to undefined. The id of the edge. The id is optional for edges. When not supplied, an UUID will be assigned to the edge.}
\item{physics}{: Boolean. Default to true. When true, the edge is part of the physics simulation. When false, it will not act as a spring.}
\item{selectionWidth}{: Number or Function. Default to 1. The selectionWidth determines the width of the edge when the edge is selected. If a number is supplied, this number will be added to the width. Because the width can be altered by the value and the scaling functions, a constant multiplier or added value may not give the best results. To solve this, you can supply a function.}
\item{selfReferenceSize}{: Number. Default to false. When the to and from nodes are the same, a circle is drawn. This is the radius of that circle.}
\item{labelHighlightBold}{: Boolean. Default to true. Determines whether or not the label becomes bold when the edge is selected.}
\item{color}{: Named list or String. Default to named list. Color information of the edge in every situation. Can be 'rgba(120,32,14,1)', '#97C2FC' or 'red'.
\itemize{
\item{"color"}{ : String. Default to '#848484. The color of the edge when it is not selected or hovered over (assuming hover is enabled in the interaction module).}
\item{"highlight "}{ : String. Default to '#848484'. The color the edge when it is selected.}
\item{"hover"}{ : String. Default to '#848484'. The color the edge when the mouse hovers over it (assuming hover is enabled in the interaction module).}
\item{"inherit"}{ : String or Boolean. Default to 'from'. When color, highlight or hover are defined, inherit is set to false! Supported options are: true, false, 'from','to','both'.}
\item{"opacity"}{ : Number. Default to 1.0. It can be useful to set the opacity of an edge without manually changing all the colors. The allowed range of the opacity option is between 0 and 1.}
}}
\item{font}{: Named list or String. This object defines the details of the label. A shorthand is also supported in the form 'size face color' for example: '14px arial red'
\itemize{
\item{"color"}{ : String. Default to '#343434'. Color of the label text.}
\item{"size"}{ : Number. Default to 14. Size of the label text.}
\item{"face"}{ : String. Default to 'arial. Font face (or font family) of the label text.}
\item{"background"}{ : String. Default to undefined. When not undefined but a color string, a background rectangle will be drawn behind the label in the supplied color.}
\item{"strokeWidth"}{ : Number. Default to 2. As an alternative to the background rectangle, a stroke can be drawn around the text. When a value higher than 0 is supplied, the stroke will be drawn.}
\item{"strokeColor"}{ : String. Default to '#ffffff'. This is the color of the stroke assuming the value for stroke is higher than 0.}
\item{"align"}{ : String. Default to 'horizontal'. Possible options: 'horizontal','top','middle','bottom'. The alignment determines how the label is aligned over the edge. The default value horizontal aligns the label horizontally, regardless of the orientation of the edge. When an option other than horizontal is chosen, the label will align itself according to the edge.}
\item{"vadjust, multi, bold, ital, boldital, mono"}{See \link{visDocumentation}}
}}
\item{arrows}{: Named list or String. To draw an arrow with default settings a string can be supplied. For example: 'to, from,middle' or 'to;from', any combination with any seperating symbol is fine. If you want to control the size of the arrowheads, you can supply an object.
\itemize{
\item{"to"}{ : Named list or Boolean. Default to Named list. When true, an arrowhead on the 'to' side of the edge is drawn, pointing to the 'to' node with default settings. To customize the size of the arrow, supply an object.
\itemize{
\item{"enabled"}{ : Boolean. Default to false. Toggle the arrow on or off. This option is optional, if undefined and the scaleFactor property is set, enabled will be set to true.}
\item{"scaleFactor"}{ : Number. Default to 1. The scale factor allows you to change the size of the arrowhead.}
\item{"type"}{ : Character. Default to 'arrow'. The type of endpoint. Also possible is 'circle'.}
}
}
\item{"middle"}{ : Named list or Boolean. Default to Named list. Exactly the same as the to object but with an arrowhead in the center node of the edge.}
\item{"from "}{ : Named list or Boolean. Default to Named list. Exactly the same as the to object but with an arrowhead at the from node of the edge.}
}}
\item{arrowStrikethrough}{: Boolean. Default to True. When false, the edge stops at the arrow. This can be useful if you have thick lines and you want the arrow to end in a point. Middle arrows are not affected by this.}
\item{smooth}{: Boolean | named list. Default to named list. When true, the edge is drawn as a dynamic quadratic bezier curve. The drawing of these curves takes longer than that of straight curves but it looks better.
\itemize{
\item{"enabled"}{ : Boolean. Default to true. Toggle smooth curves on and off. This is an optional option. If any of the other properties in this object are set, this option will be set to true.}
\item{"type"}{ : String. Default to 'dynamic'. Possible options: 'dynamic', 'continuous', 'discrete', 'diagonalCross', 'straightCross', 'horizontal', 'vertical', 'curvedCW', 'curvedCCW', 'cubicBezier'.}
\item{"roundness"}{ : Number. Default to 0.5. Accepted range: 0 .. 1.0. This parameter tweaks the roundness of the smooth curves for all types EXCEPT dynamic.}
\item{"forceDirection"}{ : String or Boolean. Default to false. Accepted options: ['horizontal', 'vertical', 'none']. This options is only used with the cubicBezier curves. When true, horizontal is chosen, when false, the direction that is larger (x distance between nodes vs y distance between nodes) is used. If the x distance is larger, horizontal. This is ment to be used with hierarchical layouts. }
}}
\item{shadow}{: Boolean | named list. Default to false. When true, the edges casts a shadow using the default settings. This can be further refined by supplying a list
\itemize{
\item{"enabled"}{ : Boolean. Default to false. Toggle the casting of shadows. If this option is not defined, it is set to true if any of the properties in this object are defined.}
\item{"color"}{ : String. Default to 'rgba(0,0,0,0.5)'. The color of the shadow as a string. Supported formats are 'rgb(255,255,255)', 'rgba(255,255,255,1)' and '#FFFFFF'.}
\item{"size"}{ : Number. Default to 10. The blur size of the shadow.}
\item{"x"}{ : Number. Default to 5. The x offset.}
\item{"y"}{ : Number. Default to 5. The y offset.}
}}
\item{scaling}{: Named list. If the value option is specified, the size of the edges will be scaled according to the properties in this object.
\itemize{
\item{"min"}{ : Number. Default to 10. If edges have a value, their sizes are determined by the value, the scaling function and the min max values.}
\item{"max"}{ : Number. Default to 30. This is the maximum allowed size when the edges are scaled using the value option.}
\item{"label"}{ : Named list or Boolean. Default to Named list. This can be false if the label is not allowed to scale with the node. If true it will scale using default settigns. For further customization, you can supply an object.
\itemize{
\item{"enabled"}{ : Boolean. Default to false. Toggle the scaling of the label on or off. If this option is not defined, it is set to true if any of the properties in this object are defined.}
\item{"min"}{ : Number. Default to 14. The minimum font-size used for labels when scaling.}
\item{"max"}{ : Number. Default to 30. The maximum font-size used for labels when scaling.}
\item{"maxVisible"}{ : Number. Default to 30. When zooming in, the font is drawn larger as well. You can limit the perceived font size using this option. If set to 30, the font will never look larger than size 30 zoomed at 100\%.}
\item{"drawThreshold"}{ : Number. Default to 5. When zooming out, the font will be drawn smaller. This defines a lower limit for when the font is drawn. When using font scaling, you can use this together with the maxVisible to first show labels of important nodes when zoomed out and only show the rest when zooming in.}
}
}
\item{"customScalingFunction"}{ : Function. If nodes have value fields, this function determines how the size of the nodes are scaled based on their values.}
}}
\item{widthConstraint}{: Number, boolean or list. If false (default), no widthConstraint is applied. If a number is specified, the maximum width of the edge's label is set to the value. The edge's label's lines will be broken on spaces to stay below the maximum.
\itemize{
\item{"maximum"}{ : Boolean. If a number is specified, the maximum width of the edge's label is set to the value. The edge's label's lines will be broken on spaces to stay below the maximum.}
}}
\item{chosen}{: See \link{visDocumentation}}
}
\description{
Network visualization edges options. For full documentation, have a look at \link{visDocumentation}.
}
\examples{
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3))
# arrows
visNetwork(nodes, edges) \%>\% visEdges(arrows = 'from')
visNetwork(nodes, edges) \%>\% visEdges(arrows = 'to, from')
visNetwork(nodes, edges) \%>\%
visEdges(arrows = list(to = list(enabled = TRUE,
scaleFactor = 2, type = 'circle')))
# smooth
visNetwork(nodes, edges) \%>\% visEdges(smooth = FALSE)
visNetwork(nodes, edges) \%>\% visEdges(smooth = list(enabled = TRUE, type = "diagonalCross"))
# width
visNetwork(nodes, edges) \%>\% visEdges(width = 10)
# color
visNetwork(nodes, edges) \%>\% visEdges(color = list(hover = "green")) \%>\%
visInteraction(hover = TRUE)
visNetwork(nodes, edges) \%>\% visEdges(color = "red")
visNetwork(nodes, edges) \%>\% visEdges(color = list(color = "red", highlight = "yellow"))
# shadow
visNetwork(nodes, edges) \%>\% visEdges(shadow = TRUE)
visNetwork(nodes, edges) \%>\% visEdges(shadow = list(enabled = TRUE, size = 5))
# dashes
# globally
visNetwork(nodes, edges) \%>\% visEdges(dashes = TRUE)
# set configuration individualy
# have to use specific notation...
nodes <- data.frame(id = 1:3)
edges <- data.frame(from = c(1,2), to = c(1,3),
dashes = c("[10,10,2,2]", "false"))
visNetwork(nodes, edges)
edges <- data.frame(from = c(1,2), to = c(1,3),
dashes = c("[10,10,2,2]", "[2]"))
visNetwork(nodes, edges)
}
\references{
See online documentation \url{http://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
\link{visLegend} for adding legend, \link{visOptions} for custom option, \link{visLayout} & \link{visHierarchicalLayout} for layout,
\link{visPhysics} for control physics, \link{visInteraction} for interaction, \link{visNetworkProxy} & \link{visFocus} & \link{visFit} for animation within shiny,
\link{visDocumentation}, \link{visEvents}, \link{visConfigure} ...
}
|
## Load libraries and import the raw CNAE-2009 structure workbook.
## NOTE(review): lubridate is loaded but no date handling is visible in
## this script -- confirm it is actually needed.
library(tidyverse)
library(readxl)
library(lubridate)
library(janitor)
cnae2009_raw <- read_xls("data/dictionaries/estructura_cnae2009.xls")
## Clean column names (janitor::clean_names) and strip surrounding
## whitespace from the activity titles.
cnae2009_raw2 <- cnae2009_raw %>%
clean_names() %>%
mutate(titulo_cnae2009=str_trim(titulo_cnae2009))
## Sections data frame - first level: one-letter alphabetic code (A-Z).
cnae2009_1digito <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "[A-Z]")) %>%
select(cnae2009_seccion_1digito_cod=cod_cnae2009, cnae2009_seccion_1digito_nombre=titulo_cnae2009)
## Divisions data frame - second level: 2-digit numeric code.
cnae2009_2digitos <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "^[0-9]{2}$")) %>%
select(cnae2009_division_2digitos_cod=cod_cnae2009, cnae2009_division_2digitos_nombre=titulo_cnae2009)
## Groups data frame - third level: 3-digit numeric code.
cnae2009_3digitos <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "^[0-9]{3}$")) %>%
select(cnae2009_grupo_3digitos_cod=cod_cnae2009, cnae2009_grupo_3digitos_nombre=titulo_cnae2009)
## Classes data frame - fourth level: 4-digit numeric code. The parent
## codes (section/division/group) are sliced out of `codintegr`, which
## appears to encode the section letter followed by the 4-digit class
## code -- TODO confirm against the source workbook.
cnae2009_4digitos <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "^[0-9]{4}$")) %>%
select(codintegr, cnae2009_clase_4digitos_cod=cod_cnae2009, cnae2009_clase_4digitos_nombre=titulo_cnae2009) %>%
mutate(cnae2009_grupo_3digitos_cod=str_sub(codintegr, 2, 4),
cnae2009_division_2digitos_cod=str_sub(codintegr, 2, 3),
cnae2009_seccion_1digito_cod=str_sub(codintegr, 1, 1))
## Join the four level tables into a single tidy lookup table, one row
## per 4-digit class with all of its parent levels alongside.
tidy_data1 <- left_join(cnae2009_4digitos, cnae2009_3digitos,
by = "cnae2009_grupo_3digitos_cod")
tidy_data2 <- left_join(tidy_data1, cnae2009_2digitos,
by = "cnae2009_division_2digitos_cod")
tidy_data_final <- left_join(tidy_data2, cnae2009_1digito,
by = "cnae2009_seccion_1digito_cod") %>%
select(cnae2009_seccion_1digito_cod, cnae2009_seccion_1digito_nombre,
cnae2009_division_2digitos_cod, cnae2009_division_2digitos_nombre,
cnae2009_grupo_3digitos_cod, cnae2009_grupo_3digitos_nombre,
cnae2009_clase_4digitos_cod, cnae2009_clase_4digitos_nombre, cnae2009_clase_4digitos_codint=codintegr)
## Write the tidy dictionary to CSV.
write_csv(tidy_data_final, "data/dictionaries/cnae2009_tidy.csv")
|
/2021-04-12_erte-afiliados-sectores/scripts/data_tidying_cnae2009.R
|
no_license
|
jescuderoma/filas-y-columnas
|
R
| false
| false
| 2,536
|
r
|
## Load libraries and import the raw CNAE-2009 structure workbook.
## NOTE(review): lubridate is loaded but no date handling is visible in
## this script -- confirm it is actually needed.
library(tidyverse)
library(readxl)
library(lubridate)
library(janitor)
cnae2009_raw <- read_xls("data/dictionaries/estructura_cnae2009.xls")
## Clean column names (janitor::clean_names) and strip surrounding
## whitespace from the activity titles.
cnae2009_raw2 <- cnae2009_raw %>%
clean_names() %>%
mutate(titulo_cnae2009=str_trim(titulo_cnae2009))
## Sections data frame - first level: one-letter alphabetic code (A-Z).
cnae2009_1digito <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "[A-Z]")) %>%
select(cnae2009_seccion_1digito_cod=cod_cnae2009, cnae2009_seccion_1digito_nombre=titulo_cnae2009)
## Divisions data frame - second level: 2-digit numeric code.
cnae2009_2digitos <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "^[0-9]{2}$")) %>%
select(cnae2009_division_2digitos_cod=cod_cnae2009, cnae2009_division_2digitos_nombre=titulo_cnae2009)
## Groups data frame - third level: 3-digit numeric code.
cnae2009_3digitos <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "^[0-9]{3}$")) %>%
select(cnae2009_grupo_3digitos_cod=cod_cnae2009, cnae2009_grupo_3digitos_nombre=titulo_cnae2009)
## Classes data frame - fourth level: 4-digit numeric code. The parent
## codes (section/division/group) are sliced out of `codintegr`, which
## appears to encode the section letter followed by the 4-digit class
## code -- TODO confirm against the source workbook.
cnae2009_4digitos <- cnae2009_raw2 %>%
filter(str_detect(cod_cnae2009, "^[0-9]{4}$")) %>%
select(codintegr, cnae2009_clase_4digitos_cod=cod_cnae2009, cnae2009_clase_4digitos_nombre=titulo_cnae2009) %>%
mutate(cnae2009_grupo_3digitos_cod=str_sub(codintegr, 2, 4),
cnae2009_division_2digitos_cod=str_sub(codintegr, 2, 3),
cnae2009_seccion_1digito_cod=str_sub(codintegr, 1, 1))
## Join the four level tables into a single tidy lookup table, one row
## per 4-digit class with all of its parent levels alongside.
tidy_data1 <- left_join(cnae2009_4digitos, cnae2009_3digitos,
by = "cnae2009_grupo_3digitos_cod")
tidy_data2 <- left_join(tidy_data1, cnae2009_2digitos,
by = "cnae2009_division_2digitos_cod")
tidy_data_final <- left_join(tidy_data2, cnae2009_1digito,
by = "cnae2009_seccion_1digito_cod") %>%
select(cnae2009_seccion_1digito_cod, cnae2009_seccion_1digito_nombre,
cnae2009_division_2digitos_cod, cnae2009_division_2digitos_nombre,
cnae2009_grupo_3digitos_cod, cnae2009_grupo_3digitos_nombre,
cnae2009_clase_4digitos_cod, cnae2009_clase_4digitos_nombre, cnae2009_clase_4digitos_codint=codintegr)
## Write the tidy dictionary to CSV.
write_csv(tidy_data_final, "data/dictionaries/cnae2009_tidy.csv")
|
#' Read my encrypted token
#'
#' Decrypts a hard-coded, encrypted token string with the supplied key(s).
#'
#' @inheritParams safer::decrypt_string
#' @return The decrypted token, as a character string.
#' @export
token <- function(key, pkey = NULL) {
  # The token is stored encrypted; only the right key recovers it.
  # Fixed: use `<-` for assignment (tidyverse style) and drop the
  # redundant explicit return().
  string <- "OWe2bHi5r2Iak2wa1OxqKOa8+qTbKvDZkYBgwTZsF9ckzeqnv/deS4/LlgR6nFUHbk8ahTetjF4="
  safer::decrypt_string(string, key = key, pkey = pkey)
}
|
/R/token.R
|
no_license
|
fountainer/token
|
R
| false
| false
| 323
|
r
|
#' Read my encrypted token
#'
#' Decrypts a hard-coded, encrypted token string with the supplied key(s).
#'
#' @inheritParams safer::decrypt_string
#' @return The decrypted token, as a character string.
#' @export
token <- function(key, pkey = NULL) {
  # The token is stored encrypted; only the right key recovers it.
  # Fixed: use `<-` for assignment (tidyverse style) and drop the
  # redundant explicit return().
  string <- "OWe2bHi5r2Iak2wa1OxqKOa8+qTbKvDZkYBgwTZsF9ckzeqnv/deS4/LlgR6nFUHbk8ahTetjF4="
  safer::decrypt_string(string, key = key, pkey = pkey)
}
|
# Exercise 2: writing and executing functions (II)
# Write a function `CompareLength` that takes in 2 vectors, and returns the sentence:
# "The difference in lengths is N"
# Return the sentence "The difference in lengths is N" for two vectors.
# Fixed: the original combined a trailing space in the literal with
# paste()'s default sep = " ", producing a double space ("is  N").
CompareLength <- function(v1, v2) {
  # Absolute difference so argument order does not matter.
  dif <- abs(length(v1) - length(v2))
  paste("The difference in lengths is", dif)
}
# Pass two vectors of different length to your `CompareLength` function
v1 <- 1:4
v2 <- 1:10
CompareLength(v1, v2)
# Write a function `DescribeDifference` that will return one of the following statements:
# "Your first vector is longer by N elements"
# "Your second vector is longer by N elements"
# Report which of the two vectors is longer and by how many elements.
# Fixed: the original's extra literal spaces plus paste()'s default
# sep = " " produced "longer by  N  elements" (double spaces), not the
# specified sentence "Your first vector is longer by N elements".
# Note: equal lengths fall through to the "second vector" branch, as in
# the original (the exercise does not specify a tie message).
DescribeDifference <- function(v1, v2) {
  dif <- abs(length(v1) - length(v2))
  if (length(v1) > length(v2)) {
    paste("Your first vector is longer by", dif, "elements")
  } else {
    paste("Your second vector is longer by", dif, "elements")
  }
}
# Pass two vectors to your `DescribeDifference` function
DescribeDifference(v1,v2)
### Bonus ###
# Rewrite your `DescribeDifference` function to tell you the name of the vector which is longer
# Bonus: return the *name* of the longer vector as passed by the caller.
# substitute(v1) captures the caller's unevaluated argument expression;
# deparse() turns it into its source text. Fixed: the original wrapped
# the argument in extra parentheses -- substitute((v1)) -- so it
# returned "(name)" instead of "name".
DescribeDifference2 <- function(v1, v2) {
  if (length(v1) > length(v2)) {
    deparse(substitute(v1))
  } else {
    deparse(substitute(v2))
  }
}
DescribeDifference2(v1,v2)
|
/exercise-2/exercise.R
|
permissive
|
jaketherrien/m7-functions
|
R
| false
| false
| 1,303
|
r
|
# Exercise 2: writing and executing functions (II)
# Write a function `CompareLength` that takes in 2 vectors, and returns the sentence:
# "The difference in lengths is N"
# Return the sentence "The difference in lengths is N" for two vectors.
# Fixed: the original combined a trailing space in the literal with
# paste()'s default sep = " ", producing a double space ("is  N").
CompareLength <- function(v1, v2) {
  # Absolute difference so argument order does not matter.
  dif <- abs(length(v1) - length(v2))
  paste("The difference in lengths is", dif)
}
# Pass two vectors of different length to your `CompareLength` function
v1 <- 1:4
v2 <- 1:10
CompareLength(v1, v2)
# Write a function `DescribeDifference` that will return one of the following statements:
# "Your first vector is longer by N elements"
# "Your second vector is longer by N elements"
# Report which of the two vectors is longer and by how many elements.
# Fixed: the original's extra literal spaces plus paste()'s default
# sep = " " produced "longer by  N  elements" (double spaces), not the
# specified sentence "Your first vector is longer by N elements".
# Note: equal lengths fall through to the "second vector" branch, as in
# the original (the exercise does not specify a tie message).
DescribeDifference <- function(v1, v2) {
  dif <- abs(length(v1) - length(v2))
  if (length(v1) > length(v2)) {
    paste("Your first vector is longer by", dif, "elements")
  } else {
    paste("Your second vector is longer by", dif, "elements")
  }
}
# Pass two vectors to your `DescribeDifference` function
DescribeDifference(v1,v2)
### Bonus ###
# Rewrite your `DescribeDifference` function to tell you the name of the vector which is longer
# Bonus: return the *name* of the longer vector as passed by the caller.
# substitute(v1) captures the caller's unevaluated argument expression;
# deparse() turns it into its source text. Fixed: the original wrapped
# the argument in extra parentheses -- substitute((v1)) -- so it
# returned "(name)" instead of "name".
DescribeDifference2 <- function(v1, v2) {
  if (length(v1) > length(v2)) {
    deparse(substitute(v1))
  } else {
    deparse(substitute(v2))
  }
}
DescribeDifference2(v1,v2)
|
#' link between taxa
#'
#' `geom_taxalink` supports data.frame as input,
#' the `colour`, `size`, `linetype` and `alpha` can be mapped. When the `data` was provided,
#' the `mapping` should be also provided, which `taxa1` and `taxa2` should be mapped created
#' by `aes`, `aes_` or `aes_string`. In addition, the `hratio`, control the height of curve line,
#' when tree layout is `cirular`, default is 1. `ncp`, the number of control points used to draw the
#' curve, more control points creates a smoother curve, default is 1. They also can be mapped to
#' a column of data.
#'
#' @param data data.frame, The data to be displayed in this layer, default is NULL.
#' @param mapping Set of aesthetic mappings, default is NULL.
#' @param taxa1 can be label or node number.
#' @param taxa2 can be label or node number.
#' @param offset numeric, control the shift of curve line (the ratio of axis value,
#' range is "(0-1)"), default is NULL.
#' @param outward logical, control the orientation of curve when the layout of tree is circular,
#' fan or other layout in polar coordinate, default is "auto", meaning It will automatically.
#' @param ..., additional parameter.
#' @section Aesthetics:
#' \code{geom_taxalink()} understands the following aesthethics (required aesthetics are in bold):
#' \itemize{
#' \item \strong{\code{taxa1}} label or node number of tree.
#' \item \strong{\code{taxa2}} label or node number of tree.
#' \item \code{group} group category of link.
#' \item \code{colour} control the color of line, default is black.
#' \item \code{linetype} control the type of line, default is 1 (solid).
#' \item \code{size} control the width of line, default is 0.5.
#' \item \code{curvature} control the curvature of line, default is 0.5,
#' it will be created automatically in polar coordinate .
#' \item \code{hratio} control the height of curve line, default is 1.
#' \item \code{ncp} control the smooth of curve line, default is 1.
#' }
#' @return a list object.
#' @export
geom_taxalink <- function(data=NULL,
                          mapping=NULL,
                          taxa1=NULL,
                          taxa2=NULL,
                          offset = NULL,
                          outward = "auto",
                          ...){
    # Backward compatibility: older code passed taxa1/taxa2 positionally,
    # so they arrive here bound to `data` and `mapping` (both character).
    legacy_call <- is.character(data) && is.character(mapping)
    if (legacy_call) {
        message("taxa1 and taxa2 is not in the 1st and 2nd positions of the parameter list.\n",
                "Please specify parameter name in future as this backward compatibility will be removed.\n" )
        taxa1 <- data
        taxa2 <- mapping
        data <- NULL
        mapping <- NULL
    }
    # Return a classed specification rather than a ggplot layer; the
    # ggtree plotting machinery consumes this "taxalink" object later.
    structure(
        list(data = data,
             mapping = mapping,
             taxa1 = taxa1,
             taxa2 = taxa2,
             offset = offset,
             outward = outward,
             params = list(...)),
        class = 'taxalink')
}
## ##' link between taxa
## ##'
## ##'
## ##' @title geom_taxalink
## ##' @param taxa1 taxa1, can be label or node number
## ##' @param taxa2 taxa2, can be label or node number
## ##' @param curvature A numeric value giving the amount of curvature.
## ##' Negative values produce left-hand curves,
## ##' positive values produce right-hand curves, and zero produces a straight line.
## ##' @param arrow specification for arrow heads, as created by arrow().
## ##' @param arrow.fill fill color to use for the arrow head (if closed). `NULL` means use `colour` aesthetic.
## ##' @param offset numeric, control the shift of curve line (the ratio of axis value,
## ##' range is "(0-1)"), default is NULL.
## ##' @param hratio numeric, the height of curve line, default is 1.
## ##' @param outward logical, control the orientation of curve when the layout of tree is circular,
## ##' fan or other layout in polar coordinate, default is TRUE.
## ##' @param ... additional parameter.
## ##' @return ggplot layer
## ##' @export
## ##' @author Guangchuang Yu
## geom_taxalink <- function(taxa1, taxa2, curvature=0.5, arrow = NULL,
## arrow.fill = NULL, offset=NULL, hratio=1,
## outward = TRUE, ...) {
## position = "identity"
## show.legend = NA
## na.rm = TRUE
## inherit.aes = FALSE
## mapping <- aes_(x=~x, y=~y, node=~node, label=~label, xend=~x, yend=~y)
## layer(stat=StatTaxalink,
## mapping=mapping,
## data = NULL,
## geom=GeomCurvelink,
## position='identity',
## show.legend=show.legend,
## inherit.aes = inherit.aes,
## params = list(taxa1 = taxa1,
## taxa2 = taxa2,
## curvature = curvature,
## na.rm = na.rm,
## arrow = arrow,
## arrow.fill = arrow.fill,
## offset = offset,
## hratio = hratio,
## outward = outward,
## ...),
## check.aes = FALSE
## )
## }
## StatTaxalink <- ggproto("StatTaxalink", Stat,
## compute_group = function(self, data, scales, params, taxa1, taxa2, offset) {
## node1 <- taxa2node(data, taxa1)
## node2 <- taxa2node(data, taxa2)
## x <- data$x
## y <- data$y
## if (!is.null(offset)){
## tmpshift <- offset * (max(x, na.rm=TRUE)-min(x, na.rm=TRUE))
## data.frame(x = x[node1] + tmpshift,
## xend = x[node2] + tmpshift,
## y = y[node1],
## yend = y[node2])
## }else{
## data.frame(x = x[node1],
## xend = x[node2],
## y = y[node1],
## yend = y[node2])
## }
## },
## required_aes = c("x", "y", "xend", "yend")
## )
# Thin wrapper around ggplot2::layer() that wires up GeomCurvelink.
# Curve-specific options travel to the geom through `params`/`...`.
geom_curvelink <- function(data=NULL,
                           mapping=NULL,
                           stat = "identity",
                           position = "identity",
                           arrow = NULL,
                           arrow.fill = NULL,
                           lineend = "butt",
                           na.rm = FALSE,
                           show.legend = NA,
                           inherit.aes = TRUE,...){
    # Collect the fixed geom parameters first, then append anything the
    # caller forwarded via `...`.
    geom_params <- c(list(arrow = arrow,
                          arrow.fill = arrow.fill,
                          lineend = lineend,
                          na.rm = na.rm),
                     list(...))
    layer(data = data,
          mapping = mapping,
          stat = stat,
          geom = GeomCurvelink,
          position = position,
          show.legend = show.legend,
          inherit.aes = inherit.aes,
          params = geom_params)
}
#' @importFrom ggplot2 GeomSegment
#' @importFrom grid gTree curveGrob gpar
#' @importFrom scales alpha
# ggproto geom that draws each link as a grid::curveGrob. It extends
# GeomSegment (same required x/y/xend/yend) and adds per-row curve
# aesthetics: curvature, hratio, ncp, curveangle and square.
GeomCurvelink <- ggproto("GeomCurvelink", GeomSegment,
required_aes = c("x", "y", "xend", "yend"),
default_aes = aes(colour = "black", size = 0.5, linetype = 1, alpha = NA, curvature=0.5, hratio=1, ncp=1, curveangle=90, square=FALSE),
draw_panel = function(data, panel_params, coord, shape=0.5, outward=TRUE,
arrow = NULL, arrow.fill=NULL, lineend = "butt", na.rm = FALSE) {
# Non-linear coordinates (e.g. polar): transform the start and end
# points separately, then derive a per-link curvature from the
# transformed angles (theta) of the two endpoints.
if (!coord$is_linear()) {
tmpgroup <- data$group
# Split each row into a "start" point (x, y) and an "end" point
# (xend, yend) so both go through coord$transform().
starts <- subset(data, select = c(-xend, -yend))
starts$group <- 1
ends <- rename(subset(data, select = c(-x, -y)), c("x" = "xend", "y" = "yend"))
ends$group <- 2
pieces <- rbind(starts, ends)
trans <- coord$transform(pieces, panel_params)
# Recover the two halves after the joint transform; the temporary
# group column (1 = start, 2 = end) keeps row order aligned.
starts <- trans[trans$group==1, ,drop=FALSE]
ends <- trans[trans$group==2, ,drop=FALSE]
# Curvature sign/magnitude per link from the endpoint angles;
# outward and inward links use different formulas.
if (outward){
curvature <- unlist(mapply(generate_curvature2, starttheta=starts$theta,
endtheta=ends$theta, hratio=starts$hratio, ncp=starts$ncp,
SIMPLIFY=FALSE))
}else{
curvature <- unlist(mapply(generate_curvature, starttheta=starts$theta,
endtheta=ends$theta, hratio=starts$hratio, ncp=starts$ncp,
SIMPLIFY=FALSE))
}
# Reassemble one row per link: start columns plus xend/yend, with
# the original grouping and the computed curvature.
ends <- rename(subset(ends, select=c(x, y)), c("xend"="x", "yend"="y"))
trans <- cbind(starts, ends)
trans$group <- tmpgroup
trans$curvature <- curvature
}else{
# Linear coordinates: a single transform suffices and the mapped
# `curvature` aesthetic is used as-is.
trans <- coord$transform(data, panel_params)
}
# Arrowhead fill falls back to the line colour (see %|||% helper).
arrow.fill <- arrow.fill %|||% trans$colour
# One curveGrob per link, collected into a gList/gTree.
grobs <- lapply(seq_len(nrow(trans)), function(i){
curveGrob(
trans$x[i], trans$y[i], trans$xend[i], trans$yend[i],
default.units = "native",
curvature = trans$curvature[i], angle = trans$curveangle[i], ncp = trans$ncp[i],
square = trans$square[i], squareShape = 1, inflect = FALSE, open = TRUE,
gp = gpar(col = alpha(trans$colour[i], trans$alpha[i]),
fill = alpha(arrow.fill[i], trans$alpha[i]),
lwd = trans$size[i] * .pt,
lty = trans$linetype[i],
lineend = lineend),
arrow = arrow,
shape = shape)})
class(grobs) <- "gList"
return(ggname("geom_curvelink", gTree(children=grobs)))
}
)
# for inward curve lines
# For inward curve lines: map the angular gap between two endpoints on a
# polar layout to a signed curvature. The sign bends the curve along the
# shorter way around the circle; the magnitude shrinks linearly to zero
# as the angular separation approaches pi. `ncp` is accepted for
# interface symmetry with generate_curvature2 but is unused here.
generate_curvature <- function(starttheta, endtheta, hratio, ncp){
    delta <- endtheta - starttheta
    # Angular distance measured the short way around the circle.
    gap <- min(abs(delta), 2*pi - abs(delta))
    # +1 when moving forward by at most pi or backward by more than pi
    # (both mean the short arc runs counter-clockwise), else -1.
    direction <- if ((delta > 0) == (abs(delta) <= pi)) 1 else -1
    hratio * direction * (1 - gap/pi)
}
# for outward curve lines
# For outward curve lines: like generate_curvature() but with the
# opposite sign convention (bends away from the circle centre) and an
# amplified magnitude (pi/gap) once the angular gap exceeds pi/2, so
# widely separated links still arc visibly.
generate_curvature2 <- function(starttheta, endtheta, hratio, ncp){
    delta <- endtheta - starttheta
    # Angular distance measured the short way around the circle.
    gap <- min(abs(delta), 2*pi - abs(delta))
    # Mirror image of the inward variant's sign rule.
    direction <- if ((delta > 0) == (abs(delta) <= pi)) -1 else 1
    if (gap > pi/2){
        hratio * direction * pi/gap
    } else {
        hratio * direction * (1 - gap/pi)
    }
}
#' @importFrom utils getFromNamespace
# Borrow ggplot2's internal ggname() helper (names a grob for the grob
# tree); getFromNamespace() is needed because ggname is not exported.
ggname <- getFromNamespace("ggname", "ggplot2")
"%|||%" <- function(x, y){
if (is.null(x)){
return(y)
}
if (is.null(y)) {
return(x)
}
if (length(x)<length(y)) {
return (y)
} else {
return (x)
}
}
|
/R/geom_taxalink.R
|
no_license
|
huipengxi/ggtree
|
R
| false
| false
| 11,644
|
r
|
#' link between taxa
#'
#' `geom_taxalink` supports data.frame as input,
#' the `colour`, `size`, `linetype` and `alpha` can be mapped. When the `data` was provided,
#' the `mapping` should be also provided, which `taxa1` and `taxa2` should be mapped created
#' by `aes`, `aes_` or `aes_string`. In addition, the `hratio`, control the height of curve line,
#' when tree layout is `cirular`, default is 1. `ncp`, the number of control points used to draw the
#' curve, more control points creates a smoother curve, default is 1. They also can be mapped to
#' a column of data.
#'
#' @param data data.frame, The data to be displayed in this layer, default is NULL.
#' @param mapping Set of aesthetic mappings, default is NULL.
#' @param taxa1 can be label or node number.
#' @param taxa2 can be label or node number.
#' @param offset numeric, control the shift of curve line (the ratio of axis value,
#' range is "(0-1)"), default is NULL.
#' @param outward logical, control the orientation of curve when the layout of tree is circular,
#' fan or other layout in polar coordinate, default is "auto", meaning It will automatically.
#' @param ..., additional parameter.
#' @section Aesthetics:
#' \code{geom_taxalink()} understands the following aesthethics (required aesthetics are in bold):
#' \itemize{
#' \item \strong{\code{taxa1}} label or node number of tree.
#' \item \strong{\code{taxa2}} label or node number of tree.
#' \item \code{group} group category of link.
#' \item \code{colour} control the color of line, default is black.
#' \item \code{linetype} control the type of line, default is 1 (solid).
#' \item \code{size} control the width of line, default is 0.5.
#' \item \code{curvature} control the curvature of line, default is 0.5,
#' it will be created automatically in polar coordinate .
#' \item \code{hratio} control the height of curve line, default is 1.
#' \item \code{ncp} control the smooth of curve line, default is 1.
#' }
#' @return a list object.
#' @export
geom_taxalink <- function(data=NULL,
                          mapping=NULL,
                          taxa1=NULL,
                          taxa2=NULL,
                          offset = NULL,
                          outward = "auto",
                          ...){
    # Backward compatibility: older code passed taxa1/taxa2 positionally,
    # so they arrive here bound to `data` and `mapping` (both character).
    legacy_call <- is.character(data) && is.character(mapping)
    if (legacy_call) {
        message("taxa1 and taxa2 is not in the 1st and 2nd positions of the parameter list.\n",
                "Please specify parameter name in future as this backward compatibility will be removed.\n" )
        taxa1 <- data
        taxa2 <- mapping
        data <- NULL
        mapping <- NULL
    }
    # Return a classed specification rather than a ggplot layer; the
    # ggtree plotting machinery consumes this "taxalink" object later.
    structure(
        list(data = data,
             mapping = mapping,
             taxa1 = taxa1,
             taxa2 = taxa2,
             offset = offset,
             outward = outward,
             params = list(...)),
        class = 'taxalink')
}
## ##' link between taxa
## ##'
## ##'
## ##' @title geom_taxalink
## ##' @param taxa1 taxa1, can be label or node number
## ##' @param taxa2 taxa2, can be label or node number
## ##' @param curvature A numeric value giving the amount of curvature.
## ##' Negative values produce left-hand curves,
## ##' positive values produce right-hand curves, and zero produces a straight line.
## ##' @param arrow specification for arrow heads, as created by arrow().
## ##' @param arrow.fill fill color to use for the arrow head (if closed). `NULL` means use `colour` aesthetic.
## ##' @param offset numeric, control the shift of curve line (the ratio of axis value,
## ##' range is "(0-1)"), default is NULL.
## ##' @param hratio numeric, the height of curve line, default is 1.
## ##' @param outward logical, control the orientation of curve when the layout of tree is circular,
## ##' fan or other layout in polar coordinate, default is TRUE.
## ##' @param ... additional parameter.
## ##' @return ggplot layer
## ##' @export
## ##' @author Guangchuang Yu
## geom_taxalink <- function(taxa1, taxa2, curvature=0.5, arrow = NULL,
## arrow.fill = NULL, offset=NULL, hratio=1,
## outward = TRUE, ...) {
## position = "identity"
## show.legend = NA
## na.rm = TRUE
## inherit.aes = FALSE
## mapping <- aes_(x=~x, y=~y, node=~node, label=~label, xend=~x, yend=~y)
## layer(stat=StatTaxalink,
## mapping=mapping,
## data = NULL,
## geom=GeomCurvelink,
## position='identity',
## show.legend=show.legend,
## inherit.aes = inherit.aes,
## params = list(taxa1 = taxa1,
## taxa2 = taxa2,
## curvature = curvature,
## na.rm = na.rm,
## arrow = arrow,
## arrow.fill = arrow.fill,
## offset = offset,
## hratio = hratio,
## outward = outward,
## ...),
## check.aes = FALSE
## )
## }
## StatTaxalink <- ggproto("StatTaxalink", Stat,
## compute_group = function(self, data, scales, params, taxa1, taxa2, offset) {
## node1 <- taxa2node(data, taxa1)
## node2 <- taxa2node(data, taxa2)
## x <- data$x
## y <- data$y
## if (!is.null(offset)){
## tmpshift <- offset * (max(x, na.rm=TRUE)-min(x, na.rm=TRUE))
## data.frame(x = x[node1] + tmpshift,
## xend = x[node2] + tmpshift,
## y = y[node1],
## yend = y[node2])
## }else{
## data.frame(x = x[node1],
## xend = x[node2],
## y = y[node1],
## yend = y[node2])
## }
## },
## required_aes = c("x", "y", "xend", "yend")
## )
geom_curvelink <- function(data=NULL,
                           mapping=NULL,
                           stat = "identity",
                           position = "identity",
                           arrow = NULL,
                           arrow.fill = NULL,
                           lineend = "butt",
                           na.rm = FALSE,
                           show.legend = NA,
                           inherit.aes = TRUE,...){
    # Thin constructor around ggplot2::layer() that attaches the
    # GeomCurvelink geom; curve-specific arguments plus anything in `...`
    # are forwarded untouched through `params`.
    curve_params <- list(arrow      = arrow,
                         arrow.fill = arrow.fill,
                         lineend    = lineend,
                         na.rm      = na.rm,
                         ...)
    layer(data        = data,
          mapping     = mapping,
          stat        = stat,
          geom        = GeomCurvelink,
          position    = position,
          show.legend = show.legend,
          inherit.aes = inherit.aes,
          params      = curve_params)
}
# Geom that renders each (x, y) -> (xend, yend) pair as a curved grid grob.
# In a non-linear (polar) coordinate system the start and end points are
# transformed separately so their theta angles are available, and a
# per-segment curvature is derived from those angles via
# generate_curvature()/generate_curvature2().
#' @importFrom ggplot2 GeomSegment
#' @importFrom grid gTree curveGrob gpar
#' @importFrom scales alpha
GeomCurvelink <- ggproto("GeomCurvelink", GeomSegment,
       required_aes = c("x", "y", "xend", "yend"),
       default_aes = aes(colour = "black", size = 0.5, linetype = 1, alpha = NA, curvature=0.5, hratio=1, ncp=1, curveangle=90, square=FALSE),
       draw_panel = function(data, panel_params, coord, shape=0.5, outward=TRUE,
                             arrow = NULL, arrow.fill=NULL, lineend = "butt", na.rm = FALSE) {
           if (!coord$is_linear()) {
               # Polar-style coords: split each segment into its start and
               # end point, transform both, then recompute the curvature
               # from the resulting theta angles.
               tmpgroup <- data$group
               starts <- subset(data, select = c(-xend, -yend))
               starts$group <- 1
               ends <- rename(subset(data, select = c(-x, -y)), c("x" = "xend", "y" = "yend"))
               ends$group <- 2
               pieces <- rbind(starts, ends)
               trans <- coord$transform(pieces, panel_params)
               starts <- trans[trans$group==1, ,drop=FALSE]
               ends <- trans[trans$group==2, ,drop=FALSE]
               # `outward` selects which curvature rule is applied per segment.
               if (outward){
                   curvature <- unlist(mapply(generate_curvature2, starttheta=starts$theta,
                                              endtheta=ends$theta, hratio=starts$hratio, ncp=starts$ncp,
                                              SIMPLIFY=FALSE))
               }else{
                   curvature <- unlist(mapply(generate_curvature, starttheta=starts$theta,
                                              endtheta=ends$theta, hratio=starts$hratio, ncp=starts$ncp,
                                              SIMPLIFY=FALSE))
               }
               # Reassemble one row per segment with the computed curvature.
               ends <- rename(subset(ends, select=c(x, y)), c("xend"="x", "yend"="y"))
               trans <- cbind(starts, ends)
               trans$group <- tmpgroup
               trans$curvature <- curvature
           }else{
               trans <- coord$transform(data, panel_params)
           }
           # Fall back to the line colour when no explicit arrow fill is
           # given (length-aware coalescing, see %|||% below).
           arrow.fill <- arrow.fill %|||% trans$colour
           # One curveGrob per row, collected into a single gTree.
           grobs <- lapply(seq_len(nrow(trans)), function(i){
               curveGrob(
                   trans$x[i], trans$y[i], trans$xend[i], trans$yend[i],
                   default.units = "native",
                   curvature = trans$curvature[i], angle = trans$curveangle[i], ncp = trans$ncp[i],
                   square = trans$square[i], squareShape = 1, inflect = FALSE, open = TRUE,
                   gp = gpar(col = alpha(trans$colour[i], trans$alpha[i]),
                             fill = alpha(arrow.fill[i], trans$alpha[i]),
                             lwd = trans$size[i] * .pt,
                             lty = trans$linetype[i],
                             lineend = lineend),
                   arrow = arrow,
                   shape = shape)})
           class(grobs) <- "gList"
           return(ggname("geom_curvelink", gTree(children=grobs)))
       }
)
# for inward curve lines
# Curvature for a link drawn in polar coordinates so it bends towards the
# center.  `ncp` is accepted for mapply-compatibility but not used here.
generate_curvature <- function(starttheta, endtheta, hratio, ncp){
    # Signed angular difference between the two link end points.
    delta <- endtheta - starttheta
    # Smallest angular separation, measured the short way around the circle.
    sep <- min(abs(delta), 2 * pi - abs(delta))
    # Bend direction depends on which side the shorter arc lies on.
    direction <- if (delta > 0) {
        if (delta <= pi) 1 else -1
    } else {
        if (abs(delta) <= pi) -1 else 1
    }
    # Closer points (small separation) get a stronger relative curvature.
    hratio * direction * (1 - sep / pi)
}
# for outward curve lines
# Curvature for a link drawn in polar coordinates so it bends away from the
# center.  Direction is the mirror image of generate_curvature(); widely
# separated points (> pi/2) use a stronger pi/sep scaling.
# `ncp` is accepted for mapply-compatibility but not used here.
generate_curvature2 <- function(starttheta, endtheta, hratio, ncp){
    # Signed angular difference between the two link end points.
    delta <- endtheta - starttheta
    # Smallest angular separation, the short way around the circle.
    sep <- min(abs(delta), 2 * pi - abs(delta))
    # Opposite bend direction to the inward rule.
    direction <- if (delta > 0) {
        if (delta <= pi) -1 else 1
    } else {
        if (abs(delta) <= pi) 1 else -1
    }
    if (sep > pi/2) {
        hratio * direction * pi / sep
    } else {
        hratio * direction * (1 - sep / pi)
    }
}
# Grab ggplot2's unexported grob-naming helper so grobs produced here carry
# the same "geom_*"-style names ggplot2 itself uses in the grid display list.
#' @importFrom utils getFromNamespace
ggname <- getFromNamespace("ggname", "ggplot2")
"%|||%" <- function(x, y){
if (is.null(x)){
return(y)
}
if (is.null(y)) {
return(x)
}
if (length(x)<length(y)) {
return (y)
} else {
return (x)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LowLevelClasses.R
\docType{class}
\name{Count-class}
\alias{Count-class}
\title{An S4 class for a Count}
\description{
A count class provides the number of occurrences of the query and the timeline in which it happens
}
\section{Slots}{
\describe{
\item{\code{Criteria}}{a query class object}
\item{\code{Timeline}}{a timeline class object}
\item{\code{Occurrence}}{an occurrence class object}
}}
|
/man/Count-class.Rd
|
permissive
|
rfherrerac/Capr
|
R
| false
| true
| 473
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LowLevelClasses.R
\docType{class}
\name{Count-class}
\alias{Count-class}
\title{An S4 class for a Count}
\description{
A count class provides the number of occurrences of the query and the timeline in which it happens
}
\section{Slots}{
\describe{
\item{\code{Criteria}}{a query class object}
\item{\code{Timeline}}{a timeline class object}
\item{\code{Occurrence}}{an occurrence class object}
}}
|
# Compare the reaction from different sources: RAVEN, kegg and eggnog
# Revised by Hongzhong 2019-8-5
#
# Workflow: map candidate reactions from each source (RAVEN/biocyc,
# RAVEN/kegg, KEGG web service, eggNOG web service, EC-number based) onto
# MetaNetX (MNX) reaction IDs, then compare the sources with Venn diagrams.
# findRxnMNXid(), getRxnInfFromMNX(), findRxnMNXidFromEC() and
# splitAndCombine() come from hongR / function_general.R.
# load library
library(readxl)
library(stringr)
library(tidyverse)
library(hongR)
source('function_general.R')
#-----------------------------------------------------------
# initially compare the new RXN from different sources
#-----------------------------------------------------------
# RAVEN biocyc
# newRxn_biocyc <- read.table("data/newRxn_biocyc_RAVEN.txt", header= TRUE, stringsAsFactors = FALSE)
newRxn_biocyc <- read.table("data/newRxn_biocyc_RAVEN_55_110.txt", header= TRUE, stringsAsFactors = FALSE)
newRxn_biocyc$MNXID <- findRxnMNXid(rxnID = newRxn_biocyc$ID, id_type = 'metacyc')
newRxn_biocyc <- getRxnInfFromMNX(newRxn_biocyc, newRxn_biocyc$MNXID)
# RAVEN KEGG
newRxn_kegg <- read.table("data/newRxn_kegg_RAVEN.txt", header= TRUE, stringsAsFactors = FALSE)
newRxn_kegg$MNXID <- findRxnMNXid(rxnID = newRxn_kegg$ID, id_type = 'kegg')
newRxn_kegg <- getRxnInfFromMNX(newRxn_kegg, newRxn_kegg$MNXID)
# KEGG and eggnog web services
newRxn_kegg_eggnog <- read.table("data/newRxn_all based on kegg and eggnog annotation.txt", header= TRUE, stringsAsFactors = FALSE)
# strip the KEGG "rn:" prefix so the IDs match the MNX lookup
newRxn_kegg_eggnog$rxnID <- str_replace_all(newRxn_kegg_eggnog$rxnID, "rn:", "")
newRxn_kegg_eggnog$MNXID <- findRxnMNXid(rxnID = newRxn_kegg_eggnog$rxnID, id_type = 'kegg')
newRxn_kegg_eggnog <- getRxnInfFromMNX(newRxn_kegg_eggnog, newRxn_kegg_eggnog$MNXID)
rxn_kegg_web <- newRxn_kegg_eggnog[str_detect(newRxn_kegg_eggnog$type, 'kegg'),]
rxn_eggnog_web <- newRxn_kegg_eggnog[str_detect(newRxn_kegg_eggnog$type, 'eggnog'),]
# compare the common reaction from raven and from kegg and eggnog directly
# plot the venn graph
kegg_web <- unique(rxn_kegg_web$MNXID)
eggnog_web <- unique(rxn_eggnog_web$MNXID)
RAVEN_kegg <- unique(newRxn_kegg$MNXID)
RAVEN_biocyc <- unique(newRxn_biocyc$MNXID)
new_rxn_all <- unique(c(kegg_web, eggnog_web, RAVEN_kegg, RAVEN_biocyc))
#plot the graph
VennDiagram::venn.diagram(x= list(kegg_web = kegg_web, eggnog_web = eggnog_web, RAVEN_kegg = RAVEN_kegg, RAVEN_biocyc = RAVEN_biocyc),
             filename = "result/new reactions for 332 yeast species from different sources.png", height = 1000, width = 1000,resolution =300, imagetype="png", col="transparent",
             fill=c("blue","green","red", "grey"),alpha = 0.50, cex=0.45, cat.cex=0.45)
#---------------------------------------------
# if we only choose the balanced reactions
#---------------------------------------------
newRxn_biocyc_b <- newRxn_biocyc[newRxn_biocyc$balance_MNX=='true', ]
newRxn_kegg_b <- newRxn_kegg[newRxn_kegg$balance_MNX=='true', ]
newRxn_kegg_eggnog_b <- newRxn_kegg_eggnog[newRxn_kegg_eggnog$balance_MNX=='true', ]
rxn_kegg_web <- newRxn_kegg_eggnog_b[str_detect(newRxn_kegg_eggnog_b$type, 'kegg'),]
rxn_eggnog_web <- newRxn_kegg_eggnog_b[str_detect(newRxn_kegg_eggnog_b$type, 'eggnog'),]
kegg_web <- unique(rxn_kegg_web$MNXID)
eggnog_web <- unique(rxn_eggnog_web$MNXID)
RAVEN_kegg <- unique(newRxn_kegg_b$MNXID)
RAVEN_biocyc <- unique(newRxn_biocyc_b$MNXID)
#plot the graph
# BUGFIX: venn.diagram was called unqualified here although VennDiagram is
# never attached with library(); qualify it so the script runs standalone.
VennDiagram::venn.diagram(x= list(kegg_web = kegg_web, eggnog_web = eggnog_web, RAVEN_kegg = RAVEN_kegg, RAVEN_biocyc = RAVEN_biocyc),
             filename = "result/new balanced reactions for 332 yeast species from different sources.png", height = 1000, width = 1000,resolution =300, imagetype="png", col="transparent",
             fill=c("blue","green","red", "grey"),alpha = 0.50, cex=0.45, cat.cex=0.45)
#----------------------------------------------------------------------------------------------
# specially, here we found MNXID for the panID through the EC number based on eggnog annotation
#----------------------------------------------------------------------------------------------
newEC_eggnog <- read.table("data/new EC based eggnog annotation.txt", header= TRUE, stringsAsFactors = FALSE)
newEC_eggnog$MNXID <- findRxnMNXidFromEC(newEC_eggnog$EC)
newEC_eggnog$rxn_num <- str_count(newEC_eggnog$MNXID, ";")
# as an EC number can map to many reactions, keep ECs with no more than 5 rxns
newEC_eggnog_filter <- newEC_eggnog[newEC_eggnog$rxn_num <= 5,]
newEC_eggnog0 <- splitAndCombine(newEC_eggnog_filter$MNXID, newEC_eggnog_filter$query, sep0 = ";")
rxn_ec1 <- unique(newEC_eggnog0$v1)
# Also we found MNXID for the panID through the EC number based on deepec
newEC_deepec <- read.table("data/newEC_predicted_by_deep_ec_for_pan_genome.txt", header= TRUE, stringsAsFactors = FALSE)
newEC_deepec$MNXID <- findRxnMNXidFromEC(newEC_deepec$Predicted.EC.number)
newEC_deepec$rxn_num <- str_count(newEC_deepec$MNXID, ";")
# as an EC number can map to many reactions, keep ECs with no more than 5 rxns
newEC_deepec_filter <- newEC_deepec[newEC_deepec$rxn_num <= 5,]
newEC_deepec0 <- splitAndCombine(newEC_deepec_filter$MNXID, newEC_deepec_filter$Query.ID, sep0 = ";")
rxn_ec2 <- unique(newEC_deepec0$v1)
# combine new EC from different sources and keep only mass-balanced reactions
rxn_ec_all <- union(rxn_ec1, rxn_ec2)
rxn_ec_all0 <- data.frame(MNXID = rxn_ec_all, stringsAsFactors = FALSE)
rxn_ec_all0 <- getRxnInfFromMNX(rxn_ec_all0, rxn_ec_all0$MNXID)
rxn_ec_all0_b <- rxn_ec_all0[rxn_ec_all0$balance_MNX=='true', ]
rxn_ec_combine <- unique(rxn_ec_all0_b$MNXID)
# plot the graph
# BUGFIX: namespace-qualified for the same reason as above.
VennDiagram::venn.diagram(x= list(kegg_web = kegg_web, rxn_ec = rxn_ec_combine, RAVEN_kegg = RAVEN_kegg, RAVEN_biocyc = RAVEN_biocyc),
             filename = "result/new balanced reactions for 332 yeast species with rxn found by EC number.png", height = 1000, width = 1000,resolution =300, imagetype="png", col="transparent",
             fill=c("blue","green","red", "grey"),alpha = 0.50, cex=0.45, cat.cex=0.45)
/rxn_annotate/Compare new RXN from different source.R
|
permissive
|
SysBioChalmers/Yeast-Species-GEMs
|
R
| false
| false
| 5,681
|
r
|
# Compare the reaction from different sources: RAVEN, kegg and eggnog
# Revised by Hongzhong 2019-8-5
#
# Workflow: map candidate reactions from each source (RAVEN/biocyc,
# RAVEN/kegg, KEGG web service, eggNOG web service, EC-number based) onto
# MetaNetX (MNX) reaction IDs, then compare the sources with Venn diagrams.
# findRxnMNXid(), getRxnInfFromMNX(), findRxnMNXidFromEC() and
# splitAndCombine() come from hongR / function_general.R.
# load library
library(readxl)
library(stringr)
library(tidyverse)
library(hongR)
source('function_general.R')
#-----------------------------------------------------------
# initially compare the new RXN from different sources
#-----------------------------------------------------------
# RAVEN biocyc
# newRxn_biocyc <- read.table("data/newRxn_biocyc_RAVEN.txt", header= TRUE, stringsAsFactors = FALSE)
newRxn_biocyc <- read.table("data/newRxn_biocyc_RAVEN_55_110.txt", header= TRUE, stringsAsFactors = FALSE)
newRxn_biocyc$MNXID <- findRxnMNXid(rxnID = newRxn_biocyc$ID, id_type = 'metacyc')
newRxn_biocyc <- getRxnInfFromMNX(newRxn_biocyc, newRxn_biocyc$MNXID)
# RAVEN KEGG
newRxn_kegg <- read.table("data/newRxn_kegg_RAVEN.txt", header= TRUE, stringsAsFactors = FALSE)
newRxn_kegg$MNXID <- findRxnMNXid(rxnID = newRxn_kegg$ID, id_type = 'kegg')
newRxn_kegg <- getRxnInfFromMNX(newRxn_kegg, newRxn_kegg$MNXID)
# KEGG and eggnog web services
newRxn_kegg_eggnog <- read.table("data/newRxn_all based on kegg and eggnog annotation.txt", header= TRUE, stringsAsFactors = FALSE)
# strip the KEGG "rn:" prefix so the IDs match the MNX lookup
newRxn_kegg_eggnog$rxnID <- str_replace_all(newRxn_kegg_eggnog$rxnID, "rn:", "")
newRxn_kegg_eggnog$MNXID <- findRxnMNXid(rxnID = newRxn_kegg_eggnog$rxnID, id_type = 'kegg')
newRxn_kegg_eggnog <- getRxnInfFromMNX(newRxn_kegg_eggnog, newRxn_kegg_eggnog$MNXID)
rxn_kegg_web <- newRxn_kegg_eggnog[str_detect(newRxn_kegg_eggnog$type, 'kegg'),]
rxn_eggnog_web <- newRxn_kegg_eggnog[str_detect(newRxn_kegg_eggnog$type, 'eggnog'),]
# compare the common reaction from raven and from kegg and eggnog directly
# plot the venn graph
kegg_web <- unique(rxn_kegg_web$MNXID)
eggnog_web <- unique(rxn_eggnog_web$MNXID)
RAVEN_kegg <- unique(newRxn_kegg$MNXID)
RAVEN_biocyc <- unique(newRxn_biocyc$MNXID)
new_rxn_all <- unique(c(kegg_web, eggnog_web, RAVEN_kegg, RAVEN_biocyc))
#plot the graph
VennDiagram::venn.diagram(x= list(kegg_web = kegg_web, eggnog_web = eggnog_web, RAVEN_kegg = RAVEN_kegg, RAVEN_biocyc = RAVEN_biocyc),
             filename = "result/new reactions for 332 yeast species from different sources.png", height = 1000, width = 1000,resolution =300, imagetype="png", col="transparent",
             fill=c("blue","green","red", "grey"),alpha = 0.50, cex=0.45, cat.cex=0.45)
#---------------------------------------------
# if we only choose the balanced reactions
#---------------------------------------------
newRxn_biocyc_b <- newRxn_biocyc[newRxn_biocyc$balance_MNX=='true', ]
newRxn_kegg_b <- newRxn_kegg[newRxn_kegg$balance_MNX=='true', ]
newRxn_kegg_eggnog_b <- newRxn_kegg_eggnog[newRxn_kegg_eggnog$balance_MNX=='true', ]
rxn_kegg_web <- newRxn_kegg_eggnog_b[str_detect(newRxn_kegg_eggnog_b$type, 'kegg'),]
rxn_eggnog_web <- newRxn_kegg_eggnog_b[str_detect(newRxn_kegg_eggnog_b$type, 'eggnog'),]
kegg_web <- unique(rxn_kegg_web$MNXID)
eggnog_web <- unique(rxn_eggnog_web$MNXID)
RAVEN_kegg <- unique(newRxn_kegg_b$MNXID)
RAVEN_biocyc <- unique(newRxn_biocyc_b$MNXID)
#plot the graph
# BUGFIX: venn.diagram was called unqualified here although VennDiagram is
# never attached with library(); qualify it so the script runs standalone.
VennDiagram::venn.diagram(x= list(kegg_web = kegg_web, eggnog_web = eggnog_web, RAVEN_kegg = RAVEN_kegg, RAVEN_biocyc = RAVEN_biocyc),
             filename = "result/new balanced reactions for 332 yeast species from different sources.png", height = 1000, width = 1000,resolution =300, imagetype="png", col="transparent",
             fill=c("blue","green","red", "grey"),alpha = 0.50, cex=0.45, cat.cex=0.45)
#----------------------------------------------------------------------------------------------
# specially, here we found MNXID for the panID through the EC number based on eggnog annotation
#----------------------------------------------------------------------------------------------
newEC_eggnog <- read.table("data/new EC based eggnog annotation.txt", header= TRUE, stringsAsFactors = FALSE)
newEC_eggnog$MNXID <- findRxnMNXidFromEC(newEC_eggnog$EC)
newEC_eggnog$rxn_num <- str_count(newEC_eggnog$MNXID, ";")
# as an EC number can map to many reactions, keep ECs with no more than 5 rxns
newEC_eggnog_filter <- newEC_eggnog[newEC_eggnog$rxn_num <= 5,]
newEC_eggnog0 <- splitAndCombine(newEC_eggnog_filter$MNXID, newEC_eggnog_filter$query, sep0 = ";")
rxn_ec1 <- unique(newEC_eggnog0$v1)
# Also we found MNXID for the panID through the EC number based on deepec
newEC_deepec <- read.table("data/newEC_predicted_by_deep_ec_for_pan_genome.txt", header= TRUE, stringsAsFactors = FALSE)
newEC_deepec$MNXID <- findRxnMNXidFromEC(newEC_deepec$Predicted.EC.number)
newEC_deepec$rxn_num <- str_count(newEC_deepec$MNXID, ";")
# as an EC number can map to many reactions, keep ECs with no more than 5 rxns
newEC_deepec_filter <- newEC_deepec[newEC_deepec$rxn_num <= 5,]
newEC_deepec0 <- splitAndCombine(newEC_deepec_filter$MNXID, newEC_deepec_filter$Query.ID, sep0 = ";")
rxn_ec2 <- unique(newEC_deepec0$v1)
# combine new EC from different sources and keep only mass-balanced reactions
rxn_ec_all <- union(rxn_ec1, rxn_ec2)
rxn_ec_all0 <- data.frame(MNXID = rxn_ec_all, stringsAsFactors = FALSE)
rxn_ec_all0 <- getRxnInfFromMNX(rxn_ec_all0, rxn_ec_all0$MNXID)
rxn_ec_all0_b <- rxn_ec_all0[rxn_ec_all0$balance_MNX=='true', ]
rxn_ec_combine <- unique(rxn_ec_all0_b$MNXID)
# plot the graph
# BUGFIX: namespace-qualified for the same reason as above.
VennDiagram::venn.diagram(x= list(kegg_web = kegg_web, rxn_ec = rxn_ec_combine, RAVEN_kegg = RAVEN_kegg, RAVEN_biocyc = RAVEN_biocyc),
             filename = "result/new balanced reactions for 332 yeast species with rxn found by EC number.png", height = 1000, width = 1000,resolution =300, imagetype="png", col="transparent",
             fill=c("blue","green","red", "grey"),alpha = 0.50, cex=0.45, cat.cex=0.45)
#' @title Internal bootPLS functions
#'
#' @name internal-bootPLS
#'
#' @description These are not to be called by the user.
#'
#' @aliases ust spls.dv correctp correctp.withoutK spls.Cboot cv.split
#' @author Jérémy Magnanensi, Frédéric Bertrand\cr
#' \email{frederic.bertrand@@utt.fr}\cr
#' \url{https://fbertran.github.io/homepage/}
#'
#' @references A new bootstrap-based stopping criterion in PLS component construction,
#' J. Magnanensi, M. Maumy-Bertrand, N. Meyer and F. Bertrand (2016), in The Multiple Facets of Partial Least Squares and Related Methods,
#' \doi{10.1007/978-3-319-40643-5_18}\cr
#'
#' A new universal resample-stable bootstrap-based stopping criterion for PLS component construction,
#' J. Magnanensi, F. Bertrand, M. Maumy-Bertrand and N. Meyer, (2017), Statistics and Computing, 27, 757–774.
#' \doi{10.1007/s11222-016-9651-4}\cr
#'
#' New developments in Sparse PLS regression, J. Magnanensi, M. Maumy-Bertrand,
#' N. Meyer and F. Bertrand, (2021), Frontiers in Applied Mathematics and Statistics,
#' accepted.
#'
#' @keywords internal
NULL
### For spls
# Univariate soft-thresholding of a coefficient vector: entries whose
# absolute value falls below eta * max|b| are zeroed, the survivors are
# shrunk towards zero by that threshold.  eta >= 1 zeroes everything.
# Returns a length(b) x 1 matrix.
ust <- function (b, eta)
{
    out <- matrix(0, length(b), 1)
    if (eta < 1) {
        shrunk <- abs(b) - eta * max(abs(b))
        keep <- shrunk >= 0
        out[keep] <- shrunk[keep] * sign(b)[keep]
    }
    return(out)
}
# Compute the sparse direction vector c for sPLS from Z = t(X) %*% Y.
# eta is the sparsity (soft-thresholding) parameter; kappa selects the
# multivariate-response formulation (kappa == 0.5 uses an SVD/Procrustes
# update, 0 < kappa < 0.5 a ridge-regularised one); eps/maxstep control
# the alternating iteration.
spls.dv<-function (Z, eta, kappa, eps, maxstep)
{
    p <- nrow(Z)
    q <- ncol(Z)
    # Scale Z by its median absolute value for numerical stability.
    Znorm1 <- median(abs(Z))
    Z <- Z/Znorm1
    if (q == 1) {
        # Univariate response: a single soft-thresholding step suffices.
        c <- ust(Z, eta)
    }
    if (q > 1) {
        M <- Z %*% t(Z)
        dis <- 10
        i <- 1
        if (kappa == 0.5) {
            # Alternate between the orthogonal (Procrustes/SVD) solution
            # for a and soft-thresholding for c until c stabilises.
            c <- matrix(10, p, 1)
            c.old <- c
            while (dis > eps & i <= maxstep) {
                mcsvd <- svd(M %*% c)
                a <- mcsvd$u %*% t(mcsvd$v)
                c <- ust(M %*% a, eta)
                dis <- max(abs(c - c.old))
                c.old <- c
                i <- i + 1
            }
        }
        if (kappa > 0 & kappa < 0.5) {
            kappa2 <- (1 - kappa)/(1 - 2 * kappa)
            c <- matrix(10, p, 1)
            c.old <- c
            # h(lambda) is the secular equation whose root gives the ridge
            # parameter enforcing the norm constraint on alpha.
            h <- function(lambda) {
                alpha <- solve(M + lambda * diag(p)) %*% M %*%
                  c
                obj <- t(alpha) %*% alpha - 1/kappa2^2
                return(obj)
            }
            # Rescale M and c until h changes sign on (eps, 1e30) so that
            # uniroot() below is guaranteed a bracketing interval.
            if (h(eps) * h(1e+30) > 0) {
                while (h(eps) <= 1e+05) {
                  M <- 2 * M
                  c <- 2 * c
                }
            }
            while (dis > eps & i <= maxstep) {
                if (h(eps) * h(1e+30) > 0) {
                  while (h(eps) <= 1e+05) {
                    M <- 2 * M
                    c <- 2 * c
                  }
                }
                lambdas <- uniroot(h, c(eps, 1e+30))$root
                a <- kappa2 * solve(M + lambdas * diag(p)) %*%
                  M %*% c
                c <- ust(M %*% a, eta)
                dis <- max(abs(c - c.old))
                c.old <- c
                i <- i + 1
            }
        }
    }
    return(c)
}
# Validate/normalise the sPLS hyper-parameters.  Invalid eta or K aborts
# with an error; out-of-range kappa and unknown select/fit algorithms are
# reset to safe defaults with a warning.  Returns the corrected values.
correctp = function (x, y, eta, K, kappa, select, fit)
{
    # eta must lie in [0, 1)
    if (min(eta) < 0 | max(eta) >= 1) {
        if (max(eta) == 1)
            stop("eta should be strictly less than 1!")
        msg <- if (length(eta) == 1)
            "eta should be between 0 and 1!"
        else
            "eta should be between 0 and 1! \n Choose appropriate range of eta!"
        stop(msg)
    }
    # K must be a positive integer bounded by both p and n
    if (max(K) > ncol(x))
        stop("K cannot exceed the number of predictors! Pick up smaller K!")
    if (max(K) >= nrow(x))
        stop("K cannot exceed the sample size! Pick up smaller K!")
    if (min(K) <= 0 | !all(K %% 1 == 0)) {
        msg <- if (length(K) == 1)
            "K should be a positive integer!"
        else
            "K should be a positive integer! \n Choose appropriate range of K!"
        stop(msg)
    }
    # kappa outside [0, 0.5] falls back to 0.5
    if (kappa > 0.5 | kappa < 0) {
        warning("kappa should be between 0 and 0.5! kappa=0.5 is used. \n\n")
        kappa <- 0.5
    }
    # unknown selection algorithm falls back to pls2
    if (!select %in% c("pls2", "simpls")) {
        warning("Invalid PLS algorithm for variable selection.\n")
        warning("pls2 algorithm is used. \n\n")
        select <- "pls2"
    }
    # unknown fitting algorithm falls back to simpls
    if (!fit %in% c("simpls", "kernelpls", "widekernelpls", "oscorespls")) {
        warning("Invalid PLS algorithm for model fitting\n")
        warning("simpls algorithm is used. \n\n")
        fit <- "simpls"
    }
    list(K = K, eta = eta, kappa = kappa, select = select, fit = fit)
}
# Validate/normalise sPLS hyper-parameters when the number of components K
# is not supplied.  Invalid eta aborts; out-of-range kappa and unknown
# select/fit algorithms are reset to safe defaults with a warning.
correctp.withoutK = function (x, y, eta, kappa, select, fit)
{
    # eta must lie in [0, 1)
    if (min(eta) < 0 | max(eta) >= 1) {
        if (max(eta) == 1)
            stop("eta should be strictly less than 1!")
        msg <- if (length(eta) == 1)
            "eta should be between 0 and 1!"
        else
            "eta should be between 0 and 1! \n Choose appropriate range of eta!"
        stop(msg)
    }
    # kappa outside [0, 0.5] falls back to 0.5
    if (kappa > 0.5 | kappa < 0) {
        warning("kappa should be between 0 and 0.5! kappa=0.5 is used. \n\n")
        kappa <- 0.5
    }
    # unknown selection algorithm falls back to pls2
    if (!select %in% c("pls2", "simpls")) {
        warning("Invalid PLS algorithm for variable selection.\n")
        warning("pls2 algorithm is used. \n\n")
        select <- "pls2"
    }
    # unknown fitting algorithm falls back to simpls
    if (!fit %in% c("simpls", "kernelpls", "widekernelpls", "oscorespls")) {
        warning("Invalid PLS algorithm for model fitting\n")
        warning("simpls algorithm is used. \n\n")
        fit <- "simpls"
    }
    list(eta = eta, kappa = kappa, select = select, fit = fit)
}
# Sparse PLS fit (SPLS algorithm) extended to also return the Y-loadings
# (coeffC) and scores (tt) of the final pls fit, as needed by the
# bootstrap-based stopping criterion.  At each of the K steps a sparse
# direction is computed (spls.dv), the active set A is grown, and a dense
# PLS model is refit on the selected columns via pls::plsr.
spls.Cboot=function (x, y, K, eta, kappa = 0.5, select = "pls2", fit = "simpls",
    scale.x = TRUE, scale.y = FALSE, eps = 1e-04, maxstep = 100,
    verbose = FALSE)
{
    x <- as.matrix(x)
    n <- nrow(x)
    p <- ncol(x)
    ip <- c(1:p)
    y <- as.matrix(y)
    q <- ncol(y)
    one <- matrix(1, 1, n)
    # Center y and x; optionally scale each to unit variance.
    mu <- one %*% y/n
    y <- scale(y, drop(mu), FALSE)
    meanx <- drop(one %*% x)/n
    x <- scale(x, meanx, FALSE)
    if (scale.x) {
        normx <- sqrt(drop(one %*% (x^2))/(n - 1))
        if (any(normx < .Machine$double.eps)) {
            stop("Some of the columns of the predictor matrix have zero variance.")
        }
        x <- scale(x, FALSE, normx)
    }
    else {
        normx <- rep(1, p)
    }
    if (scale.y) {
        normy <- sqrt(drop(one %*% (y^2))/(n - 1))
        if (any(normy < .Machine$double.eps)) {
            stop("Some of the columns of the response matrix have zero variance.")
        }
        y <- scale(y, FALSE, normy)
    }
    else {
        normy <- rep(1, q)
    }
    betahat <- matrix(0, p, q)
    betamat <- list()
    x1 <- x
    y1 <- y
    # Validate the hyper-parameters (may abort or rewrite them).
    type <- correctp(x, y, eta, K, kappa, select, fit)
    eta <- type$eta
    K <- type$K
    kappa <- type$kappa
    select <- type$select
    fit <- type$fit
    if (is.null(colnames(x))) {
        xnames <- c(1:p)
    }
    else {
        xnames <- colnames(x)
    }
    new2As <- list()
    if (verbose) {cat("The variables that join the set of selected variables at each step:\n")}
    for (k in 1:K) {
        # Sparse direction from the current residual cross-product.
        Z <- t(x1) %*% y1
        what <- spls.dv(Z, eta, kappa, eps, maxstep)
        # Active set: newly selected variables plus those already in use.
        A <- unique(ip[what != 0 | betahat[, 1] != 0])
        new2A <- ip[what != 0 & betahat[, 1] == 0]
        # Dense PLS refit on the active columns.
        xA <- x[, A, drop = FALSE]
        plsfit <- pls::plsr(y ~ xA, ncomp = min(k, length(A)),
            method = fit, scale = FALSE)
        betahat <- matrix(0, p, q)
        betahat[A, ] <- matrix(coef(plsfit), length(A), q)
        betamat[[k]] <- betahat
        pj <- plsfit$projection
        # Deflation for the next step depends on the selection algorithm.
        if (select == "pls2") {
            y1 <- y - x %*% betahat
        }
        if (select == "simpls") {
            pw <- pj %*% solve(t(pj) %*% pj) %*% t(pj)
            x1 <- x
            x1[, A] <- x[, A, drop = FALSE] - x[, A, drop = FALSE] %*%
                pw
        }
        new2As[[k]] <- new2A
        # Progress reporting: newly added variables, 10 names per line.
        if (verbose) {
            if (length(new2A) <= 10) {
                cat(paste("- ", k, "th step (K=", k, "):\n",
                  sep = ""))
                cat(xnames[new2A])
                cat("\n")
            }
            else {
                cat(paste("- ", k, "th step (K=", k, "):\n",
                  sep = ""))
                nlines <- ceiling(length(new2A)/10)
                for (i in 0:(nlines - 2)) {
                  cat(xnames[new2A[(10 * i + 1):(10 * (i + 1))]])
                  cat("\n")
                }
                cat(xnames[new2A[(10 * (nlines - 1) + 1):length(new2A)]])
                cat("\n")
            }
        }
    }
    # Y-loadings and scores of the last fit, kept for the bootstrap criterion.
    coeffC <- pls::Yloadings(plsfit)[,1:min(K, length(A))]
    tt <- pls::scores(plsfit)[,1:min(K, length(A))]
    if (!is.null(colnames(x))) {
        rownames(betahat) <- colnames(x)
    }
    if (q > 1 & !is.null(colnames(y))) {
        colnames(betahat) <- colnames(y)
    }
    object <- list(x = x, y = y, coeffC = coeffC, tt = tt, betahat = betahat, A = A, betamat = betamat,
        new2As = new2As, mu = mu, meanx = meanx, normx = normx,
        normy = normy, eta = eta, K = K, kappa = kappa, select = select,
        fit = fit, projection = pj)
    class(object) <- "spls"
    object
}
# Stratified fold assignment for K-fold cross-validation.
#
# The indices belonging to each class of `y` are shuffled and then dealt
# round-robin into `fold` groups, so every fold keeps roughly the class
# proportions of `y`.
#
# Args:
#   y:    response vector (treated as categorical via table()).
#   fold: number of folds.
# Returns: a list of `fold` integer index vectors partitioning seq_along(y).
cv.split=function (y, fold)
{
    n <- length(y)
    group <- table(y)
    x <- c()
    for (i in 1:length(group)) {
        x.group <- c(1:n)[y == names(group)[i]]
        # BUGFIX: sample(v) on a length-1 numeric v permutes 1:v instead of
        # returning v itself (documented sample() scalar rule), which
        # corrupted folds whenever a class had exactly one member.
        if (length(x.group) > 1) {
            x.group <- sample(x.group)
        }
        x <- c(x, x.group)
    }
    foldi <- split(x, rep(1:fold, length = n))
    return(foldi)
}
# Weighted PLS with case weights V (typically IRLS working weights).
# Extracts K components; type = "pls1" deflates both X and y, type =
# "simpls" deflates X only.  center.x/scale.x are accepted for interface
# compatibility but not used in this implementation.
# Returns the weight (W), score (T), Y-loading (Q) and X-loading (P) matrices.
wpls = function (x, y, V, K = ncol(x), type = "pls1", center.x = TRUE,
    scale.x = FALSE)
{
    n <- nrow(x)
    p <- ncol(x)
    q <- ncol(y)
    Xres <- x
    Yres <- y
    W <- matrix(0, p, K)   # X weight vectors, one column per component
    T <- matrix(0, n, K)   # scores
    Q <- matrix(0, q, K)   # Y loadings
    P <- matrix(0, p, K)   # X loadings
    for (k in seq_len(K)) {
        # Weighted covariance direction, normalised to unit length.
        wk <- t(Xres) %*% as.matrix(V * Yres)
        wk <- wk / sqrt(sum(wk^2))
        W[, k] <- wk
        tk <- Xres %*% wk
        T[, k] <- tk
        # Weighted least-squares loadings for y and X on the score tk.
        qk <- sum(tk * V * Yres) / sum(tk * V * tk)
        Q[, k] <- qk
        pk <- t(as.matrix(tk * V)) %*% Xres / sum(tk * V * tk)
        P[, k] <- pk
        if (type == "pls1") {
            # Deflate both X and y by the fitted component.
            Yres <- Yres - tk %*% qk
            Xres <- Xres - tk %*% pk
        }
        if (type == "simpls") {
            # Project X onto the orthogonal complement of wk.
            proj <- wk %*% solve(t(wk) %*% wk) %*% t(wk)
            Xres <- Xres - Xres %*% proj
        }
    }
    list(W = W, T = T, Q = Q, P = P)
}
### Updating SGPLS function to get T
# Sparse generalized (logistic) PLS fitted by iteratively reweighted least
# squares (IRLS); this variant additionally returns the score matrix (tt)
# and Y-loadings (CoeffC) needed by the bootstrap stopping criterion.
sgpls.T=function (x, y, K, eta, scale.x = TRUE, eps = 1e-05, denom.eps = 1e-20,
    zero.eps = 1e-05, maxstep = 100, br = TRUE, ftype = "iden")
{
    x <- as.matrix(x)
    n <- nrow(x)
    p <- ncol(x)
    ip <- c(1:p)
    y <- as.matrix(y)
    q <- ncol(y)
    one <- matrix(1, 1, n)
    # Center (and optionally scale) the predictors.
    mu <- apply(x, 2, mean)
    x0 <- scale(x, mu, FALSE)
    if (scale.x) {
        sigma <- apply(x, 2, sd)
        x0 <- scale(x0, FALSE, sigma)
    }
    else {
        sigma <- rep(1, ncol(x))
        x0 <- x0
    }
    beta1hat <- matrix(0, p, q)
    beta1hat.old <- beta1hat + 1000
    beta0hat <- 0
    re <- 100
    min.re <- 1000
    nstep <- 0
    nstep.min <- 0
    # Outer IRLS loop: iterate until the relative coefficient change < eps.
    while (re > eps & nstep < maxstep) {
        if (nstep == 0) {
            # Initial working probabilities before any fit exists.
            p0 <- (y + 0.5)/2
            V <- as.vector(p0 * (1 - p0))
            A <- c(1:p)
        }
        else {
            # Current fitted probabilities, clamped away from 0 and 1.
            exp.xb <- exp(beta0hat + x0 %*% beta1hat)
            p0 <- exp.xb/(1 + exp.xb)
            p0[exp.xb == Inf] <- 1 - zero.eps
            p0[p0 < zero.eps] <- zero.eps
            p0[p0 > (1 - zero.eps)] <- 1 - zero.eps
            V <- as.vector(p0 * (1 - p0))
        }
        # ftype chooses the leverage term H used in the bias correction.
        switch(ftype, hat = {
            H <- hat(sweep(cbind(rep(1, n), x0), 1, sqrt(V),
                "*"), intercept = FALSE)
        }, iden = {
            H <- rep(1, n)
        })
        # Working response y0 of the IRLS step; `br` activates a
        # Firth-type bias-reduction adjustment via H.
        if (nstep == 0) {
            y0 <- beta0hat + x0 %*% beta1hat + (y - p0)/V
        }
        else {
            V <- V * (H * br + 1)
            y0 <- beta0hat + x0 %*% beta1hat + (y + H * br/2 -
                (H * br + 1) * p0)/V
        }
        y1 <- y0
        y1 <- y1 - mean(y1)
        x1 <- x0
        A.old <- c()
        # Inner loop: grow the active set A one sparse component at a time.
        for (k in 1:K) {
            Z <- t(x1) %*% as.matrix(V * y1)
            Znorm1 <- median(abs(Z))
            Z <- Z/Znorm1
            what <- ust(Z, eta)
            A <- sort(unique(c(A.old, ip[what != 0])))
            x0A <- x0[, A, drop = FALSE]
            plsfit <- wpls(x0A, y0, V, K = min(k, length(A)),
                type = "pls1", center.x = FALSE, scale.x = FALSE)
            A.old <- A
            # Deflate the working response and active predictors.
            y1 <- y0 - plsfit$T %*% t(plsfit$Q)
            x1 <- x0
            x1[, A] <- x0[, A] - plsfit$T %*% t(plsfit$P)
        }
        # Refit weighted PLS on the final active set of this IRLS step.
        x0A <- x0[, A, drop = FALSE]
        plsfit <- wpls(x0A, y0, V, K = min(K, length(A)), type = "pls1",
            center.x = FALSE, scale.x = FALSE)
        W <- plsfit$W
        T <- plsfit$T
        P <- plsfit$P
        Q <- plsfit$Q
        beta1hat.old <- beta1hat
        beta1hat <- matrix(0, p, q)
        beta1hat[A, ] <- W %*% solve(t(P) %*% W) %*% t(Q)
        beta0hat <- weighted.mean((y0 - T %*% t(Q)), sqrt(V))
        re <- mean(abs(beta1hat - beta1hat.old))/mean(abs(beta1hat.old) +
            denom.eps)
        nstep <- nstep + 1
        # Track the best (smallest relative change) iterate seen so far.
        if (re < min.re & nstep > 1) {
            min.re <- re
            nstep.min <- nstep
            beta1hat.min <- beta1hat
            beta0hat.min <- beta0hat
            A.min <- A
            W.min <- W
        }
    }
    # If IRLS did not converge, fall back to the best iterate recorded.
    if (re > eps) {
        if (nstep.min > 0) {
            converged <- FALSE
            beta1hat <- beta1hat.min
            beta0hat <- beta0hat.min
            A <- A.min
            W <- W.min
        }
    }
    # Stack intercept and slopes, with readable row names.
    betahat <- matrix(c(beta0hat, beta1hat))
    if (!is.null(colnames(x))) {
        rownames(betahat) <- 1:nrow(betahat)
        rownames(betahat)[1] <- "intercept"
        rownames(betahat)[2:nrow(betahat)] <- colnames(x)
    }
    else {
        rownames(betahat) <- c(0, paste("x", 1:p, sep = ""))
        rownames(betahat)[1] <- "intercept"
    }
    object <- list(x = x, y = y, x0 = x0, eta = eta, K = K, CoeffC=Q, tt=T, betahat = betahat,
        A = A, W = W, mu = mu, sigma = sigma)
    class(object) <- "sgpls"
    object
}
|
/R/internal-bootPLS.R
|
no_license
|
fbertran/bootPLS
|
R
| false
| false
| 12,861
|
r
|
#' @title Internal bootPLS functions
#'
#' @name internal-bootPLS
#'
#' @description These are not to be called by the user.
#'
#' @aliases ust spls.dv correctp correctp.withoutK spls.Cboot cv.split
#' @author Jérémy Magnanensi, Frédéric Bertrand\cr
#' \email{frederic.bertrand@@utt.fr}\cr
#' \url{https://fbertran.github.io/homepage/}
#'
#' @references A new bootstrap-based stopping criterion in PLS component construction,
#' J. Magnanensi, M. Maumy-Bertrand, N. Meyer and F. Bertrand (2016), in The Multiple Facets of Partial Least Squares and Related Methods,
#' \doi{10.1007/978-3-319-40643-5_18}\cr
#'
#' A new universal resample-stable bootstrap-based stopping criterion for PLS component construction,
#' J. Magnanensi, F. Bertrand, M. Maumy-Bertrand and N. Meyer, (2017), Statistics and Computing, 27, 757–774.
#' \doi{10.1007/s11222-016-9651-4}\cr
#'
#' New developments in Sparse PLS regression, J. Magnanensi, M. Maumy-Bertrand,
#' N. Meyer and F. Bertrand, (2021), Frontiers in Applied Mathematics and Statistics,
#' accepted.
#'
#' @keywords internal
NULL
### For spls
# Univariate soft-thresholding of a coefficient vector: entries whose
# absolute value falls below eta * max|b| are zeroed, the survivors are
# shrunk towards zero by that threshold.  eta >= 1 zeroes everything.
# Returns a length(b) x 1 matrix.
ust <- function (b, eta)
{
    out <- matrix(0, length(b), 1)
    if (eta < 1) {
        shrunk <- abs(b) - eta * max(abs(b))
        keep <- shrunk >= 0
        out[keep] <- shrunk[keep] * sign(b)[keep]
    }
    return(out)
}
# Compute the sparse direction vector c for sPLS from Z = t(X) %*% Y.
# eta is the sparsity (soft-thresholding) parameter; kappa selects the
# multivariate-response formulation (kappa == 0.5 uses an SVD/Procrustes
# update, 0 < kappa < 0.5 a ridge-regularised one); eps/maxstep control
# the alternating iteration.
spls.dv<-function (Z, eta, kappa, eps, maxstep)
{
    p <- nrow(Z)
    q <- ncol(Z)
    # Scale Z by its median absolute value for numerical stability.
    Znorm1 <- median(abs(Z))
    Z <- Z/Znorm1
    if (q == 1) {
        # Univariate response: a single soft-thresholding step suffices.
        c <- ust(Z, eta)
    }
    if (q > 1) {
        M <- Z %*% t(Z)
        dis <- 10
        i <- 1
        if (kappa == 0.5) {
            # Alternate between the orthogonal (Procrustes/SVD) solution
            # for a and soft-thresholding for c until c stabilises.
            c <- matrix(10, p, 1)
            c.old <- c
            while (dis > eps & i <= maxstep) {
                mcsvd <- svd(M %*% c)
                a <- mcsvd$u %*% t(mcsvd$v)
                c <- ust(M %*% a, eta)
                dis <- max(abs(c - c.old))
                c.old <- c
                i <- i + 1
            }
        }
        if (kappa > 0 & kappa < 0.5) {
            kappa2 <- (1 - kappa)/(1 - 2 * kappa)
            c <- matrix(10, p, 1)
            c.old <- c
            # h(lambda) is the secular equation whose root gives the ridge
            # parameter enforcing the norm constraint on alpha.
            h <- function(lambda) {
                alpha <- solve(M + lambda * diag(p)) %*% M %*%
                  c
                obj <- t(alpha) %*% alpha - 1/kappa2^2
                return(obj)
            }
            # Rescale M and c until h changes sign on (eps, 1e30) so that
            # uniroot() below is guaranteed a bracketing interval.
            if (h(eps) * h(1e+30) > 0) {
                while (h(eps) <= 1e+05) {
                  M <- 2 * M
                  c <- 2 * c
                }
            }
            while (dis > eps & i <= maxstep) {
                if (h(eps) * h(1e+30) > 0) {
                  while (h(eps) <= 1e+05) {
                    M <- 2 * M
                    c <- 2 * c
                  }
                }
                lambdas <- uniroot(h, c(eps, 1e+30))$root
                a <- kappa2 * solve(M + lambdas * diag(p)) %*%
                  M %*% c
                c <- ust(M %*% a, eta)
                dis <- max(abs(c - c.old))
                c.old <- c
                i <- i + 1
            }
        }
    }
    return(c)
}
# Validate/normalise the sPLS hyper-parameters.  Invalid eta or K aborts
# with an error; out-of-range kappa and unknown select/fit algorithms are
# reset to safe defaults with a warning.  Returns the corrected values.
correctp = function (x, y, eta, K, kappa, select, fit)
{
    # eta must lie in [0, 1)
    if (min(eta) < 0 | max(eta) >= 1) {
        if (max(eta) == 1)
            stop("eta should be strictly less than 1!")
        msg <- if (length(eta) == 1)
            "eta should be between 0 and 1!"
        else
            "eta should be between 0 and 1! \n Choose appropriate range of eta!"
        stop(msg)
    }
    # K must be a positive integer bounded by both p and n
    if (max(K) > ncol(x))
        stop("K cannot exceed the number of predictors! Pick up smaller K!")
    if (max(K) >= nrow(x))
        stop("K cannot exceed the sample size! Pick up smaller K!")
    if (min(K) <= 0 | !all(K %% 1 == 0)) {
        msg <- if (length(K) == 1)
            "K should be a positive integer!"
        else
            "K should be a positive integer! \n Choose appropriate range of K!"
        stop(msg)
    }
    # kappa outside [0, 0.5] falls back to 0.5
    if (kappa > 0.5 | kappa < 0) {
        warning("kappa should be between 0 and 0.5! kappa=0.5 is used. \n\n")
        kappa <- 0.5
    }
    # unknown selection algorithm falls back to pls2
    if (!select %in% c("pls2", "simpls")) {
        warning("Invalid PLS algorithm for variable selection.\n")
        warning("pls2 algorithm is used. \n\n")
        select <- "pls2"
    }
    # unknown fitting algorithm falls back to simpls
    if (!fit %in% c("simpls", "kernelpls", "widekernelpls", "oscorespls")) {
        warning("Invalid PLS algorithm for model fitting\n")
        warning("simpls algorithm is used. \n\n")
        fit <- "simpls"
    }
    list(K = K, eta = eta, kappa = kappa, select = select, fit = fit)
}
# Validate/normalise sPLS hyper-parameters when the number of components K
# is not supplied.  Invalid eta aborts; out-of-range kappa and unknown
# select/fit algorithms are reset to safe defaults with a warning.
correctp.withoutK = function (x, y, eta, kappa, select, fit)
{
    # eta must lie in [0, 1)
    if (min(eta) < 0 | max(eta) >= 1) {
        if (max(eta) == 1)
            stop("eta should be strictly less than 1!")
        msg <- if (length(eta) == 1)
            "eta should be between 0 and 1!"
        else
            "eta should be between 0 and 1! \n Choose appropriate range of eta!"
        stop(msg)
    }
    # kappa outside [0, 0.5] falls back to 0.5
    if (kappa > 0.5 | kappa < 0) {
        warning("kappa should be between 0 and 0.5! kappa=0.5 is used. \n\n")
        kappa <- 0.5
    }
    # unknown selection algorithm falls back to pls2
    if (!select %in% c("pls2", "simpls")) {
        warning("Invalid PLS algorithm for variable selection.\n")
        warning("pls2 algorithm is used. \n\n")
        select <- "pls2"
    }
    # unknown fitting algorithm falls back to simpls
    if (!fit %in% c("simpls", "kernelpls", "widekernelpls", "oscorespls")) {
        warning("Invalid PLS algorithm for model fitting\n")
        warning("simpls algorithm is used. \n\n")
        fit <- "simpls"
    }
    list(eta = eta, kappa = kappa, select = select, fit = fit)
}
# Sparse PLS fit (SPLS algorithm) extended to also return the Y-loadings
# (coeffC) and scores (tt) of the final pls fit, as needed by the
# bootstrap-based stopping criterion.  At each of the K steps a sparse
# direction is computed (spls.dv), the active set A is grown, and a dense
# PLS model is refit on the selected columns via pls::plsr.
spls.Cboot=function (x, y, K, eta, kappa = 0.5, select = "pls2", fit = "simpls",
    scale.x = TRUE, scale.y = FALSE, eps = 1e-04, maxstep = 100,
    verbose = FALSE)
{
    x <- as.matrix(x)
    n <- nrow(x)
    p <- ncol(x)
    ip <- c(1:p)
    y <- as.matrix(y)
    q <- ncol(y)
    one <- matrix(1, 1, n)
    # Center y and x; optionally scale each to unit variance.
    mu <- one %*% y/n
    y <- scale(y, drop(mu), FALSE)
    meanx <- drop(one %*% x)/n
    x <- scale(x, meanx, FALSE)
    if (scale.x) {
        normx <- sqrt(drop(one %*% (x^2))/(n - 1))
        if (any(normx < .Machine$double.eps)) {
            stop("Some of the columns of the predictor matrix have zero variance.")
        }
        x <- scale(x, FALSE, normx)
    }
    else {
        normx <- rep(1, p)
    }
    if (scale.y) {
        normy <- sqrt(drop(one %*% (y^2))/(n - 1))
        if (any(normy < .Machine$double.eps)) {
            stop("Some of the columns of the response matrix have zero variance.")
        }
        y <- scale(y, FALSE, normy)
    }
    else {
        normy <- rep(1, q)
    }
    betahat <- matrix(0, p, q)
    betamat <- list()
    x1 <- x
    y1 <- y
    # Validate the hyper-parameters (may abort or rewrite them).
    type <- correctp(x, y, eta, K, kappa, select, fit)
    eta <- type$eta
    K <- type$K
    kappa <- type$kappa
    select <- type$select
    fit <- type$fit
    if (is.null(colnames(x))) {
        xnames <- c(1:p)
    }
    else {
        xnames <- colnames(x)
    }
    new2As <- list()
    if (verbose) {cat("The variables that join the set of selected variables at each step:\n")}
    for (k in 1:K) {
        # Sparse direction from the current residual cross-product.
        Z <- t(x1) %*% y1
        what <- spls.dv(Z, eta, kappa, eps, maxstep)
        # Active set: newly selected variables plus those already in use.
        A <- unique(ip[what != 0 | betahat[, 1] != 0])
        new2A <- ip[what != 0 & betahat[, 1] == 0]
        # Dense PLS refit on the active columns.
        xA <- x[, A, drop = FALSE]
        plsfit <- pls::plsr(y ~ xA, ncomp = min(k, length(A)),
            method = fit, scale = FALSE)
        betahat <- matrix(0, p, q)
        betahat[A, ] <- matrix(coef(plsfit), length(A), q)
        betamat[[k]] <- betahat
        pj <- plsfit$projection
        # Deflation for the next step depends on the selection algorithm.
        if (select == "pls2") {
            y1 <- y - x %*% betahat
        }
        if (select == "simpls") {
            pw <- pj %*% solve(t(pj) %*% pj) %*% t(pj)
            x1 <- x
            x1[, A] <- x[, A, drop = FALSE] - x[, A, drop = FALSE] %*%
                pw
        }
        new2As[[k]] <- new2A
        # Progress reporting: newly added variables, 10 names per line.
        if (verbose) {
            if (length(new2A) <= 10) {
                cat(paste("- ", k, "th step (K=", k, "):\n",
                  sep = ""))
                cat(xnames[new2A])
                cat("\n")
            }
            else {
                cat(paste("- ", k, "th step (K=", k, "):\n",
                  sep = ""))
                nlines <- ceiling(length(new2A)/10)
                for (i in 0:(nlines - 2)) {
                  cat(xnames[new2A[(10 * i + 1):(10 * (i + 1))]])
                  cat("\n")
                }
                cat(xnames[new2A[(10 * (nlines - 1) + 1):length(new2A)]])
                cat("\n")
            }
        }
    }
    # Y-loadings and scores of the last fit, kept for the bootstrap criterion.
    coeffC <- pls::Yloadings(plsfit)[,1:min(K, length(A))]
    tt <- pls::scores(plsfit)[,1:min(K, length(A))]
    if (!is.null(colnames(x))) {
        rownames(betahat) <- colnames(x)
    }
    if (q > 1 & !is.null(colnames(y))) {
        colnames(betahat) <- colnames(y)
    }
    object <- list(x = x, y = y, coeffC = coeffC, tt = tt, betahat = betahat, A = A, betamat = betamat,
        new2As = new2As, mu = mu, meanx = meanx, normx = normx,
        normy = normy, eta = eta, K = K, kappa = kappa, select = select,
        fit = fit, projection = pj)
    class(object) <- "spls"
    object
}
# Stratified cross-validation splitter.
#
#   y    : class-label vector (factor/character/numeric)
#   fold : number of folds
#
# Shuffles the observation indices within each class (so every fold gets
# a similar class mix) and deals them round-robin into `fold` folds.
# Returns a list of index vectors, one per fold.  Uses the current RNG
# state; call set.seed() beforehand for reproducibility.
cv.split <- function (y, fold)
{
  n <- length(y)
  group <- table(y)
  # shuffle within each class, in the class order given by table(y);
  # the per-class sample() calls are kept in the same order as the
  # original implementation so seeded results are unchanged
  x <- unlist(lapply(seq_along(group), function(i) {
    sample(seq_len(n)[y == names(group)[i]])
  }), use.names = FALSE)
  # deal the shuffled indices into folds 1, 2, ..., fold, 1, 2, ...
  foldi <- split(x, rep(1:fold, length = n))
  return(foldi)
}
# Weighted partial least squares with observation weights V.
#
#   x, y : predictor matrix (n x p) and response matrix (n x q)
#   V    : vector of n observation weights
#   K    : number of latent components to extract
#   type : "pls1" deflates both x and y each step; "simpls" projects x
#          onto the orthogonal complement of the current weight vector
#   center.x, scale.x : kept for interface compatibility; not used in
#          the computation
#
# Returns a list with the weight (W), score (T), y-loading (Q) and
# x-loading (P) matrices accumulated over the K components.
wpls <- function (x, y, V, K = ncol(x), type = "pls1", center.x = TRUE,
                  scale.x = FALSE)
{
  n_obs <- nrow(x)
  n_pred <- ncol(x)
  n_resp <- ncol(y)
  x_defl <- x
  y_defl <- y
  W <- matrix(0, n_pred, K)
  T <- matrix(0, n_obs, K)
  Q <- matrix(0, n_resp, K)
  P <- matrix(0, n_pred, K)
  for (comp in seq_len(K)) {
    # unit-norm weight vector from the V-weighted covariance of the
    # deflated predictors with the deflated response
    wk <- t(x_defl) %*% as.matrix(V * y_defl)
    wk <- wk/sqrt(sum(wk^2))
    W[, comp] <- wk
    # component scores
    tk <- x_defl %*% wk
    T[, comp] <- tk
    # weighted regression coefficients of y and x on the scores
    denom <- sum(tk * V * tk)
    qk <- sum(tk * V * y_defl)/denom
    Q[, comp] <- qk
    pk <- t(as.matrix(tk * V)) %*% x_defl/denom
    P[, comp] <- pk
    if (type == "pls1") {
      # deflate both the response and the predictors by this component
      y_defl <- y_defl - tk %*% qk
      x_defl <- x_defl - tk %*% pk
    }
    if (type == "simpls") {
      # remove the span of the current weight vector from x
      proj <- wk %*% solve(t(wk) %*% wk) %*% t(wk)
      x_defl <- x_defl - x_defl %*% proj
    }
  }
  list(W = W, T = T, Q = Q, P = P)
}
### Updating SGPLS function to get T
# sgpls.T: sparse generalized (logistic) partial least squares fitted by
# iteratively reweighted least squares (IRLS), adapted to additionally
# return the y-loadings (CoeffC) and latent scores (tt) of the final fit.
#
#   x, y      : predictor matrix and binary response
#   K         : number of latent components
#   eta       : sparsity (thresholding) parameter for ust()
#   scale.x   : scale predictor columns to unit variance
#   eps       : convergence tolerance on the relative change of beta
#   denom.eps : guard added to the denominator of the relative change
#   zero.eps  : clamp for fitted probabilities away from 0/1
#   maxstep   : maximum number of IRLS iterations
#   br        : apply Firth-type bias-reduction correction
#   ftype     : "hat" uses hat values for the correction, "iden" uses 1s
#
# Depends on ust() (defined elsewhere) and wpls() (defined above).
# Returns a list of class "sgpls".
sgpls.T=function (x, y, K, eta, scale.x = TRUE, eps = 1e-05, denom.eps = 1e-20,
zero.eps = 1e-05, maxstep = 100, br = TRUE, ftype = "iden")
{
x <- as.matrix(x)
n <- nrow(x)
p <- ncol(x)
ip <- c(1:p)
y <- as.matrix(y)
q <- ncol(y)
one <- matrix(1, 1, n)
# center (and optionally scale) the predictors
mu <- apply(x, 2, mean)
x0 <- scale(x, mu, FALSE)
if (scale.x) {
sigma <- apply(x, 2, sd)
x0 <- scale(x0, FALSE, sigma)
}
else {
sigma <- rep(1, ncol(x))
x0 <- x0
}
beta1hat <- matrix(0, p, q)
beta1hat.old <- beta1hat + 1000
beta0hat <- 0
re <- 100
min.re <- 1000
nstep <- 0
nstep.min <- 0
# IRLS outer loop: iterate until the relative change in beta1hat (re)
# falls below eps, or maxstep iterations have been performed
while (re > eps & nstep < maxstep) {
if (nstep == 0) {
# initial working probabilities and weights
p0 <- (y + 0.5)/2
V <- as.vector(p0 * (1 - p0))
A <- c(1:p)
}
else {
# logistic mean from the current linear predictor, clamped away
# from 0 and 1 so the IRLS weights stay finite
exp.xb <- exp(beta0hat + x0 %*% beta1hat)
p0 <- exp.xb/(1 + exp.xb)
p0[exp.xb == Inf] <- 1 - zero.eps
p0[p0 < zero.eps] <- zero.eps
p0[p0 > (1 - zero.eps)] <- 1 - zero.eps
V <- as.vector(p0 * (1 - p0))
}
# H: leverage values for the bias-reduction term ("hat"), or ones
switch(ftype, hat = {
H <- hat(sweep(cbind(rep(1, n), x0), 1, sqrt(V),
"*"), intercept = FALSE)
}, iden = {
H <- rep(1, n)
})
if (nstep == 0) {
y0 <- beta0hat + x0 %*% beta1hat + (y - p0)/V
}
else {
# working response including the bias-reduction correction (br)
V <- V * (H * br + 1)
y0 <- beta0hat + x0 %*% beta1hat + (y + H * br/2 -
(H * br + 1) * p0)/V
}
y1 <- y0
y1 <- y1 - mean(y1)
x1 <- x0
A.old <- c()
# inner SPLS loop: grow the active set A across the K components
for (k in 1:K) {
Z <- t(x1) %*% as.matrix(V * y1)
# normalise by the median absolute value before soft-thresholding
Znorm1 <- median(abs(Z))
Z <- Z/Znorm1
what <- ust(Z, eta)
A <- sort(unique(c(A.old, ip[what != 0])))
x0A <- x0[, A, drop = FALSE]
plsfit <- wpls(x0A, y0, V, K = min(k, length(A)),
type = "pls1", center.x = FALSE, scale.x = FALSE)
A.old <- A
# deflate the working response and the selected predictor columns
y1 <- y0 - plsfit$T %*% t(plsfit$Q)
x1 <- x0
x1[, A] <- x0[, A] - plsfit$T %*% t(plsfit$P)
}
# refit weighted PLS on the final active set
x0A <- x0[, A, drop = FALSE]
plsfit <- wpls(x0A, y0, V, K = min(K, length(A)), type = "pls1",
center.x = FALSE, scale.x = FALSE)
W <- plsfit$W
T <- plsfit$T
P <- plsfit$P
Q <- plsfit$Q
beta1hat.old <- beta1hat
beta1hat <- matrix(0, p, q)
beta1hat[A, ] <- W %*% solve(t(P) %*% W) %*% t(Q)
beta0hat <- weighted.mean((y0 - T %*% t(Q)), sqrt(V))
re <- mean(abs(beta1hat - beta1hat.old))/mean(abs(beta1hat.old) +
denom.eps)
nstep <- nstep + 1
# keep track of the best iterate (smallest relative change) so far
if (re < min.re & nstep > 1) {
min.re <- re
nstep.min <- nstep
beta1hat.min <- beta1hat
beta0hat.min <- beta0hat
A.min <- A
W.min <- W
}
}
# if the loop did not converge, fall back to the best iterate seen
if (re > eps) {
if (nstep.min > 0) {
converged <- FALSE
beta1hat <- beta1hat.min
beta0hat <- beta0hat.min
A <- A.min
W <- W.min
}
}
# stack intercept on top of the slope coefficients
betahat <- matrix(c(beta0hat, beta1hat))
if (!is.null(colnames(x))) {
rownames(betahat) <- 1:nrow(betahat)
rownames(betahat)[1] <- "intercept"
rownames(betahat)[2:nrow(betahat)] <- colnames(x)
}
else {
rownames(betahat) <- c(0, paste("x", 1:p, sep = ""))
rownames(betahat)[1] <- "intercept"
}
object <- list(x = x, y = y, x0 = x0, eta = eta, K = K, CoeffC=Q, tt=T, betahat = betahat,
A = A, W = W, mu = mu, sigma = sigma)
class(object) <- "sgpls"
object
}
|
#Jesús Alberto Cuéllar Loera
#06/Agosto/2019
#Class 1
# Sample of 30 diameter-at-breast-height (dbh) measurements
dbh <- c(16.5, 25.3, 22.1, 17.2, 16.1, 8.1, 34.3, 5.4, 5.7, 11.2, 24.1,
14.5, 7.7, 15.6, 15.9, 10, 17.5, 20.5, 7.8, 27.3, 9.7, 6.5,
23.4, 8.2, 28.5, 10.4, 11.5, 14.3, 17.2, 16.8)
# sample size
length(dbh)
# mean computed by hand and with mean() (should agree)
sum(dbh)/length(dbh)
mean(dbh)
# min/max, stem-and-leaf display, histogram
range(dbh)
stem(dbh)
hist(dbh)
moda <- function(x)
{
  # Returns the mode(s) of vector x: the value(s) occurring most often,
  # converted back to numeric (ties are all returned).
  freq <- sort(table(x), decreasing = TRUE)
  top_vals <- names(freq[freq == freq[1]])
  as.numeric(top_vals)
}
# most frequent value(s) in the sample
moda(dbh)
# quartiles and five-number summary
quantile(dbh, 0.25)
quantile(dbh, 0.5)
quantile(dbh, 0.75)
fivenum(dbh)
# coefficient of variation, in percent
100*(sd(dbh) / mean(dbh))
par(mar=c(1,1,1,1))
# histograms of standard-normal samples of increasing size
set.seed(10)
dbh.10 <- rnorm(10)
hist(dbh.10)
dbh50 <- rnorm(50)
hist(dbh50)
dbh500 <- rnorm(500)
hist(dbh500)
dbh1000 <- rnorm(1000)
hist(dbh1000)
# Shapiro-Wilk normality test on the dbh sample
shapiro.test(dbh)
|
/Clase 1/Clase1.R
|
no_license
|
JesusCuellar00/MCF202
|
R
| false
| false
| 814
|
r
|
#Jesús Alberto Cuéllar Loera
#06/Agosto/2019
#Class 1
# Sample of 30 diameter-at-breast-height (dbh) measurements
dbh <- c(16.5, 25.3, 22.1, 17.2, 16.1, 8.1, 34.3, 5.4, 5.7, 11.2, 24.1,
14.5, 7.7, 15.6, 15.9, 10, 17.5, 20.5, 7.8, 27.3, 9.7, 6.5,
23.4, 8.2, 28.5, 10.4, 11.5, 14.3, 17.2, 16.8)
# sample size, manual mean vs mean(), range, stem plot, histogram
length(dbh)
sum(dbh)/length(dbh)
mean(dbh)
range(dbh)
stem(dbh)
hist(dbh)
moda=function(x)
{
#Finds the mode(s) of a vector x: the value(s) with highest frequency
m1 <- sort(table(x),decreasing=T)
moda <- names(m1[m1==m1[1]])
moda <- as.numeric(moda)
return(moda)
}
# mode, quartiles, five-number summary of the sample
moda(dbh)
quantile(dbh, 0.25)
quantile(dbh, 0.5)
quantile(dbh, 0.75)
fivenum(dbh)
# coefficient of variation, in percent
100*(sd(dbh) / mean(dbh))
par(mar=c(1,1,1,1))
# histograms of standard-normal samples of increasing size
set.seed(10)
dbh.10 <- rnorm(10)
hist(dbh.10)
dbh50 <- rnorm(50)
hist(dbh50)
dbh500 <- rnorm(500)
hist(dbh500)
dbh1000 <- rnorm(1000)
hist(dbh1000)
# Shapiro-Wilk normality test on the dbh sample
shapiro.test(dbh)
|
descr_extract_JAGS <- function (res_l, res_0, res_u){
  # Extraction of descriptive statistics (posterior means and sds) from
  # MCMC sample matrices provided by JAGS.
  #
  #   res_l, res_0, res_u : matrices of posterior draws (one column per
  #     monitored quantity) for the lower / central / upper analyses.
  #     Columns must include "mu", "log_tau", "theta[1]".."theta[kk]"
  #     and "theta_new"; kk is inferred as ncol - 4 (the remaining
  #     column, e.g. "deviance", is ignored).
  #
  # Returns a matrix with one row per parameter (mu, log_tau, theta_i,
  # theta_new) and mean/sd columns for each of the three inputs.
  kk <- dim(res_0)[2] - 4
  reff <- paste(rep("theta_", kk), c(1:kk), sep = "")
  names_row <- c("mu", "log_tau", reff, "theta_new")
  descr_names_col <- c("m_l", "sd_l", "m_0",
                       "sd_0", "m_u", "sd_u")
  descr_collect <- matrix(NA, nrow = length(names_row),
                          ncol = length(descr_names_col),
                          dimnames = list(names_row, descr_names_col))
  # column names of the monitored quantities in the JAGS output, in the
  # same order as the rows of descr_collect
  par_names <- c("mu", "log_tau",
                 paste(rep("theta[", kk), c(1:kk), rep("]", kk), sep = ""),
                 "theta_new")
  # the three inputs fill column pairs (1,2), (3,4), (5,6) respectively;
  # this loop replaces three near-identical copy-pasted sections
  res_list <- list(res_l, res_0, res_u)
  for (j in seq_along(res_list)) {
    res <- res_list[[j]]
    for (i in seq_along(par_names)) {
      descr_collect[i, 2 * j - 1] <- mean(res[, par_names[i]])
      descr_collect[i, 2 * j]     <- sd(res[, par_names[i]])
    }
  }
  return(descr_collect)
}
|
/R/descr_extract_JAGS.R
|
no_license
|
hunansona/si4bayesmeta
|
R
| false
| false
| 2,455
|
r
|
descr_extract_JAGS <- function (res_l, res_0, res_u){
  # Extraction of descriptive statistics (posterior means and sds) from
  # MCMC sample matrices provided by JAGS.
  #
  #   res_l, res_0, res_u : matrices of posterior draws (one column per
  #     monitored quantity) for the lower / central / upper analyses.
  #     Columns must include "mu", "log_tau", "theta[1]".."theta[kk]"
  #     and "theta_new"; kk is inferred as ncol - 4 (the remaining
  #     column, e.g. "deviance", is ignored).
  #
  # Returns a matrix with one row per parameter (mu, log_tau, theta_i,
  # theta_new) and mean/sd columns for each of the three inputs.
  kk <- dim(res_0)[2] - 4
  reff <- paste(rep("theta_", kk), c(1:kk), sep = "")
  names_row <- c("mu", "log_tau", reff, "theta_new")
  descr_names_col <- c("m_l", "sd_l", "m_0",
                       "sd_0", "m_u", "sd_u")
  descr_collect <- matrix(NA, nrow = length(names_row),
                          ncol = length(descr_names_col),
                          dimnames = list(names_row, descr_names_col))
  # column names of the monitored quantities in the JAGS output, in the
  # same order as the rows of descr_collect
  par_names <- c("mu", "log_tau",
                 paste(rep("theta[", kk), c(1:kk), rep("]", kk), sep = ""),
                 "theta_new")
  # the three inputs fill column pairs (1,2), (3,4), (5,6) respectively;
  # this loop replaces three near-identical copy-pasted sections
  res_list <- list(res_l, res_0, res_u)
  for (j in seq_along(res_list)) {
    res <- res_list[[j]]
    for (i in seq_along(par_names)) {
      descr_collect[i, 2 * j - 1] <- mean(res[, par_names[i]])
      descr_collect[i, 2 * j]     <- sd(res[, par_names[i]])
    }
  }
  return(descr_collect)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sg_3hd.R
\name{sg_3hd}
\alias{sg_3hd}
\title{Function for calculating Specific Gravity - Third Harvest Date (SG_3HD)}
\usage{
sg_3hd(sgs1_3hd, sgs2_3hd)
}
\arguments{
\item{sgs1_3hd}{Specific gravity sample 1 (Third harvest date)}
\item{sgs2_3hd}{Specific gravity sample 2 (Third harvest date)}
}
\value{
sg_3hd Returns the specific gravity (Third harvest date)
}
\description{
Function for calculating Specific Gravity - Third Harvest Date (SG_3HD)
}
\details{
This function returns the specific gravity (Third harvest date)
}
\author{
Omar Benites
}
\references{
Protocol for tuber bulking maturity assessment of elite and advanced potato clones. International Potato Center (CIP), 2014
}
\seealso{
Other Bulking-maturity, evaluation, potato: \code{\link{atmw_1hd}},
\code{\link{atmw_2hd}}, \code{\link{atmw_3hd}},
\code{\link{atnomw_1hd}}, \code{\link{atnomw_2hd}},
\code{\link{atnomw_3hd}}, \code{\link{atnomw}},
\code{\link{av_sg}}, \code{\link{sg_2hd}},
\code{\link{sg_average}}, \code{\link{sgs1_1hd}},
\code{\link{sgs1_2hd}}, \code{\link{sgs1_3hd}},
\code{\link{sgs2_1hd}}, \code{\link{sgs2_2hd}},
\code{\link{sgs2_3hd}}
}
|
/man/sg_3hd.Rd
|
permissive
|
c5sire/sbformula
|
R
| false
| true
| 1,226
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sg_3hd.R
\name{sg_3hd}
\alias{sg_3hd}
\title{Function for calculating Specific Gravity - Third Harvest Date (SG_3HD)}
\usage{
sg_3hd(sgs1_3hd, sgs2_3hd)
}
\arguments{
\item{sgs1_3hd}{Specific gravity sample 1 (Third harvest date)}
\item{sgs2_3hd}{Specific gravity sample 2 (Third harvest date)}
}
\value{
sg_3hd Returns the specific gravity (Third harvest date)
}
\description{
Function for calculating Specific Gravity - Third Harvest Date (SG_3HD)
}
\details{
This function returns the specific gravity (Third harvest date)
}
\author{
Omar Benites
}
\references{
Protocol for tuber bulking maturity assessment of elite and advanced potato clones. International Potato Center (CIP), 2014
}
\seealso{
Other Bulking-maturity, evaluation, potato: \code{\link{atmw_1hd}},
\code{\link{atmw_2hd}}, \code{\link{atmw_3hd}},
\code{\link{atnomw_1hd}}, \code{\link{atnomw_2hd}},
\code{\link{atnomw_3hd}}, \code{\link{atnomw}},
\code{\link{av_sg}}, \code{\link{sg_2hd}},
\code{\link{sg_average}}, \code{\link{sgs1_1hd}},
\code{\link{sgs1_2hd}}, \code{\link{sgs1_3hd}},
\code{\link{sgs2_1hd}}, \code{\link{sgs2_2hd}},
\code{\link{sgs2_3hd}}
}
|
#' Run PCA on the main data
#'
#' This function takes an object of class iCellR and runs PCA on the main data.
#' @param x An object of class iCellR.
#' @param method Choose from "base.mean.rank" or "gene.model", default is "base.mean.rank". If gene.model is chosen you need to provide gene.list.
#' @param top.rank A number taking the top genes ranked by base mean, default = 500.
#' @param data.type Choose from "main" and "imputed", default = "main"
#' @param plus.log.value A number to add to each value in the matrix before log transformation to avoid Inf numbers, default = 0.1.
#' @param gene.list A character vector of genes to be used for PCA. If "clust.method" is set to "gene.model", default = "my_model_genes.txt".
#' @param scale.data If TRUE the data will be scaled (log2 + plus.log.value), default = TRUE.
#' @return An object of class iCellR.
#' @examples
#' demo.obj <- run.pca(demo.obj, method = "gene.model", gene.list = demo.obj@gene.model)
#'
#' head(demo.obj@pca.data)[1:5]
#'
#' @export
run.pca <- function (x = NULL,
                     data.type = "main",
                     method = "base.mean.rank",
                     top.rank = 500,
                     plus.log.value = 0.1,
                     scale.data = TRUE,
                     gene.list = "character") {
  if (!inherits(x, "iCellR")) {
    stop("x should be an object of class iCellR")
  }
  # pick the expression matrix (genes x cells) to work on
  if (data.type == "main") {
    DATA <- x@main.data
  }
  if (data.type == "imputed") {
    DATA <- x@imputed.data
  }
  # Method 1: keep the top.rank genes with the highest base mean
  if (method == "base.mean.rank") {
    raw.data.order <- DATA[order(rowMeans(DATA), decreasing = TRUE), ]
    TopNormLogScale <- head(raw.data.order, top.rank)
    if (scale.data == TRUE) {
      TopNormLogScale <- log(TopNormLogScale + plus.log.value)
    }
  }
  # Method 2: use a user-supplied gene model
  if (method == "gene.model") {
    # "character" is the sentinel default, i.e. no gene list was given
    if (gene.list[1] == "character") {
      stop("please provide gene names for clustering")
    }
    genesForClustering <- gene.list
    topGenes <- subset(DATA, rownames(DATA) %in% genesForClustering)
    # BUGFIX(review): in the original code the "imputed" branch below was
    # accidentally nested inside the "main" branch and therefore
    # unreachable; the two branches are now siblings.
    if (data.type == "main") {
      TopNormLogScale <- topGenes
      if (scale.data == TRUE) {
        TopNormLogScale <- log(TopNormLogScale + plus.log.value)
      }
    }
    if (data.type == "imputed") {
      TopNormLogScale <- topGenes
      if (scale.data == TRUE) {
        # row-wise z-scoring for imputed data
        TopNormLogScale <- t(scale(t(topGenes)))
      }
    }
  }
  # PCA; the data has already been centered/scaled as requested above
  counts.pca <- prcomp(TopNormLogScale, center = FALSE, scale. = FALSE)
  attributes(x)$pca.info <- counts.pca
  # per-cell PC coordinates
  dataPCA <- data.frame(counts.pca$rotation)
  attributes(x)$pca.data <- dataPCA
  # heuristic for the optimal number of PCs: number of components whose
  # sdev exceeds twice the mean sdev, plus one
  DATA <- counts.pca$sdev
  OPTpcs <- mean(DATA) * 2
  OPTpcs <- (DATA > OPTpcs)
  OPTpcs <- length(OPTpcs[OPTpcs == TRUE]) + 1
  attributes(x)$opt.pcs <- OPTpcs
  return(x)
}
|
/R/F012.run.pca.R
|
no_license
|
kant/iCellR
|
R
| false
| false
| 3,604
|
r
|
#' Run PCA on the main data
#'
#' This function takes an object of class iCellR and runs PCA on the main data.
#' @param x An object of class iCellR.
#' @param method Choose from "base.mean.rank" or "gene.model", default is "base.mean.rank". If gene.model is chosen you need to provide gene.list.
#' @param top.rank A number taking the top genes ranked by base mean, default = 500.
#' @param data.type Choose from "main" and "imputed", default = "main"
#' @param plus.log.value A number to add to each value in the matrix before log transformation to avoid Inf numbers, default = 0.1.
#' @param gene.list A character vector of genes to be used for PCA. If "clust.method" is set to "gene.model", default = "my_model_genes.txt".
#' @param scale.data If TRUE the data will be scaled (log2 + plus.log.value), default = TRUE.
#' @return An object of class iCellR.
#' @examples
#' demo.obj <- run.pca(demo.obj, method = "gene.model", gene.list = demo.obj@gene.model)
#'
#' head(demo.obj@pca.data)[1:5]
#'
#' @export
run.pca <- function (x = NULL,
                     data.type = "main",
                     method = "base.mean.rank",
                     top.rank = 500,
                     plus.log.value = 0.1,
                     scale.data = TRUE,
                     gene.list = "character") {
  if (!inherits(x, "iCellR")) {
    stop("x should be an object of class iCellR")
  }
  # pick the expression matrix (genes x cells) to work on
  if (data.type == "main") {
    DATA <- x@main.data
  }
  if (data.type == "imputed") {
    DATA <- x@imputed.data
  }
  # Method 1: keep the top.rank genes with the highest base mean
  if (method == "base.mean.rank") {
    raw.data.order <- DATA[order(rowMeans(DATA), decreasing = TRUE), ]
    TopNormLogScale <- head(raw.data.order, top.rank)
    if (scale.data == TRUE) {
      TopNormLogScale <- log(TopNormLogScale + plus.log.value)
    }
  }
  # Method 2: use a user-supplied gene model
  if (method == "gene.model") {
    # "character" is the sentinel default, i.e. no gene list was given
    if (gene.list[1] == "character") {
      stop("please provide gene names for clustering")
    }
    genesForClustering <- gene.list
    topGenes <- subset(DATA, rownames(DATA) %in% genesForClustering)
    # BUGFIX(review): in the original code the "imputed" branch below was
    # accidentally nested inside the "main" branch and therefore
    # unreachable; the two branches are now siblings.
    if (data.type == "main") {
      TopNormLogScale <- topGenes
      if (scale.data == TRUE) {
        TopNormLogScale <- log(TopNormLogScale + plus.log.value)
      }
    }
    if (data.type == "imputed") {
      TopNormLogScale <- topGenes
      if (scale.data == TRUE) {
        # row-wise z-scoring for imputed data
        TopNormLogScale <- t(scale(t(topGenes)))
      }
    }
  }
  # PCA; the data has already been centered/scaled as requested above
  counts.pca <- prcomp(TopNormLogScale, center = FALSE, scale. = FALSE)
  attributes(x)$pca.info <- counts.pca
  # per-cell PC coordinates
  dataPCA <- data.frame(counts.pca$rotation)
  attributes(x)$pca.data <- dataPCA
  # heuristic for the optimal number of PCs: number of components whose
  # sdev exceeds twice the mean sdev, plus one
  DATA <- counts.pca$sdev
  OPTpcs <- mean(DATA) * 2
  OPTpcs <- (DATA > OPTpcs)
  OPTpcs <- length(OPTpcs[OPTpcs == TRUE]) + 1
  attributes(x)$opt.pcs <- OPTpcs
  return(x)
}
|
# Set up ------------------------------------------------------------------
# Script: generate new metal riffs by fitting Markov chains to the note
# timings and pitches of a folder of MIDI tracks.
# NOTE(review): hard-coded setwd() makes this machine-specific — consider
# a project-relative path or here::here().
# set working directory
setwd("~/Documents/Projects/MIDI")
# initiate libraries
library(tidyverse)
library(tuneR)
library(tidytext)
library(markovchain)
# read in reference pitch table
pitch.tbl <- read_csv("midi pitch table.csv")
# find midi tracks
midi <- list.files(path = paste0(getwd(), '/forbidden knowledge'),
pattern="*.mid")
# check which songs you're loading
cat(midi, sep = '\n')
# import midi tracks and convert to single dataframe
songs <- tibble(filename = midi) %>%
mutate(file_contents = map(filename,
~readMidi(paste0(getwd(), '/forbidden knowledge/', .)) %>%
as_tibble() %>%
getMidiNotes()
)) %>%
unnest()
#remove filename obj from workspace
rm(midi)
# TIMING: markov chain --------------------------------------
# remove duplicate rows by time (indicating chords) to get table of note lengths in songs
duration <- songs %>%
distinct(filename, time, length) %>%
ungroup()
# generate transition matrix (note that there is no pitch info here)
time.chain <- markovchainFit(duration$length)
#number of notes per riff
n <- 16
# create empty vector to store the RIFFS
timings <- NULL
# set seed for consistent results
set.seed(666)
# generate new RIFFS
for(i in 1:10){
timings<- c(timings,
c(paste(
markovchainSequence(n = n, markovchain = time.chain$estimate),
collapse=' ')))
}
# Check out the first few
head(timings)
#translate back to table format
timings <- tibble(time = timings) %>%
rowid_to_column('riff') %>%
separate(time,
c(paste0('note_', LETTERS[1:n])),
sep = ' ') %>%
gather(id, length, -riff) %>%
arrange(riff, id)
# PITCH: markov chain --------------------------------------
# concatenate notes together if occuring at same time (aka, chords!)
pitch <- songs %>%
group_by(filename, time) %>%
summarise(pitch = paste0(note, collapse = '_')) %>%
ungroup()
# generate transition matrix (note that there is no duration info here)
note.chain <- markovchainFit(pitch$pitch)
# create empty vector to store the RIFFS
notes <- NULL
# set seed for consistent results
set.seed(666)
# generate new notes
for(i in 1:10){
notes <- c(notes,
c(paste(
markovchainSequence(n = n, markovchain = note.chain$estimate),
collapse=' ')))
}
# Check out the first few
head(notes)
# CREATE RIFF TABLE -------------------------------------------------------
#translate back to table format
# NOTE(review): gather/spread are superseded by pivot_longer/pivot_wider;
# also note the trailing comma in `sep = "_", )` below — verify it does
# not raise an empty-argument error with the installed tidyr version.
riffs <- tibble(notes = notes) %>%
rowid_to_column('riff') %>%
separate(notes,
c(paste0('note_', LETTERS[1:n])),
sep = ' ') %>%
gather(id, note, -riff) %>%
arrange(riff, id) %>%
left_join(timings) %>%
separate(note,
c(paste0('', 1:3)),
sep = "_", ) %>%
gather(pitch.id, midi.note, -riff, -id, -length) %>%
mutate(midi.note = as.numeric(midi.note)) %>%
left_join(pitch.tbl %>%
select(midi.note,
name,
frequency)) %>%
filter(!is.na(frequency)) %>%
gather(info, value, -riff:-pitch.id) %>%
unite(dummy, info, pitch.id) %>%
spread(dummy, value, fill = '')
# JUNK --------------------------------------------------------------------
# i don't know how to convert 'riffs' back to MIDI...
#set tempo event 51: microseconds per quarter note
# this is so dumb im sorry
|
/chainriffs.R
|
no_license
|
areyes13/metal-skynet
|
R
| false
| false
| 3,669
|
r
|
# Set up ------------------------------------------------------------------
# Script: generate new metal riffs by fitting Markov chains to the note
# timings and pitches of a folder of MIDI tracks.
# NOTE(review): hard-coded setwd() makes this machine-specific — consider
# a project-relative path or here::here().
# set working directory
setwd("~/Documents/Projects/MIDI")
# initiate libraries
library(tidyverse)
library(tuneR)
library(tidytext)
library(markovchain)
# read in reference pitch table
pitch.tbl <- read_csv("midi pitch table.csv")
# find midi tracks
midi <- list.files(path = paste0(getwd(), '/forbidden knowledge'),
pattern="*.mid")
# check which songs you're loading
cat(midi, sep = '\n')
# import midi tracks and convert to single dataframe
songs <- tibble(filename = midi) %>%
mutate(file_contents = map(filename,
~readMidi(paste0(getwd(), '/forbidden knowledge/', .)) %>%
as_tibble() %>%
getMidiNotes()
)) %>%
unnest()
#remove filename obj from workspace
rm(midi)
# TIMING: markov chain --------------------------------------
# remove duplicate rows by time (indicating chords) to get table of note lengths in songs
duration <- songs %>%
distinct(filename, time, length) %>%
ungroup()
# generate transition matrix (note that there is no pitch info here)
time.chain <- markovchainFit(duration$length)
#number of notes per riff
n <- 16
# create empty vector to store the RIFFS
timings <- NULL
# set seed for consistent results
set.seed(666)
# generate new RIFFS
for(i in 1:10){
timings<- c(timings,
c(paste(
markovchainSequence(n = n, markovchain = time.chain$estimate),
collapse=' ')))
}
# Check out the first few
head(timings)
#translate back to table format
timings <- tibble(time = timings) %>%
rowid_to_column('riff') %>%
separate(time,
c(paste0('note_', LETTERS[1:n])),
sep = ' ') %>%
gather(id, length, -riff) %>%
arrange(riff, id)
# PITCH: markov chain --------------------------------------
# concatenate notes together if occuring at same time (aka, chords!)
pitch <- songs %>%
group_by(filename, time) %>%
summarise(pitch = paste0(note, collapse = '_')) %>%
ungroup()
# generate transition matrix (note that there is no duration info here)
note.chain <- markovchainFit(pitch$pitch)
# create empty vector to store the RIFFS
notes <- NULL
# set seed for consistent results
set.seed(666)
# generate new notes
for(i in 1:10){
notes <- c(notes,
c(paste(
markovchainSequence(n = n, markovchain = note.chain$estimate),
collapse=' ')))
}
# Check out the first few
head(notes)
# CREATE RIFF TABLE -------------------------------------------------------
#translate back to table format
# NOTE(review): gather/spread are superseded by pivot_longer/pivot_wider;
# also note the trailing comma in `sep = "_", )` below — verify it does
# not raise an empty-argument error with the installed tidyr version.
riffs <- tibble(notes = notes) %>%
rowid_to_column('riff') %>%
separate(notes,
c(paste0('note_', LETTERS[1:n])),
sep = ' ') %>%
gather(id, note, -riff) %>%
arrange(riff, id) %>%
left_join(timings) %>%
separate(note,
c(paste0('', 1:3)),
sep = "_", ) %>%
gather(pitch.id, midi.note, -riff, -id, -length) %>%
mutate(midi.note = as.numeric(midi.note)) %>%
left_join(pitch.tbl %>%
select(midi.note,
name,
frequency)) %>%
filter(!is.na(frequency)) %>%
gather(info, value, -riff:-pitch.id) %>%
unite(dummy, info, pitch.id) %>%
spread(dummy, value, fill = '')
# JUNK --------------------------------------------------------------------
# i don't know how to convert 'riffs' back to MIDI...
#set tempo event 51: microseconds per quarter note
# this is so dumb im sorry
|
#Tag processing script for Zac's NN modeling manuscript
# Reads a raw biologging CSV, derives dynamic-acceleration metrics and
# smoothed depth / vertical velocity, downsamples to datFreq.desired,
# extracts a 24 h window and writes it out for NN modeling.
library(gRumble)
source('tag_fxns.R')
#set data drive & folders w data
d.dir <- "/Users/jhmoxley/Documents/Biologia & Animales/[[SharkTank]]/data_for_Biologgingwork/Neral_network_datasets_Zac"
#metadata for deployments
#deployment <- "CC_2_24_PR151106"
#fn <- "LOG_CC_2_24_D1"
#datFreq.desired <- 1 #Hz
###THIS MAY ALREADY BE THE ORIGINAL DATA CFW SENT TO ZAC
#deployment <- "11_CATS_Diary_20161101"
#fn <- "20161128-035053-BIO7379_Diary3"
#datFreq.desired <- 1 #Hz
###ISSUE WITH TIME FORMATTING HH:MM:SS:MS.. fucking final colon needs to be a period
#Scratchy fix w/ substr()
#N.B. THIS CATS DATASET HAS AN EXTRA 0 PRECEEDING THE MILLISECONDS AS WELL
#deployment <- "CC_2_24_PR161108"
#fn <- "CC-2-24_PR161108_PR16110703"
datFreq.desired <- 1 #Hz
# NOTE(review): `deployment` and `fn` are used below, but every assignment
# above is commented out — one deployment block must be uncommented (or the
# variables set elsewhere) before this script will run.
df <- read.csv(file.path(d.dir, deployment, paste(fn,".csv",sep="")), stringsAsFactors = F)
#substring(df$Time, 9,10) <- "." #reformating for CATS tags ISSUES
#df$Time <- sub(".0", ".", df$Time, fixed = T)
#converting date/time issues
df$dts <- as.POSIXct(paste(df$Date, df$Time), format = "%d.%m.%Y %H:%M:%OS", tz = "UTC")
#df$Date <- as.Date(df$Date, format = "%d.%m.%Y")
(datFreq <- 1/as.numeric(Mode(round(diff(df$dts),4))))
print(paste("Modal sampling frequency of raw data estimated to be ", datFreq, " Hz"))
print(paste("dataset ", ifelse(datFreq == datFreq.desired, "WILL NOT ", "WILL"), "be downsampled"))
##CACLULATE ACCELERATION METRICS PRIOR TO DOWNSAMPLING
#convert accels from what is assumed to be millibar
df$accel.x <- df$Acceleration..2....channel..1/1000
df$accel.y <- df$Acceleration..2....channel..2/1000
df$accel.z <- df$Acceleration..2....channel..3/1000
# Gsep (gRumble) separates static/dynamic acceleration; 5 s moving-average filter
gees <-data.frame(Gsep(cbind(df$accel.x, df$accel.y, df$accel.z), filt=rep(1, 5*datFreq)/(5*datFreq)))
#downsampling (Should we smooth depth before downsampling??)
df <- df[seq(1, nrow(df), by = datFreq/datFreq.desired),];
#collapse acceleration data
df$accel.x <- collapse(gees$X_Dynamic, datFreq)
df$accel.y <- collapse(gees$Y_Dynamic, datFreq)
df$accel.z <- collapse(gees$Z_Dynamic, datFreq)
df$odba <- collapse(gees$ODBA, datFreq)
#inset time index
# hours elapsed since the first record
df$tidx <- (as.numeric(df$dts)-as.numeric(min(df$dts)))/3600
#update datFreq
datFreq <- datFreq.desired
####
#Unit conversions
####
#convert pressure from what is assumed to be bar
df$depth..m <- df$Pressure...channel..1 /10.197
#2 stage smoothing & VV
df$depth.m <- stats::filter(df$depth..m, filter = rep(1,5*datFreq)/(5*datFreq), sides = 2, circular = T)
df$VV <- c(0, diff(df$depth.m))
df$VV <- stats::filter(df$VV, filter = rep(1,1*datFreq), sides = 2, circular = T) #smooth VV over 1s
#subset data of interest; extract 24 hrs following 1st quaritle
library(dplyr)
# NOTE(review): plot indices 13012:117861 are hard-coded for one deployment
plot(df$depth..m[13012:117861], type = "l"); locator(1) #check for tagON/tagOFF
quart.idx <- which.min(abs(df$tidx-quantile(df$tidx, 0.25)))
df2 <- df %>% slice(quart.idx:(quart.idx + 24*60*60)) %>%
select(dts, tidx, depth.m, VV, accel.x, accel.y, accel.z, odba) %>%
mutate(id = deployment)
#write data out
write.csv(df2, file = file.path(file.path(d.dir, paste(deployment,"_NN.csv",sep=""))))
|
/NN_dataprocessing.R
|
no_license
|
JayMox/CC_CamTags
|
R
| false
| false
| 3,125
|
r
|
#Tag processing script for Zac's NN modeling manuscript
# Reads a raw biologging CSV, derives dynamic-acceleration metrics and
# smoothed depth / vertical velocity, downsamples to datFreq.desired,
# extracts a 24 h window and writes it out for NN modeling.
library(gRumble)
source('tag_fxns.R')
#set data drive & folders w data
d.dir <- "/Users/jhmoxley/Documents/Biologia & Animales/[[SharkTank]]/data_for_Biologgingwork/Neral_network_datasets_Zac"
#metadata for deployments
#deployment <- "CC_2_24_PR151106"
#fn <- "LOG_CC_2_24_D1"
#datFreq.desired <- 1 #Hz
###THIS MAY ALREADY BE THE ORIGINAL DATA CFW SENT TO ZAC
#deployment <- "11_CATS_Diary_20161101"
#fn <- "20161128-035053-BIO7379_Diary3"
#datFreq.desired <- 1 #Hz
###ISSUE WITH TIME FORMATTING HH:MM:SS:MS.. fucking final colon needs to be a period
#Scratchy fix w/ substr()
#N.B. THIS CATS DATASET HAS AN EXTRA 0 PRECEEDING THE MILLISECONDS AS WELL
#deployment <- "CC_2_24_PR161108"
#fn <- "CC-2-24_PR161108_PR16110703"
datFreq.desired <- 1 #Hz
# NOTE(review): `deployment` and `fn` are used below, but every assignment
# above is commented out — one deployment block must be uncommented (or the
# variables set elsewhere) before this script will run.
df <- read.csv(file.path(d.dir, deployment, paste(fn,".csv",sep="")), stringsAsFactors = F)
#substring(df$Time, 9,10) <- "." #reformating for CATS tags ISSUES
#df$Time <- sub(".0", ".", df$Time, fixed = T)
#converting date/time issues
df$dts <- as.POSIXct(paste(df$Date, df$Time), format = "%d.%m.%Y %H:%M:%OS", tz = "UTC")
#df$Date <- as.Date(df$Date, format = "%d.%m.%Y")
(datFreq <- 1/as.numeric(Mode(round(diff(df$dts),4))))
print(paste("Modal sampling frequency of raw data estimated to be ", datFreq, " Hz"))
print(paste("dataset ", ifelse(datFreq == datFreq.desired, "WILL NOT ", "WILL"), "be downsampled"))
##CACLULATE ACCELERATION METRICS PRIOR TO DOWNSAMPLING
#convert accels from what is assumed to be millibar
df$accel.x <- df$Acceleration..2....channel..1/1000
df$accel.y <- df$Acceleration..2....channel..2/1000
df$accel.z <- df$Acceleration..2....channel..3/1000
# Gsep (gRumble) separates static/dynamic acceleration; 5 s moving-average filter
gees <-data.frame(Gsep(cbind(df$accel.x, df$accel.y, df$accel.z), filt=rep(1, 5*datFreq)/(5*datFreq)))
#downsampling (Should we smooth depth before downsampling??)
df <- df[seq(1, nrow(df), by = datFreq/datFreq.desired),];
#collapse acceleration data
df$accel.x <- collapse(gees$X_Dynamic, datFreq)
df$accel.y <- collapse(gees$Y_Dynamic, datFreq)
df$accel.z <- collapse(gees$Z_Dynamic, datFreq)
df$odba <- collapse(gees$ODBA, datFreq)
#inset time index
# hours elapsed since the first record
df$tidx <- (as.numeric(df$dts)-as.numeric(min(df$dts)))/3600
#update datFreq
datFreq <- datFreq.desired
####
#Unit conversions
####
#convert pressure from what is assumed to be bar
df$depth..m <- df$Pressure...channel..1 /10.197
#2 stage smoothing & VV
df$depth.m <- stats::filter(df$depth..m, filter = rep(1,5*datFreq)/(5*datFreq), sides = 2, circular = T)
df$VV <- c(0, diff(df$depth.m))
df$VV <- stats::filter(df$VV, filter = rep(1,1*datFreq), sides = 2, circular = T) #smooth VV over 1s
#subset data of interest; extract 24 hrs following 1st quaritle
library(dplyr)
# NOTE(review): plot indices 13012:117861 are hard-coded for one deployment
plot(df$depth..m[13012:117861], type = "l"); locator(1) #check for tagON/tagOFF
quart.idx <- which.min(abs(df$tidx-quantile(df$tidx, 0.25)))
df2 <- df %>% slice(quart.idx:(quart.idx + 24*60*60)) %>%
select(dts, tidx, depth.m, VV, accel.x, accel.y, accel.z, odba) %>%
mutate(id = deployment)
#write data out
write.csv(df2, file = file.path(file.path(d.dir, paste(deployment,"_NN.csv",sep=""))))
|
#######################
# XGBOOST weighted 14 #
#######################
# Generates the Santander product-recommendation submission: loads trained
# xgboost base models, combines their weighted test predictions per product,
# normalises the probabilities, and writes a ranked top-7 submission file.
# Clear the workspace
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are script-local
# conveniences and make the script non-portable.
rm(list=ls())
# Set working directory
setwd("C:/Users/Tom/Documents/Kaggle/Santander")
# Load the required libraries
library(data.table)
library(bit64)
library(xgboost)
library(stringr)
# Submission date and file name
submissionDate <- "09-12-2016"
loadFile <- "xgboost weighted trainAll 14, ecue jun15 1.4 apr15 0, linear increase jun15 times6 back 15-0 no zeroing, exponential normalisation joint"
submissionFile <- "xgboost weighted trainAll 14 nom pens swap nomina, ecue jun15 1.4 apr15 0, linear increase jun15 times6 back 15-0 no zeroing, exponential normalisation joint"
# Target date
targetDate <- "12-11-2016"
# Target train model folders
trainModelsFolder <- "trainTrainAll Top 100 monthProduct"
# trainAll is inferred from the folder name convention.
trainAll <- grepl("TrainAll", trainModelsFolder)
# Target feature files folder
testFeaturesFolder <- "testNoStagnantRemoval"
# Option to store the product predictions
loadPredictions <- TRUE # If loadPredictions TRUE...
loadBaseModelPredictions <- TRUE # ... loadBaseModelPredictions is ignored
savePredictions <- TRUE
saveBaseModelPredictions <- TRUE
savePredictionsBeforeNormalisation <- TRUE
# Option to drop models that were trained on a subset of the data
dropFoldModels <- TRUE
foldRelativeWeight <- 0.8
# Option to drop bootstrap models
dropBootModels <- TRUE
# Use the relative frequency of the different products in June 2016
normalizeProdProbs <- TRUE
normalizeMode <- c("additive", "linear", "exponential")[3]
additiveNormalizeProds <- NULL #c("ind_cco_fin_ult1")
fractionPosFlankUsers <- 0.035114
expectedCountPerPosFlank <- 1.25
# Marginal normalisation approach - not considered if trainAll
marginalNormalisation <- c("linear", "exponential")[2]
# List the total product weights over all months
weightSum <- 1 # sum(monthsBackModelsWeights)
# Swap nomina and nom pens in rank if they are both not owned in the previous
# period and if the rank of nomina > rank of nom_pens
nomPensAboveNominaBothNotOwned <- TRUE
# Option to predict a subset of the test data
predictSubset <- FALSE
# predictSubsetCount <- 5e4
# Prediction subfolder
predictionsFolder <- "Predictions"
# Zero probability target variable names
zeroTargets <- NULL
# zeroTargets <- c("ind_deco_fin_ult1", "ind_dela_fin_ult1")
# zeroTargets <- c("ind_deco_fin_ult1", "ind_dela_fin_ult1",
# "ind_deme_fin_ult1", "ind_fond_fin_ult1")
# Source the exponential normalisation and weights extraction
source("Common/exponentialNormaliser.R")
source("Common/getModelWeights.R")
# Load the target product weights
dateTargetWeights <- readRDS(file.path(getwd(), "Model weights", targetDate,
"model weights first.rds"))
######################################################################
# Create predictions subfolder
# Create the target folder if it does not exist yet
predictionsPath <- file.path(getwd(), "Submission", submissionDate,
predictionsFolder)
dir.create(predictionsPath, showWarnings = FALSE)
# Create model predictions subfolder
if(saveBaseModelPredictions){
baseModelPredictionsPath <- file.path(predictionsPath, submissionFile)
dir.create(baseModelPredictionsPath, showWarnings = FALSE)
}
# When loading, point the base model predictions path at the load file
# (overrides the save path set just above).
if(loadBaseModelPredictions){
baseModelPredictionsPath <- file.path(predictionsPath, loadFile)
}
# Choose the pre-normalisation predictions file name based on whether
# predictions are being loaded or freshly generated.
if(loadPredictions){
rawPredictionsPath <- file.path(predictionsPath,
paste0("prevNorm", loadFile, ".rds"))
} else{
rawPredictionsPath <- file.path(predictionsPath,
paste0("prevNorm", submissionFile, ".rds"))
}
# Extract clients with positive flanks
posFlankClientsFn <- file.path(getwd(), "Feature engineering", targetDate,
"positive flank clients.rds")
posFlankClients <- readRDS(posFlankClientsFn)
# Path to the xgboost train models
modelsBasePath <- file.path(getwd(), "First level learners", targetDate,
trainModelsFolder)
modelGroups <- list.dirs(modelsBasePath)[-1]
modelGroups <- modelGroups[!grepl("Manual tuning", modelGroups)]
modelGroups <- modelGroups[!grepl("no fold BU", modelGroups)] #[-c(6,7)]
nbModelGroups <- length(modelGroups)
# Construct a data table with information on the base models: the number of
# months back, the weight, the target variable and the path to the model
baseModelInfo <- NULL
baseModels <- list()
# Build `baseModelInfo` (one row per stored model: group extension, target
# product, months back, lag, relative weight) and `baseModels` (the model
# objects themselves, aligned with the rows of `baseModelInfo`).
for(i in seq_len(nbModelGroups)){
  # List the files in the considered model group
  modelGroup <- modelGroups[i]
  slashPositions <- gregexpr("\\/", modelGroup)[[1]]
  modelGroupExtension <- substring(modelGroup,
                                   1 + slashPositions[length(slashPositions)])
  modelGroupFiles <- list.files(modelGroup)
  modelGroupFiles <- modelGroupFiles[!grepl("no fold BU", modelGroupFiles)]
  # Option to drop folds of model group files (trained on a subset of the
  # train data)
  if(dropFoldModels){
    modelGroupFiles <- modelGroupFiles[!grepl("Fold", modelGroupFiles)]
  }
  # Option to drop bootstrap model replicates
  if(dropBootModels){
    modelGroupFiles <- modelGroupFiles[!grepl("Boot", modelGroupFiles)]
  }
  nbModels <- length(modelGroupFiles)
  # The group folder name encodes months back and lag as "BackXLagY".
  monthsBack <- suppressWarnings(
    as.numeric(substring(gsub("Lag.*$", "", modelGroupExtension), 5)))
  lag <- suppressWarnings(as.numeric(gsub("^.*Lag", "", modelGroupExtension)))
  # Loop over all models
  if(nbModels > 0){
    for(j in 1:nbModels){
      modelGroupFile <- modelGroupFiles[j]
      modelInfo <- readRDS(file.path(modelGroup, modelGroupFile))
      targetProduct <- modelInfo$targetVar
      # Load the product - month weight
      relativeWeight <- getModelWeights(monthsBack, targetProduct,
                                        dateTargetWeights)
      # Calculate the fold model weight
      isFold <- grepl("Fold", modelGroupFile)
      # Adjust fold weights because some models didn't store the fifth fold.
      # Fix: the original computed prodMonthFiles/nbFoldsProd twice in a row
      # (verbatim duplicate); the redundant repetition has been removed.
      prodMonthFiles <- modelGroupFiles[grepl(targetProduct, modelGroupFiles)]
      nbFoldsProd <- sum(grepl("Fold", prodMonthFiles))
      foldBaseWeight <- foldRelativeWeight * 4 / nbFoldsProd
      # Guard against division by zero when no fold files exist.
      if(!is.finite(foldBaseWeight)){
        foldBaseWeight <- 0
      }
      # Full model counts 1; each fold counts foldBaseWeight; normalise so
      # the weights of one product-month sum to 1.
      productMonthSum <- 1 + nbFoldsProd*foldBaseWeight
      if(isFold){
        foldModelWeight <- foldBaseWeight/productMonthSum
      } else{
        foldModelWeight <- 1/productMonthSum
      }
      # Append the model info
      baseModelInfo <- rbind(baseModelInfo,
                             data.table(
                               modelGroupExtension = modelGroupExtension,
                               targetProduct = targetProduct,
                               monthsBack = monthsBack,
                               modelLag = lag,
                               relativeWeight = relativeWeight * foldModelWeight)
      )
      baseModels <- c(baseModels, list(modelInfo))
    }
  }
}
# Assign a unique id to every base model (row order matches `baseModels`).
baseModelInfo[, modelId := 1:nrow(baseModelInfo)]
# Extract the number of marginal/joint/conditional lags and months back
# Set the base model info to default settings when the base models are
# trained over multiple month periods
if(all(is.na(baseModelInfo$modelLag))){
nbGroups <- length(unique(baseModelInfo$modelGroupExtension))
baseModelInfo <- baseModelInfo[order(targetProduct), ]
# baseModelInfo$monthsBack <- -(1:nbGroups)
baseModelInfo$modelLag <- 5
baseModelInfo$relativeWeight <- 1
# NOTE(review): `defaultTestLag` is not defined anywhere in this script;
# this branch would fail unless it comes from a sourced helper - confirm.
monthsBackLags <- rep(defaultTestLag, nbGroups)
nbMarginalLags <- length(monthsBackLags)
nbConditionalLags <- 1
} else{
monthsBackLags <- rev(sort(unique(baseModelInfo$modelLag)))
nbMarginalLags <- length(monthsBackLags)
nbConditionalLags <- length(monthsBackLags)
}
# Normalize the base model weights (necessary since some weights might be set
# to zero) so each product's weights sum to `weightSum`.
uniqueBaseModels <- sort(unique(baseModelInfo$targetProduct))
for(i in 1:length(uniqueBaseModels)){
productIds <- baseModelInfo$targetProduct==uniqueBaseModels[i]
productWeightSum <- baseModelInfo[productIds, sum(relativeWeight)]
normalizeWeightRatio <- weightSum/productWeightSum
baseModelInfo[productIds, relativeWeight := relativeWeight*
normalizeWeightRatio]
}
baseModelInfo <- baseModelInfo[order(monthsBack), ]
# Extract the base model names
baseModelNames <- unique(baseModelInfo[monthsBack==0, targetProduct])
# baseModels <- list.files(modelsPath)
# baseModelNames <- gsub("[.]rds$", "", baseModels)
# allModels <- lapply(baseModels, function(x) readRDS(file.path(modelsPath, x)))
# names(allModels) <- baseModelNames
# Load the test data with lag one
testDataLag <- readRDS(file.path(getwd(), "Feature engineering", targetDate,
testFeaturesFolder, "Lag1 features.rds"))
# Optionally subset the test data
# NOTE(review): predictSubsetCount is commented out in the config above, so
# enabling predictSubset would error - confirm before use.
if(predictSubset){
predictSubsetIds <- sort(sample(1:nrow(testDataLag), predictSubsetCount))
testDataLag <- testDataLag[predictSubsetIds]
}
# Calculate which test records had at least one positive flank
testDataPosFlank <- testDataLag$ncodpers %in% posFlankClients
# Load the validation data in order to know how to rearrange the target columns
trainFn <- "train/Back15Lag1 features.rds"
colOrderData <- readRDS(file.path(getwd(), "Feature engineering",
targetDate, trainFn))
# Target columns follow the "ind_*_ult1" naming convention.
targetCols <- grep("^ind_.*_ult1$", names(colOrderData), value=TRUE)
rm(colOrderData)
gc()
nbBaseModels <- length(targetCols)
# Load the estimated relative count contributions
countContributions <- readRDS(file.path(getwd(), "Feature engineering",
targetDate,
# "monthlyMAPContributions.rds"))
"monthlyRelativeProductCounts.rds"))
# Predict if there will be any positive flanks.
# When not training on all data: combine the "hasNewProduct" marginal models
# across lags into `newProdPredictions` and normalise so the expected number
# of positive flanks matches the extrapolated public leaderboard count.
# When trainAll: the marginal probability is set to 1 for every client.
if(!trainAll){
posFlankModelInfo <- baseModelInfo[targetProduct=="hasNewProduct"]
newProdPredictions <- rep(0, nrow(testDataLag))
# Sanity check: one marginal model per lag is expected.
if(nrow(posFlankModelInfo) != nbMarginalLags) browser()
for(i in 1:nbMarginalLags){
# Show progress message
cat("Generating new product predictions for lag", i, "of", nbMarginalLags,
"\n")
lag <- posFlankModelInfo[i, modelLag]
weight <- posFlankModelInfo[i, relativeWeight]
newProdModel <- baseModels[[posFlankModelInfo[i, modelId]]]
# Load the test data with the appropriate lag
testDataLag <- readRDS(file.path(getwd(), "Feature engineering", targetDate,
testFeaturesFolder,
paste0("Lag", lag, " features.rds")))
# Optionally subset the test data
if(predictSubset){
testDataLag <- testDataLag[predictSubsetIds]
}
predictorData <- testDataLag[, newProdModel$predictors, with=FALSE]
predictorDataM <- data.matrix(predictorData)
rm(predictorData)
gc()
newProdPredictionsLag <- predict(newProdModel$model, predictorDataM)
# Accumulate the weighted marginal predictions across lags.
newProdPredictions <- newProdPredictions + newProdPredictionsLag*weight
}
# Rescale the weighted sum to the [0, 1] interval
newProdPredictions <- newProdPredictions/weightSum
# Calculate the mean predictions depending on the May 2015 flag
meanGroupPredsMayFlag <-
c(mean(newProdPredictions[testDataLag$hasMay15Data==0]),
mean(newProdPredictions[testDataLag$hasMay15Data==1]))
# Calculate the mean predictions depending on the hasAnyPosFlank flag
meanGroupPredsPosFlank <- c(mean(newProdPredictions[!testDataPosFlank]),
mean(newProdPredictions[testDataPosFlank]))
# Compare the number of expected positive flanks versus the extrapolated
# public leaderboard counts
expectedPosFlanks <- sum(newProdPredictions)
leaderboardPosFlanks <- fractionPosFlankUsers*nrow(testDataLag)
normalisedProbRatio <- leaderboardPosFlanks/expectedPosFlanks
cat("Expected/leaderboard positive flank ratio",
round(1/normalisedProbRatio, 2), "\n")
# Normalize the marginal probabilities such that the expected number of
# products with a positive flanks matches the extrapolated public leaderboard
# count
if(marginalNormalisation == "linear"){
newProdPredictions <- newProdPredictions * normalisedProbRatio
} else{
newProdPredictions <- probExponentNormaliser(newProdPredictions,
normalisedProbRatio)
}
} else{
newProdPredictions <- rep(1, nrow(testDataLag))
}
# Optionally load the predictions before normalisation if they are available;
# otherwise generate the per-product conditional predictions for every lag,
# combining fold models by their relative weights, with an optional per
# product/lag prediction cache on disk.
if(loadPredictions && file.exists(rawPredictionsPath)){
allPredictions <- readRDS(rawPredictionsPath)
} else{
# Loop over all lags and base models
allPredictions <- NULL
for(lagId in 1:nbConditionalLags){
# Show progress message
cat("\nGenerating positive flank predictions for lag", lagId, "of",
nbConditionalLags, "@", as.character(Sys.time()), "\n\n")
# Set the lag weight and the number of train months back
lag <- monthsBackLags[lagId]
# monthsBack <- monthsBackModels[lagId]
# Load the test data with the appropriate lag
testDataLag <- readRDS(file.path(getwd(), "Feature engineering", targetDate,
testFeaturesFolder,
paste0("Lag", lag, " features.rds")))
# Optionally subset the test data
if(predictSubset){
testDataLag <- testDataLag[predictSubsetIds]
}
for(i in 1:nbBaseModels){
# Extract the target column
targetVar <- targetCols[i]
targetModelIds <- baseModelInfo[targetProduct==targetVar &
modelLag==lag, modelId]
# Show progress message
cat("Generating test predictions for model", i, "of", nbBaseModels, "\n")
# Optionally, load the base model predictions from the per-product cache
if(exists("baseModelPredictionsPath")){
baseModelPredPath <- file.path(baseModelPredictionsPath,
paste0(targetVar, " Lag ", lag, ".rds"))
} else{
baseModelPredPath <- ""
}
foldWeights <- baseModelInfo[modelId %in% targetModelIds,
relativeWeight]
weight <- sum(foldWeights)
loadFileExists <- file.exists(baseModelPredPath)
if(loadBaseModelPredictions && loadFileExists){
predictionsDT <- readRDS(baseModelPredPath)
} else{
# Set the predictions to zero if the target variable is in the zeroed
# list
if(targetVar %in% zeroTargets || weight <= 0){
predictions <- rep(0, nrow(testDataLag))
} else{
nbTargetModelFolds <- length(targetModelIds)
foldPredictions <- rep(0, nrow(testDataLag))
# A product counts as already owned if its Lag1 flag is NA or 1.
alreadyOwned <- is.na(testDataLag[[paste0(targetVar, "Lag1")]]) |
testDataLag[[paste0(targetVar, "Lag1")]] == 1
# Extract predictors data from the features data; predictions are
# only generated for clients who did not already own the product.
predictorData <-
testDataLag[!alreadyOwned,
baseModels[[targetModelIds[1]]]$predictors, with=FALSE]
# Convert the predictor data to a matrix
predictorDataM <- data.matrix(predictorData)
rm(predictorData)
gc()
for(fold in 1:nbTargetModelFolds){
targetModelId <- targetModelIds[fold]
# Loop over all folds and sum the predictions
targetModel <- baseModels[[targetModelId]]
# Extract the model weight
weightFold <- foldWeights[fold]
# if(weight == 0) browser()
# Another check that we are using the right model
# Better safe than sorry :)
if(targetModel$targetVar != targetVar) browser()
# Calculate the test predictions
predictionsPrevNotOwnedFold <- predict(targetModel$model,
predictorDataM)
foldPredictions[!alreadyOwned] <- foldPredictions[!alreadyOwned] +
predictionsPrevNotOwnedFold*weightFold
}
# if(targetVar == "ind_reca_fin_ult1") browser()
predictions <- foldPredictions/weight
# Set the predictions to 0 for products that are already owned
# predictions[alreadyOwned] <- -runif(sum(alreadyOwned))
predictions[alreadyOwned] <- 0
}
# The mean prediction should equal the mean map contribution if the
# predictions are set to zero for the already owned products
# mean(predictions)/mapContributions[17, i]
# Add the predictions to the data table with all target predictions
predictionsDT <- data.table(ncodpers = testDataLag$ncodpers,
predictions = predictions,
product = targetVar)
}
predictionsDT[, weightedPrediction :=
predictionsDT$predictions*weight]
# if(targetVar == "ind_reca_fin_ult1") browser()
# Accumulate the weighted predictions for this product across lags.
if(targetVar %in% allPredictions$product){
allPredictions[product==targetVar, weightedPrediction:=
weightedPrediction +
predictionsDT$weightedPrediction]
} else{
allPredictions <- rbind(allPredictions, predictionsDT)
}
# Save the base model predictions to the cache (only when they were not
# themselves loaded from the cache).
if(saveBaseModelPredictions && (!loadBaseModelPredictions ||
(loadBaseModelPredictions &&
!loadFileExists))){
predictionsDT[, weightedPrediction:=NULL]
saveRDS(predictionsDT, baseModelPredPath)
}
}
}
# Divide the weighted summed predictions by the weight sum
allPredictions[, prediction := weightedPrediction / weightSum]
allPredictions[, weightedPrediction := NULL]
allPredictions[, predictions := NULL]
# meanConditionalProb <- mean(allPredictions$prediction)*24
# Save the predictions to the predictions folder before normalisation
if(savePredictionsBeforeNormalisation){
saveRDS(allPredictions, file=rawPredictionsPath)
}
}
# Optionally, multiply the predictions by the relative count ratio of June
# 2016 so that the expected positive-flank count per product matches the
# extrapolated public leaderboard counts.
probMultipliers <- rep(NA, nbBaseModels)
if(normalizeProdProbs){
  for(i in 1:nbBaseModels){
    # Show progress message
    cat("Normalizing product predictions", i, "of", nbBaseModels, "\n")
    # Extract the target column
    targetVar <- targetCols[i]
    # Look up if the target variable was already owned
    alreadyOwned <- is.na(testDataLag[[paste0(targetVar, "Lag1")]]) |
      testDataLag[[paste0(targetVar, "Lag1")]] == 1
    predictions <- allPredictions[product==targetVar, prediction]
    predictionsPrevNotOwned <- predictions[!alreadyOwned]
    # Sanity check: already-owned products must have zero probability
    if(suppressWarnings(max(predictions[alreadyOwned]))>0) browser()
    # Normalize the predicted probabilities
    predictedPosFlankCount <- sum(predictionsPrevNotOwned *
                                    newProdPredictions[!alreadyOwned])
    probMultiplier <- nrow(testDataLag) * fractionPosFlankUsers *
      expectedCountPerPosFlank * countContributions[17, i] /
      predictedPosFlankCount
    probMultipliers[i] <- probMultiplier
    # Fix: removed a leftover debugging line that called browser() for a
    # hard-coded set of product ids (3, 5, 7, 13, 18, 19, 22, 23, 24); it
    # would drop into the debugger on every interactive run of this script.
    if(is.finite(probMultiplier)){
      if(normalizeMode == "additive" || targetVar %in% additiveNormalizeProds){
        predictions[!alreadyOwned] <- predictions[!alreadyOwned] +
          (probMultiplier-1)*mean(predictions[!alreadyOwned])
      } else{
        if(normalizeMode == "linear"){
          predictions[!alreadyOwned] <- predictions[!alreadyOwned] *
            probMultiplier
        } else{
          predictions[!alreadyOwned] <- probExponentNormaliser(
            predictions[!alreadyOwned], probMultiplier,
            weights=newProdPredictions[!alreadyOwned])
        }
      }
      # Update the predictions in allPredictions
      allPredictions[product==targetVar, prediction:=predictions]
    }
  }
}
# Order the predicted probabilities for all products by client
# (order_predict == 1 is the most probable new product for that client).
setkey(allPredictions, ncodpers)
allPredictions[,order_predict := match(1:length(prediction),
order(-prediction)), by=ncodpers]
allPredictions <- allPredictions[order(ncodpers, -prediction), ]
# Swap nomina and nom pens in rank if they are both not owned in the previous
# period and if the rank of nomina > rank of nom_pens
if(nomPensAboveNominaBothNotOwned){
# Find users where the rank of nomina < rank of nom pens and both prob not
# zero
# NOTE(review): the alignment of swapIds across the per-product subsets
# relies on allPredictions being keyed/sorted by ncodpers above, so each
# product subset lists clients in the same order - confirm if modifying.
ncodpers <- unique(allPredictions$ncodpers)
nominaProb <- allPredictions[product == "ind_nomina_ult1", prediction]
nominaProbRank <- allPredictions[product == "ind_nomina_ult1", order_predict]
nomPensProb <- allPredictions[product == "ind_nom_pens_ult1", prediction]
nomPensProbRank <- allPredictions[product == "ind_nom_pens_ult1", order_predict]
swapIds <- nominaProb>0 & nomPensProb>0 & nominaProb>nomPensProb
swapNcodPers <- ncodpers[swapIds]
allPredictions[ncodpers %in% swapNcodPers & product == "ind_nomina_ult1",
order_predict := nomPensProbRank[swapIds]]
allPredictions[ncodpers %in% swapNcodPers & product == "ind_nom_pens_ult1",
order_predict := nominaProbRank[swapIds]]
}
# Make sure that the order of the predictions is unique for each client
orderCount <- allPredictions[, .N, .(ncodpers, order_predict)]
if(max(orderCount$N)>1) browser()
# Show the confidence in the top prediction
hist(allPredictions[order_predict==1, prediction])
# Calculate the top predicted products counts
topPredictions <- allPredictions[order_predict==1, .N, product]
topPredictions <- topPredictions[order(-N)]
topPredictionsPosFlanks <- allPredictions[order_predict==1 &
ncodpers %in% posFlankClients,
.N, product]
topPredictionsPosFlanks <- topPredictionsPosFlanks[order(-N)]
# Study the ranking of specific products
productRankDelaFin <- allPredictions[product=="ind_dela_fin_ult1", .N,
order_predict]
productRankDelaFin <- productRankDelaFin[order(order_predict),]
productRankDecoFin <- allPredictions[product=="ind_deco_fin_ult1", .N,
order_predict]
productRankDecoFin <- productRankDecoFin[order(order_predict),]
productRankTjcrFin <- allPredictions[product=="ind_tjcr_fin_ult1", .N,
order_predict]
productRankTjcrFin <- productRankTjcrFin[order(order_predict),]
productRankRecaFin <- allPredictions[product=="ind_reca_fin_ult1", .N,
order_predict]
productRankRecaFin <- productRankRecaFin[order(order_predict),]
# Verify that the mean prediction aligns with the relative June 15 ratio
# (rep(..., each = nbBaseModels) assumes nbBaseModels rows per client in
# client order - TODO confirm).
allPredictions[, totalProb := prediction * rep(newProdPredictions,
each = nbBaseModels)]
meanProductProbs <- allPredictions[, .(meanCondProb = mean(prediction),
meanProb = mean(totalProb),
totalProb = sum(totalProb)), product]
meanProductProbs <- meanProductProbs[order(-meanProb), ]
# Combine the top seven products to a string vector (vectorized over
# clients; relies on consistent row order of the rank subsets).
productString <- paste(allPredictions[order_predict==1, product],
allPredictions[order_predict==2, product],
allPredictions[order_predict==3, product],
allPredictions[order_predict==4, product],
allPredictions[order_predict==5, product],
allPredictions[order_predict==6, product],
allPredictions[order_predict==7, product])
# Check for ties in the ordering (should not occur)
if(length(productString) != nrow(testDataLag)) browser()
# Add the id and top 7 to the submission file
submission <- data.frame(ncodpers = testDataLag$ncodpers,
added_products = productString)
# Extract template submission file
paddedSubmission <- fread("Data/sample_submission.csv")
# Set the added products to an empty character string
paddedSubmission[, added_products := ""]
# Replace the matched ids in padded submission by the combined submission file
matchIds <- match(submission$ncodpers, paddedSubmission$ncodpers)
paddedSubmission[matchIds, added_products := submission$added_products]
# Write the padded submission to a csv file
write.csv(paddedSubmission, file.path(getwd(), "Submission", submissionDate,
paste0(submissionFile, ".csv")),
row.names = FALSE)
# Save the predictions to the predictions folder
if(savePredictions){
saveRDS(allPredictions, file=file.path(predictionsPath,
paste0(submissionFile, ".rds")))
}
# Display the successful submission message
cat("Submission file created successfully!\n",
nrow(submission)," records were predicted (",
round(nrow(submission)/nrow(paddedSubmission)*100,2), "%)\n", sep="")
|
/Submission/09-12-2016/xgboost weighted 14.R
|
no_license
|
PetrShypila/Santander-Product-Recommendation
|
R
| false
| false
| 25,698
|
r
|
#######################
# XGBOOST weighted 14 #
#######################
# NOTE(review): this script is a verbatim duplicate of the one above
# (dataset concatenation artifact). Generates the Santander product
# recommendation submission from weighted xgboost base model predictions.
# Clear the workspace
rm(list=ls())
# Set working directory
setwd("C:/Users/Tom/Documents/Kaggle/Santander")
# Load the required libraries
library(data.table)
library(bit64)
library(xgboost)
library(stringr)
# Submission date and file name
submissionDate <- "09-12-2016"
loadFile <- "xgboost weighted trainAll 14, ecue jun15 1.4 apr15 0, linear increase jun15 times6 back 15-0 no zeroing, exponential normalisation joint"
submissionFile <- "xgboost weighted trainAll 14 nom pens swap nomina, ecue jun15 1.4 apr15 0, linear increase jun15 times6 back 15-0 no zeroing, exponential normalisation joint"
# Target date
targetDate <- "12-11-2016"
# Target train model folders
trainModelsFolder <- "trainTrainAll Top 100 monthProduct"
trainAll <- grepl("TrainAll", trainModelsFolder)
# Target feature files folder
testFeaturesFolder <- "testNoStagnantRemoval"
# Option to store the product predictions
loadPredictions <- TRUE # If loadPredictions TRUE...
loadBaseModelPredictions <- TRUE # ... loadBaseModelPredictions is ignored
savePredictions <- TRUE
saveBaseModelPredictions <- TRUE
savePredictionsBeforeNormalisation <- TRUE
# Option to drop models that were trained on a subset of the data
dropFoldModels <- TRUE
foldRelativeWeight <- 0.8
# Option to drop bootstrap models
dropBootModels <- TRUE
# Use the relative frequency of the different products in June 2016
normalizeProdProbs <- TRUE
normalizeMode <- c("additive", "linear", "exponential")[3]
additiveNormalizeProds <- NULL #c("ind_cco_fin_ult1")
fractionPosFlankUsers <- 0.035114
expectedCountPerPosFlank <- 1.25
# Marginal normalisation approach - not considered if trainAll
marginalNormalisation <- c("linear", "exponential")[2]
# List the total product weights over all months
weightSum <- 1 # sum(monthsBackModelsWeights)
# Swap nomina and nom pens in rank if they are both not owned in the previous
# period and if the rank of nomina > rank of nom_pens
nomPensAboveNominaBothNotOwned <- TRUE
# Option to predict a subset of the test data
predictSubset <- FALSE
# predictSubsetCount <- 5e4
# Prediction subfolder
predictionsFolder <- "Predictions"
# Zero probability target variable names
zeroTargets <- NULL
# zeroTargets <- c("ind_deco_fin_ult1", "ind_dela_fin_ult1")
# zeroTargets <- c("ind_deco_fin_ult1", "ind_dela_fin_ult1",
# "ind_deme_fin_ult1", "ind_fond_fin_ult1")
# Source the exponential normalisation and weights extraction
source("Common/exponentialNormaliser.R")
source("Common/getModelWeights.R")
# Load the target product weights
dateTargetWeights <- readRDS(file.path(getwd(), "Model weights", targetDate,
"model weights first.rds"))
######################################################################
# Create predictions subfolder
# Create the target folder if it does not exist yet
predictionsPath <- file.path(getwd(), "Submission", submissionDate,
predictionsFolder)
dir.create(predictionsPath, showWarnings = FALSE)
# Create model predictions subfolder
if(saveBaseModelPredictions){
baseModelPredictionsPath <- file.path(predictionsPath, submissionFile)
dir.create(baseModelPredictionsPath, showWarnings = FALSE)
}
if(loadBaseModelPredictions){
baseModelPredictionsPath <- file.path(predictionsPath, loadFile)
}
if(loadPredictions){
rawPredictionsPath <- file.path(predictionsPath,
paste0("prevNorm", loadFile, ".rds"))
} else{
rawPredictionsPath <- file.path(predictionsPath,
paste0("prevNorm", submissionFile, ".rds"))
}
# Extract clients with positive flanks
posFlankClientsFn <- file.path(getwd(), "Feature engineering", targetDate,
"positive flank clients.rds")
posFlankClients <- readRDS(posFlankClientsFn)
# Path to the xgboost train models
modelsBasePath <- file.path(getwd(), "First level learners", targetDate,
trainModelsFolder)
modelGroups <- list.dirs(modelsBasePath)[-1]
modelGroups <- modelGroups[!grepl("Manual tuning", modelGroups)]
modelGroups <- modelGroups[!grepl("no fold BU", modelGroups)] #[-c(6,7)]
nbModelGroups <- length(modelGroups)
# Construct a data table with information on the base models: the number of
# months back, the weight, the target variable and the path to the model
baseModelInfo <- NULL
baseModels <- list()
# Build `baseModelInfo` (one row per stored model: group extension, target
# product, months back, lag, relative weight) and `baseModels` (the model
# objects themselves, aligned with the rows of `baseModelInfo`).
for(i in seq_len(nbModelGroups)){
  # List the files in the considered model group
  modelGroup <- modelGroups[i]
  slashPositions <- gregexpr("\\/", modelGroup)[[1]]
  modelGroupExtension <- substring(modelGroup,
                                   1 + slashPositions[length(slashPositions)])
  modelGroupFiles <- list.files(modelGroup)
  modelGroupFiles <- modelGroupFiles[!grepl("no fold BU", modelGroupFiles)]
  # Option to drop folds of model group files (trained on a subset of the
  # train data)
  if(dropFoldModels){
    modelGroupFiles <- modelGroupFiles[!grepl("Fold", modelGroupFiles)]
  }
  # Option to drop bootstrap model replicates
  if(dropBootModels){
    modelGroupFiles <- modelGroupFiles[!grepl("Boot", modelGroupFiles)]
  }
  nbModels <- length(modelGroupFiles)
  # The group folder name encodes months back and lag as "BackXLagY".
  monthsBack <- suppressWarnings(
    as.numeric(substring(gsub("Lag.*$", "", modelGroupExtension), 5)))
  lag <- suppressWarnings(as.numeric(gsub("^.*Lag", "", modelGroupExtension)))
  # Loop over all models
  if(nbModels > 0){
    for(j in 1:nbModels){
      modelGroupFile <- modelGroupFiles[j]
      modelInfo <- readRDS(file.path(modelGroup, modelGroupFile))
      targetProduct <- modelInfo$targetVar
      # Load the product - month weight
      relativeWeight <- getModelWeights(monthsBack, targetProduct,
                                        dateTargetWeights)
      # Calculate the fold model weight
      isFold <- grepl("Fold", modelGroupFile)
      # Adjust fold weights because some models didn't store the fifth fold.
      # Fix: the original computed prodMonthFiles/nbFoldsProd twice in a row
      # (verbatim duplicate); the redundant repetition has been removed.
      prodMonthFiles <- modelGroupFiles[grepl(targetProduct, modelGroupFiles)]
      nbFoldsProd <- sum(grepl("Fold", prodMonthFiles))
      foldBaseWeight <- foldRelativeWeight * 4 / nbFoldsProd
      # Guard against division by zero when no fold files exist.
      if(!is.finite(foldBaseWeight)){
        foldBaseWeight <- 0
      }
      # Full model counts 1; each fold counts foldBaseWeight; normalise so
      # the weights of one product-month sum to 1.
      productMonthSum <- 1 + nbFoldsProd*foldBaseWeight
      if(isFold){
        foldModelWeight <- foldBaseWeight/productMonthSum
      } else{
        foldModelWeight <- 1/productMonthSum
      }
      # Append the model info
      baseModelInfo <- rbind(baseModelInfo,
                             data.table(
                               modelGroupExtension = modelGroupExtension,
                               targetProduct = targetProduct,
                               monthsBack = monthsBack,
                               modelLag = lag,
                               relativeWeight = relativeWeight * foldModelWeight)
      )
      baseModels <- c(baseModels, list(modelInfo))
    }
  }
}
# Assign a unique id to every base model (row order matches `baseModels`).
baseModelInfo[, modelId := 1:nrow(baseModelInfo)]
# Extract the number of marginal/joint/conditional lags and months back
# Set the base model info to default settings when the base models are
# trained over multiple month periods
if(all(is.na(baseModelInfo$modelLag))){
nbGroups <- length(unique(baseModelInfo$modelGroupExtension))
baseModelInfo <- baseModelInfo[order(targetProduct), ]
# baseModelInfo$monthsBack <- -(1:nbGroups)
baseModelInfo$modelLag <- 5
baseModelInfo$relativeWeight <- 1
# NOTE(review): `defaultTestLag` is not defined anywhere in this script;
# this branch would fail unless it comes from a sourced helper - confirm.
monthsBackLags <- rep(defaultTestLag, nbGroups)
nbMarginalLags <- length(monthsBackLags)
nbConditionalLags <- 1
} else{
monthsBackLags <- rev(sort(unique(baseModelInfo$modelLag)))
nbMarginalLags <- length(monthsBackLags)
nbConditionalLags <- length(monthsBackLags)
}
# Normalize the base model weights (necessary since some weights might be set
# to zero) so each product's weights sum to `weightSum`.
uniqueBaseModels <- sort(unique(baseModelInfo$targetProduct))
for(i in 1:length(uniqueBaseModels)){
productIds <- baseModelInfo$targetProduct==uniqueBaseModels[i]
productWeightSum <- baseModelInfo[productIds, sum(relativeWeight)]
normalizeWeightRatio <- weightSum/productWeightSum
baseModelInfo[productIds, relativeWeight := relativeWeight*
normalizeWeightRatio]
}
baseModelInfo <- baseModelInfo[order(monthsBack), ]
# Extract the base model names
baseModelNames <- unique(baseModelInfo[monthsBack==0, targetProduct])
# baseModels <- list.files(modelsPath)
# baseModelNames <- gsub("[.]rds$", "", baseModels)
# allModels <- lapply(baseModels, function(x) readRDS(file.path(modelsPath, x)))
# names(allModels) <- baseModelNames
# Load the test data with lag one
testDataLag <- readRDS(file.path(getwd(), "Feature engineering", targetDate,
testFeaturesFolder, "Lag1 features.rds"))
# Optionally subset the test data
# NOTE(review): predictSubsetCount is commented out in the config above, so
# enabling predictSubset would error - confirm before use.
if(predictSubset){
predictSubsetIds <- sort(sample(1:nrow(testDataLag), predictSubsetCount))
testDataLag <- testDataLag[predictSubsetIds]
}
# Calculate which test records had at least one positive flank
testDataPosFlank <- testDataLag$ncodpers %in% posFlankClients
# Load the validation data in order to know how to rearrange the target columns
trainFn <- "train/Back15Lag1 features.rds"
colOrderData <- readRDS(file.path(getwd(), "Feature engineering",
targetDate, trainFn))
# Target columns follow the "ind_*_ult1" naming convention.
targetCols <- grep("^ind_.*_ult1$", names(colOrderData), value=TRUE)
rm(colOrderData)
gc()
nbBaseModels <- length(targetCols)
# Load the estimated relative count contributions
countContributions <- readRDS(file.path(getwd(), "Feature engineering",
targetDate,
# "monthlyMAPContributions.rds"))
"monthlyRelativeProductCounts.rds"))
# Predict if there will be any positive flanks
# Marginal model: estimate for each client the probability of at least one
# new product in the test month, as a weighted average over the lagged
# "hasNewProduct" base models, then rescale to match the extrapolated
# public leaderboard positive flank count.
if(!trainAll){
  posFlankModelInfo <- baseModelInfo[targetProduct=="hasNewProduct"]
  newProdPredictions <- rep(0, nrow(testDataLag))
  # Sanity guard: exactly one marginal model per lag is expected; drops into
  # the debugger (interactive runs) when the bookkeeping is inconsistent.
  if(nrow(posFlankModelInfo) != nbMarginalLags) browser()
  for(i in 1:nbMarginalLags){
    # Show progress message
    cat("Generating new product predictions for lag", i, "of", nbMarginalLags,
        "\n")
    lag <- posFlankModelInfo[i, modelLag]
    weight <- posFlankModelInfo[i, relativeWeight]
    newProdModel <- baseModels[[posFlankModelInfo[i, modelId]]]
    # Load the test data with the appropriate lag
    testDataLag <- readRDS(file.path(getwd(), "Feature engineering", targetDate,
                                     testFeaturesFolder,
                                     paste0("Lag", lag, " features.rds")))
    # Optionally subset the test data
    if(predictSubset){
      testDataLag <- testDataLag[predictSubsetIds]
    }
    predictorData <- testDataLag[, newProdModel$predictors, with=FALSE]
    predictorDataM <- data.matrix(predictorData)
    rm(predictorData)
    gc()
    newProdPredictionsLag <- predict(newProdModel$model, predictorDataM)
    newProdPredictions <- newProdPredictions + newProdPredictionsLag*weight
  }
  # Rescale the weighted sum to the [0, 1] interval
  newProdPredictions <- newProdPredictions/weightSum
  # Calculate the mean predictions depending on the May 2015 flag
  # (diagnostic values, inspected interactively, not used downstream here)
  meanGroupPredsMayFlag <-
    c(mean(newProdPredictions[testDataLag$hasMay15Data==0]),
      mean(newProdPredictions[testDataLag$hasMay15Data==1]))
  # Calculate the mean predictions depending on the hasAnyPosFlank flag
  meanGroupPredsPosFlank <- c(mean(newProdPredictions[!testDataPosFlank]),
                              mean(newProdPredictions[testDataPosFlank]))
  # Compare the number of expected positive flanks versus the extrapolated
  # public leaderboard counts
  expectedPosFlanks <- sum(newProdPredictions)
  leaderboardPosFlanks <- fractionPosFlankUsers*nrow(testDataLag)
  normalisedProbRatio <- leaderboardPosFlanks/expectedPosFlanks
  cat("Expected/leaderboard positive flank ratio",
      round(1/normalisedProbRatio, 2), "\n")
  # Normalize the marginal probabilities such that the expected number of
  # products with a positive flanks matches the extrapolated public leaderboard
  # count
  if(marginalNormalisation == "linear"){
    newProdPredictions <- newProdPredictions * normalisedProbRatio
  } else{
    # probExponentNormaliser is defined elsewhere in the project.
    newProdPredictions <- probExponentNormaliser(newProdPredictions,
                                                 normalisedProbRatio)
  }
} else{
  # Trained on all data: treat every client as a potential positive flank.
  newProdPredictions <- rep(1, nrow(testDataLag))
}
# Optionally load the predictions before normalisation if they are available
# Main prediction loop: for every lag and every target product, sum the
# weighted fold predictions of the matching base models into allPredictions
# (a long data.table keyed by ncodpers/product). Results can be cached and
# reloaded per base model and for the whole raw prediction table.
if(loadPredictions && file.exists(rawPredictionsPath)){
  allPredictions <- readRDS(rawPredictionsPath)
} else{
  # Loop over all lags and base models
  allPredictions <- NULL
  for(lagId in 1:nbConditionalLags){
    # Show progress message
    cat("\nGenerating positive flank predictions for lag", lagId, "of",
        nbConditionalLags, "@", as.character(Sys.time()), "\n\n")
    # Set the lag weight and the number of train months back
    lag <- monthsBackLags[lagId]
    # monthsBack <- monthsBackModels[lagId]
    # Load the test data with the appropriate lag
    testDataLag <- readRDS(file.path(getwd(), "Feature engineering", targetDate,
                                     testFeaturesFolder,
                                     paste0("Lag", lag, " features.rds")))
    # Optionally subset the test data
    if(predictSubset){
      testDataLag <- testDataLag[predictSubsetIds]
    }
    for(i in 1:nbBaseModels){
      # Extract the target column
      targetVar <- targetCols[i]
      targetModelIds <- baseModelInfo[targetProduct==targetVar &
                                        modelLag==lag, modelId]
      # Show progress message
      cat("Generating test predictions for model", i, "of", nbBaseModels, "\n")
      # Optionally, load the base model predictions
      if(exists("baseModelPredictionsPath")){
        baseModelPredPath <- file.path(baseModelPredictionsPath,
                                       paste0(targetVar, " Lag ", lag, ".rds"))
      } else{
        baseModelPredPath <- ""
      }
      foldWeights <- baseModelInfo[modelId %in% targetModelIds,
                                   relativeWeight]
      weight <- sum(foldWeights)
      loadFileExists <- file.exists(baseModelPredPath)
      if(loadBaseModelPredictions && loadFileExists){
        predictionsDT <- readRDS(baseModelPredPath)
      } else{
        # Set the predictions to zero if the target variable is in the zeroed
        # list
        if(targetVar %in% zeroTargets || weight <= 0){
          predictions <- rep(0, nrow(testDataLag))
        } else{
          nbTargetModelFolds <- length(targetModelIds)
          foldPredictions <- rep(0, nrow(testDataLag))
          # A product counts as "already owned" when its Lag1 flag is NA or 1;
          # those clients are excluded from prediction and zeroed below.
          alreadyOwned <- is.na(testDataLag[[paste0(targetVar, "Lag1")]]) |
            testDataLag[[paste0(targetVar, "Lag1")]] == 1
          # Extract predictors data from the features data
          predictorData <-
            testDataLag[!alreadyOwned,
                        baseModels[[targetModelIds[1]]]$predictors, with=FALSE]
          # Convert the predictor data to a matrix
          predictorDataM <- data.matrix(predictorData)
          rm(predictorData)
          gc()
          for(fold in 1:nbTargetModelFolds){
            targetModelId <- targetModelIds[fold]
            # Loop over all folds and sum the predictions
            targetModel <- baseModels[[targetModelId]]
            # Extract the model weight
            weightFold <- foldWeights[fold]
            # if(weight == 0) browser()
            # Another check that we are using the right model
            # Better safe than sorry :)
            if(targetModel$targetVar != targetVar) browser()
            # Calculate the test predictions
            predictionsPrevNotOwnedFold <- predict(targetModel$model,
                                                   predictorDataM)
            foldPredictions[!alreadyOwned] <- foldPredictions[!alreadyOwned] +
              predictionsPrevNotOwnedFold*weightFold
          }
          # if(targetVar == "ind_reca_fin_ult1") browser()
          predictions <- foldPredictions/weight
          # Set the predictions to 0 for products that are already owned
          # predictions[alreadyOwned] <- -runif(sum(alreadyOwned))
          predictions[alreadyOwned] <- 0
        }
        # The mean prediction should equal the mean map contribution if the
        # predictions are set to zero for the already owned products
        # mean(predictions)/mapContributions[17, i]
        # Add the predictions to the data table with all target predictions
        predictionsDT <- data.table(ncodpers = testDataLag$ncodpers,
                                    predictions = predictions,
                                    product = targetVar)
      }
      predictionsDT[, weightedPrediction :=
                      predictionsDT$predictions*weight]
      # if(targetVar == "ind_reca_fin_ult1") browser()
      # c(lag, sum(predictionsDT$predictions), sum(testDataLag[[19+24*(16-lag)]], na.rm=T))
      # Accumulate across lags: add to the existing product rows, or append
      # the product's rows on first encounter.
      if(targetVar %in% allPredictions$product){
        allPredictions[product==targetVar, weightedPrediction:=
                         weightedPrediction +
                         predictionsDT$weightedPrediction]
      } else{
        allPredictions <- rbind(allPredictions, predictionsDT)
      }
      # Save the base model predictions
      if(saveBaseModelPredictions && (!loadBaseModelPredictions ||
                                        (loadBaseModelPredictions &&
                                           !loadFileExists))){
        predictionsDT[, weightedPrediction:=NULL]
        saveRDS(predictionsDT, baseModelPredPath)
      }
    }
  }
  # Divide the weighted summed predictions by the weight sum
  allPredictions[, prediction := weightedPrediction / weightSum]
  allPredictions[, weightedPrediction := NULL]
  allPredictions[, predictions := NULL]
  # meanConditionalProb <- mean(allPredictions$prediction)*24
  # Save the predictions to the predictions folder before normalisation
  if(savePredictionsBeforeNormalisation){
    saveRDS(allPredictions, file=rawPredictionsPath)
  }
}
# Optionally, multiply the predictions by the relative count ratio of June 2016
# Normalise each product's conditional probabilities so that the expected
# number of positive flanks per product matches the extrapolated public
# leaderboard count (countContributions[17, ] holds the per-product ratios).
# FIX(review): removed the leftover debug statement
#   if(i %in% c(3, 5, 7, 13, 18, 19, 22, 23, 24)) browser()
# which fired unconditionally for 9 of the 24 products and halted every
# non-interactive run. The anomaly guard on already-owned predictions kept.
probMultipliers <- rep(NA, nbBaseModels)
if(normalizeProdProbs){
  for(i in 1:nbBaseModels){
    # Show progress message
    cat("Normalizing product predictions", i, "of", nbBaseModels, "\n")
    # Extract the target column
    targetVar <- targetCols[i]
    # Look up if the target variable was already owned
    alreadyOwned <- is.na(testDataLag[[paste0(targetVar, "Lag1")]]) |
      testDataLag[[paste0(targetVar, "Lag1")]] == 1
    predictions <- allPredictions[product==targetVar, prediction]
    predictionsPrevNotOwned <- predictions[!alreadyOwned]
    # Sanity guard: predictions for already-owned products must be zero.
    if(suppressWarnings(max(predictions[alreadyOwned]))>0) browser()
    # Normalize the predicted probabilities
    predictedPosFlankCount <- sum(predictionsPrevNotOwned *
                                    newProdPredictions[!alreadyOwned])
    probMultiplier <- nrow(testDataLag) * fractionPosFlankUsers *
      expectedCountPerPosFlank * countContributions[17, i] /
      predictedPosFlankCount
    probMultipliers[i] <- probMultiplier
    if(is.finite(probMultiplier)){
      # Three normalisation modes: additive shift, linear scaling, or the
      # exponent-based rescaling (probExponentNormaliser, defined elsewhere).
      if(normalizeMode == "additive" || targetVar %in% additiveNormalizeProds){
        predictions[!alreadyOwned] <- predictions[!alreadyOwned] +
          (probMultiplier-1)*mean(predictions[!alreadyOwned])
      } else{
        if(normalizeMode == "linear"){
          predictions[!alreadyOwned] <- predictions[!alreadyOwned] *
            probMultiplier
        } else{
          predictions[!alreadyOwned] <- probExponentNormaliser(
            predictions[!alreadyOwned], probMultiplier,
            weights=newProdPredictions[!alreadyOwned])
        }
      }
      # Update the predictions in allPredictions
      allPredictions[product==targetVar, prediction:=predictions]
    }
  }
}
# Order the predicted probabilities for all products by client
# Rank the products per client, optionally swap nomina/nom_pens ranks, run
# sanity checks/diagnostics, and write the padded Kaggle submission file.
setkey(allPredictions, ncodpers)
allPredictions[,order_predict := match(1:length(prediction),
                                       order(-prediction)), by=ncodpers]
allPredictions <- allPredictions[order(ncodpers, -prediction), ]
# Swap nomina and nom pens in rank if they are both not owned in the previous
# period and if the rank of nomina > rank of nom_pens
if(nomPensAboveNominaBothNotOwned){
  # Find users where the rank of nomina < rank of nom pens and both prob not
  # zero
  # NOTE(review): the code actually selects clients where both probabilities
  # are positive AND nominaProb > nomPensProb (probabilities, not ranks) --
  # confirm the comment/code mismatch is intentional.
  ncodpers <- unique(allPredictions$ncodpers)
  nominaProb <- allPredictions[product == "ind_nomina_ult1", prediction]
  nominaProbRank <- allPredictions[product == "ind_nomina_ult1", order_predict]
  nomPensProb <- allPredictions[product == "ind_nom_pens_ult1", prediction]
  nomPensProbRank <- allPredictions[product == "ind_nom_pens_ult1", order_predict]
  swapIds <- nominaProb>0 & nomPensProb>0 & nominaProb>nomPensProb
  swapNcodPers <- ncodpers[swapIds]
  allPredictions[ncodpers %in% swapNcodPers & product == "ind_nomina_ult1",
                 order_predict := nomPensProbRank[swapIds]]
  allPredictions[ncodpers %in% swapNcodPers & product == "ind_nom_pens_ult1",
                 order_predict := nominaProbRank[swapIds]]
}
# Make sure that the order of the predictions is unique for each client
orderCount <- allPredictions[, .N, .(ncodpers, order_predict)]
if(max(orderCount$N)>1) browser()
# Show the confidence in the top prediction
hist(allPredictions[order_predict==1, prediction])
# Calculate the top predicted products counts
topPredictions <- allPredictions[order_predict==1, .N, product]
topPredictions <- topPredictions[order(-N)]
topPredictionsPosFlanks <- allPredictions[order_predict==1 &
                                            ncodpers %in% posFlankClients,
                                          .N, product]
topPredictionsPosFlanks <- topPredictionsPosFlanks[order(-N)]
# Study the ranking of specific products
# (diagnostic tables, inspected interactively)
productRankDelaFin <- allPredictions[product=="ind_dela_fin_ult1", .N,
                                     order_predict]
productRankDelaFin <- productRankDelaFin[order(order_predict),]
productRankDecoFin <- allPredictions[product=="ind_deco_fin_ult1", .N,
                                     order_predict]
productRankDecoFin <- productRankDecoFin[order(order_predict),]
productRankTjcrFin <- allPredictions[product=="ind_tjcr_fin_ult1", .N,
                                     order_predict]
productRankTjcrFin <- productRankTjcrFin[order(order_predict),]
productRankRecaFin <- allPredictions[product=="ind_reca_fin_ult1", .N,
                                     order_predict]
productRankRecaFin <- productRankRecaFin[order(order_predict),]
# Verify that the mean prediction aligns with the relative June 15 ratio
# (relies on allPredictions being sorted by ncodpers with nbBaseModels
# consecutive rows per client)
allPredictions[, totalProb := prediction * rep(newProdPredictions,
                                               each = nbBaseModels)]
meanProductProbs <- allPredictions[, .(meanCondProb = mean(prediction),
                                       meanProb = mean(totalProb),
                                       totalProb = sum(totalProb)), product]
meanProductProbs <- meanProductProbs[order(-meanProb), ]
# Combine the top seven products to a string vector
productString <- paste(allPredictions[order_predict==1, product],
                       allPredictions[order_predict==2, product],
                       allPredictions[order_predict==3, product],
                       allPredictions[order_predict==4, product],
                       allPredictions[order_predict==5, product],
                       allPredictions[order_predict==6, product],
                       allPredictions[order_predict==7, product])
# Check for ties in the ordering (should not occur)
if(length(productString) != nrow(testDataLag)) browser()
# Add the id and top 7 to the submission file
submission <- data.frame(ncodpers = testDataLag$ncodpers,
                         added_products = productString)
# Extract template submission file
paddedSubmission <- fread("Data/sample_submission.csv")
# Set the added products to an empty character string
paddedSubmission[, added_products := ""]
# Replace the matched ids in padded submission by the combined submission file
matchIds <- match(submission$ncodpers, paddedSubmission$ncodpers)
paddedSubmission[matchIds, added_products := submission$added_products]
# Write the padded submission to a csv file
write.csv(paddedSubmission, file.path(getwd(), "Submission", submissionDate,
                                      paste0(submissionFile, ".csv")),
          row.names = FALSE)
# Save the predictions to the predictions folder
if(savePredictions){
  saveRDS(allPredictions, file=file.path(predictionsPath,
                                         paste0(submissionFile, ".rds")))
}
# Display the successful submission message
cat("Submission file created successfully!\n",
    nrow(submission)," records were predicted (",
    round(nrow(submission)/nrow(paddedSubmission)*100,2), "%)\n", sep="")
|
#https://cran.r-project.org/web/packages/olsrr/olsrr.pdf
# All-subsets regression demo with olsrr on the built-in mtcars data.
# FIX(review): the script previously ran install.packages('lubridate'), a
# package it never loads, while the 'olsrr' package it does load was never
# installed. Install olsrr instead, and only when it is missing.
if (!requireNamespace("olsrr", quietly = TRUE)) {
  install.packages("olsrr")
}
library(olsrr)
# Fit the full model, then evaluate every predictor subset.
model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
k <- ols_step_all_possible(model)
plot(k)
k
# Inspect two candidate reduced models by hand.
summary(lm(mpg ~ wt, data=mtcars))
summary(lm(mpg ~ wt+ hp, data=mtcars))
|
/dd.R
|
no_license
|
amit2625/FA_5_2018
|
R
| false
| false
| 277
|
r
|
#https://cran.r-project.org/web/packages/olsrr/olsrr.pdf
# All-subsets regression demo with olsrr on the built-in mtcars data.
# NOTE(review): this installs 'lubridate', which the script never loads,
# while 'olsrr' (loaded below) is never installed -- confirm intent.
install.packages('lubridate')
library(olsrr)
# Fit the full model, then evaluate every predictor subset.
model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
k <- ols_step_all_possible(model)
plot(k)
k
# Inspect two candidate reduced models by hand.
summary(lm(mpg ~ wt, data=mtcars))
summary(lm(mpg ~ wt+ hp, data=mtcars))
|
##Basic statistics=group
##Layer1= vector
##Layer2=raster
##ponderation= number 1
##output= output vector
# QGIS Processing R script: distance-weighted averaging of raster (Layer2)
# values at the point locations of vector Layer1. The '##' lines above are
# QGIS script parameter declarations and must not be edited.
# For each point, raster cells closer than 1500 map units (but farther than
# 100) contribute to a weighted average a/b; the weight function is chosen
# by 'ponderation' (0..6 = one scheme each, 7 = compute all seven at once).
library(raster)
library(sp)
# NOTE(review): readGDAL() belongs to rgdal, which is not loaded here --
# presumably attached by the QGIS R provider; confirm before running standalone.
p1=coordinates(Layer1)
result<-cbind()
for (j in 1:dim(p1)[1]){
point<-p1[j,]
# The raster is re-read on every iteration of the point loop.
r <- readGDAL(Layer2@file@name)
dist <- distanceFromPoints(r, point)
position<-which(dist@data@values<1500)
dist_f<-dist@data@values[position]
z<-as.numeric(unlist(r@data))
a=0
b=0
# Scheme 0: inverse distance squared, 1/d^2.
if (ponderation==0){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1/dist_f[i]^2)
b=b+(1/dist_f[i]^2)
}
}
}
}
}
# Scheme 1: damped inverse distance squared, 1/(1+d^2).
if (ponderation==1){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1/(1+dist_f[i]^2))
b=b+(1/(1+dist_f[i]^2))
}
}
}
}
}
# Scheme 2: rational decay with fixed constants 0.102 and 403.
if (ponderation==2){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*((1-0.102)/(1+(403/dist_f[i])^2))
b=b+((1-0.102)/(1+(403/dist_f[i])^2))
}
}
}
}
}
# Scheme 3: exponential decay with the same constants.
if (ponderation==3){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1-0.102)*exp(-403/dist_f[i])
b=b+(1-0.102)*exp(-403/dist_f[i])
}
}
}
}
}
# Scheme 4: spherical-style polynomial weight.
if (ponderation==4){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
b=b+(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
}
}
}
}
}
# Scheme 5: complement of a lognormal CDF (meanlog 1.76, sdlog 0.394),
# computed by numerical integration for each cell.
if (ponderation==5){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
integrand <- function(x) {(1/(0.394*sqrt(2*pi)))*exp(-0.5*((log(x)-1.76)/0.394)^2)}
int<-integrate(integrand, lower = 0, upper =dist_f[i])
C<-1-int$value
a=a+z[position[i]]*C
b=b+C
}
}
}
}
}
# Scheme 6: logistic weight of -2*d + 2.
if (ponderation==6){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
alpha<-(-2)*dist_f[i]+2
C<-1/(1+exp(-alpha))
a=a+z[position[i]]*C
b=b+C
}
}
}
}
}
# Append this point's X, Y and weighted average to the result matrix.
result<-rbind(result,cbind(as.numeric(point[1]), as.numeric(point[2]),a/b))
colnames(result)<-c("X","Y","attribut")
}
# ponderation 7: recompute from scratch, evaluating all seven schemes per
# cell so the output has one attribute column per scheme.
if (ponderation==7){
result<-cbind()
for (j in 1:dim(p1)[1]){
point<-p1[j,]
r <- readGDAL(Layer2@file@name)
dist <- distanceFromPoints(r, point)
position<-which(dist@data@values<1500)
dist_f<-dist@data@values[position]
z<-as.numeric(unlist(r@data))
a0=a1=a2=a3=a4=a5=a6=0
b0=b1=b2=b3=b4=b5=b6=0
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a0=a0+z[position[i]]*(1/dist_f[i]^2)
b0=b0+(1/dist_f[i]^2)
a1=a1+z[position[i]]*(1/(1+dist_f[i]^2))
b1=b1+(1/(1+dist_f[i]^2))
a2=a2+z[position[i]]*((1-0.102)/(1+(403/dist_f[i])^2))
b2=b2+((1-0.102)/(1+(403/dist_f[i])^2))
a3=a3+z[position[i]]*(1-0.102)*exp(-403/dist_f[i])
b3=b3+(1-0.102)*exp(-403/dist_f[i])
a4=a4+z[position[i]]*(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
b4=b4+(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
integrand <- function(x) {(1/(0.394*sqrt(2*pi)))*exp(-0.5*((log(x)-1.76)/0.394)^2)}
int<-integrate(integrand, lower = 0, upper =dist_f[i])
C<-1-int$value
a5=a5+z[position[i]]*C
b5=b5+C
alpha<-(-2)*dist_f[i]+2
C<-1/(1+exp(-alpha))
a6=a6+z[position[i]]*C
b6=b6+C
}
}
}
}
result<-rbind(result,cbind(as.numeric(point[1]), as.numeric(point[2]),a0/b0,a1/b1,a2/b2,a3/b3,a4/b4,a5/b5,a6/b6))
}
colnames(result)<-c("X","Y","1/d","1/(d+1)","C_ratio","C_exp","C_sph","C_lit","logit")
}
# Wrap the result matrix as a SpatialPointsDataFrame in Layer1's CRS; QGIS
# picks up the 'output' variable as the script's output layer.
matrix<-cbind(result[,1],result[,2])
matrix<-as.matrix(matrix)
result<-SpatialPointsDataFrame(matrix, as.data.frame(result, row.names=NULL))
proj4string(Layer1)->crs
proj4string(result)<-crs
output<-result
|
/collections/qgis_rscripts2/rscripts/Inverse_Distance_Weigthing.rsx
|
no_license
|
qgis/QGIS-Resources
|
R
| false
| false
| 3,812
|
rsx
|
##Basic statistics=group
##Layer1= vector
##Layer2=raster
##ponderation= number 1
##output= output vector
# QGIS Processing R script (duplicate copy): distance-weighted averaging of
# raster (Layer2) values at the points of vector Layer1. The '##' lines are
# QGIS script parameter declarations and must not be edited.
# Cells closer than 1500 map units (but farther than 100) contribute to a
# weighted average a/b; 'ponderation' 0..6 selects one weighting scheme,
# 7 computes all seven in a single pass.
library(raster)
library(sp)
# NOTE(review): readGDAL() belongs to rgdal, which is not loaded here --
# presumably attached by the QGIS R provider; confirm before running standalone.
p1=coordinates(Layer1)
result<-cbind()
for (j in 1:dim(p1)[1]){
point<-p1[j,]
# The raster is re-read on every iteration of the point loop.
r <- readGDAL(Layer2@file@name)
dist <- distanceFromPoints(r, point)
position<-which(dist@data@values<1500)
dist_f<-dist@data@values[position]
z<-as.numeric(unlist(r@data))
a=0
b=0
# Scheme 0: inverse distance squared, 1/d^2.
if (ponderation==0){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1/dist_f[i]^2)
b=b+(1/dist_f[i]^2)
}
}
}
}
}
# Scheme 1: damped inverse distance squared, 1/(1+d^2).
if (ponderation==1){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1/(1+dist_f[i]^2))
b=b+(1/(1+dist_f[i]^2))
}
}
}
}
}
# Scheme 2: rational decay with fixed constants 0.102 and 403.
if (ponderation==2){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*((1-0.102)/(1+(403/dist_f[i])^2))
b=b+((1-0.102)/(1+(403/dist_f[i])^2))
}
}
}
}
}
# Scheme 3: exponential decay with the same constants.
if (ponderation==3){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1-0.102)*exp(-403/dist_f[i])
b=b+(1-0.102)*exp(-403/dist_f[i])
}
}
}
}
}
# Scheme 4: spherical-style polynomial weight.
if (ponderation==4){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a=a+z[position[i]]*(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
b=b+(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
}
}
}
}
}
# Scheme 5: complement of a lognormal CDF (meanlog 1.76, sdlog 0.394),
# computed by numerical integration for each cell.
if (ponderation==5){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
integrand <- function(x) {(1/(0.394*sqrt(2*pi)))*exp(-0.5*((log(x)-1.76)/0.394)^2)}
int<-integrate(integrand, lower = 0, upper =dist_f[i])
C<-1-int$value
a=a+z[position[i]]*C
b=b+C
}
}
}
}
}
# Scheme 6: logistic weight of -2*d + 2.
if (ponderation==6){
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
alpha<-(-2)*dist_f[i]+2
C<-1/(1+exp(-alpha))
a=a+z[position[i]]*C
b=b+C
}
}
}
}
}
# Append this point's X, Y and weighted average to the result matrix.
result<-rbind(result,cbind(as.numeric(point[1]), as.numeric(point[2]),a/b))
colnames(result)<-c("X","Y","attribut")
}
# ponderation 7: recompute from scratch, evaluating all seven schemes per
# cell so the output has one attribute column per scheme.
if (ponderation==7){
result<-cbind()
for (j in 1:dim(p1)[1]){
point<-p1[j,]
r <- readGDAL(Layer2@file@name)
dist <- distanceFromPoints(r, point)
position<-which(dist@data@values<1500)
dist_f<-dist@data@values[position]
z<-as.numeric(unlist(r@data))
a0=a1=a2=a3=a4=a5=a6=0
b0=b1=b2=b3=b4=b5=b6=0
for(i in 1:length(position)){
if (dist_f[i]>100){
if (!is.na(z[position[i]])){
if (z[position[i]]!=0){
a0=a0+z[position[i]]*(1/dist_f[i]^2)
b0=b0+(1/dist_f[i]^2)
a1=a1+z[position[i]]*(1/(1+dist_f[i]^2))
b1=b1+(1/(1+dist_f[i]^2))
a2=a2+z[position[i]]*((1-0.102)/(1+(403/dist_f[i])^2))
b2=b2+((1-0.102)/(1+(403/dist_f[i])^2))
a3=a3+z[position[i]]*(1-0.102)*exp(-403/dist_f[i])
b3=b3+(1-0.102)*exp(-403/dist_f[i])
a4=a4+z[position[i]]*(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
b4=b4+(1-0.102)*(1-1.5*(403/dist_f[i])+0.5*(403/dist_f[i])^3)
integrand <- function(x) {(1/(0.394*sqrt(2*pi)))*exp(-0.5*((log(x)-1.76)/0.394)^2)}
int<-integrate(integrand, lower = 0, upper =dist_f[i])
C<-1-int$value
a5=a5+z[position[i]]*C
b5=b5+C
alpha<-(-2)*dist_f[i]+2
C<-1/(1+exp(-alpha))
a6=a6+z[position[i]]*C
b6=b6+C
}
}
}
}
result<-rbind(result,cbind(as.numeric(point[1]), as.numeric(point[2]),a0/b0,a1/b1,a2/b2,a3/b3,a4/b4,a5/b5,a6/b6))
}
colnames(result)<-c("X","Y","1/d","1/(d+1)","C_ratio","C_exp","C_sph","C_lit","logit")
}
# Wrap the result matrix as a SpatialPointsDataFrame in Layer1's CRS; QGIS
# picks up the 'output' variable as the script's output layer.
matrix<-cbind(result[,1],result[,2])
matrix<-as.matrix(matrix)
result<-SpatialPointsDataFrame(matrix, as.data.frame(result, row.names=NULL))
proj4string(Layer1)->crs
proj4string(result)<-crs
output<-result
|
library(aqp)
library(microbenchmark)
library(daff)

# Sanity-check and benchmark two implementations of the Munsell -> RGB
# conversion (join-based vs merge-based) over the full bundled lookup table.
load(system.file("data/munsell.rda", package="aqp")[1])

# Convert every Munsell chip with both implementations.
res_join <- munsell2rgb(munsell$hue, munsell$value, munsell$chroma, return_triplets = TRUE)
res_merge <- munsell2rgb2(munsell$hue, munsell$value, munsell$chroma, return_triplets = TRUE)

# Results should agree; render any cell-level differences with daff.
all.equal(res_join, res_merge)
render_diff(diff_data(res_join, res_merge))

# Time both implementations on the hex-string (non-triplet) path.
microbenchmark(
  join = munsell2rgb(munsell$hue, munsell$value, munsell$chroma),
  merge = munsell2rgb2(munsell$hue, munsell$value, munsell$chroma)
)
|
/misc/sandbox/munsell2rgb-DT-testing.R
|
no_license
|
rsbivand/aqp
|
R
| false
| false
| 492
|
r
|
# Sanity-check and benchmark two implementations of the Munsell -> RGB
# conversion (join-based vs merge-based) over the full bundled lookup table.
library(aqp)
library(microbenchmark)
library(daff)
load(system.file("data/munsell.rda", package="aqp")[1])
# Convert every Munsell chip with both implementations; they should agree.
x <- munsell2rgb(munsell$hue, munsell$value, munsell$chroma, return_triplets = TRUE)
y <- munsell2rgb2(munsell$hue, munsell$value, munsell$chroma, return_triplets = TRUE)
all.equal(x, y)
# Render any cell-level differences with daff.
d <- diff_data(x, y)
render_diff(d)
# Time both implementations on the hex-string (non-triplet) path.
microbenchmark(
  join = munsell2rgb(munsell$hue, munsell$value, munsell$chroma),
  merge = munsell2rgb2(munsell$hue, munsell$value, munsell$chroma)
)
|
`getDividends` <-
function(Symbol,from='1970-01-01',to=Sys.Date(),env=parent.frame(),src='yahoo',
         auto.assign=FALSE,auto.update=FALSE,verbose=FALSE,split.adjust=TRUE,...) {
  # Download the dividend history for `Symbol` from Yahoo Finance and return
  # it as a one-column xts series named "<Symbol>.div".
  #
  # Arguments:
  #   Symbol       - ticker string, or an xts object whose name is used.
  #   from, to     - date range of the request.
  #   env          - environment used for auto.assign/auto.update.
  #   auto.assign  - assign the result to "<Symbol>.div" in `env` instead of
  #                  returning it.
  #   auto.update  - when `Symbol` is an xts object, attach the dividends as
  #                  an xts attribute of that object in `env`.
  #   split.adjust - Yahoo reports split-adjusted dividends; FALSE un-adjusts
  #                  them using the symbol's split history.
  if(missing(env))
    env <- parent.frame(1)
  if(is.null(env))
    auto.assign <- FALSE
  # FIX(review): the original used ifelse() on a scalar condition -- an
  # anti-pattern that also silently keeps only the first line of a
  # multi-line deparse(). A plain if/else with collapse handles that case;
  # [1L] preserves the original single-element result for character input.
  Symbol.name <- if(is.character(Symbol)) {
    as.character(Symbol)[1L]
  } else {
    paste(deparse(substitute(Symbol)), collapse = "")
  }
  from.posix <- .dateToUNIX(from)
  to.posix <- .dateToUNIX(to)
  # Download into a tempfile that is always cleaned up on exit.
  tmp <- tempfile()
  on.exit(unlink(tmp))
  handle <- .getHandle()
  yahoo.URL <- .yahooURL(Symbol.name, from.posix, to.posix,
                         "1d", "div", handle)
  curl::curl_download(yahoo.URL, destfile=tmp, quiet=!verbose, handle=handle$ch)
  fr <- read.csv(tmp)
  fr <- xts(fr[,2],as.Date(fr[,1]))
  colnames(fr) <- paste(Symbol.name,'div',sep='.')
  # dividends from Yahoo are split-adjusted; need to un-adjust
  if(src[1] == "yahoo" && !split.adjust) {
    splits <- getSplits(Symbol.name, from="1900-01-01")
    if(is.xts(splits) && is.xts(fr) && nrow(splits) > 0 && nrow(fr) > 0) {
      fr <- fr / adjRatios(splits=merge(splits, index(fr)))[,1]
    }
  }
  if(is.xts(Symbol)) {
    if(auto.update) {
      # Attach the dividends to the caller's xts object in `env`.
      xtsAttributes(Symbol) <- list(dividends=fr)
      assign(Symbol.name,Symbol,envir=env)
    }
  } else if(auto.assign) {
    assign(paste(Symbol.name,'div',sep='.'),fr,envir=env)
  } else fr
}
|
/quantmod-master/R/getDividends.R
|
permissive
|
Sdoof/PyFinTech
|
R
| false
| false
| 1,409
|
r
|
# Download the dividend history for `Symbol` from Yahoo Finance and return
# it as a one-column xts series named "<Symbol>.div".
#   Symbol       - ticker string, or an xts object whose name is used.
#   from, to     - date range of the request.
#   env          - environment used for auto.assign/auto.update.
#   auto.assign  - assign the result to "<Symbol>.div" in `env` instead of
#                  returning it.
#   auto.update  - when `Symbol` is an xts object, attach the dividends as an
#                  xts attribute of that object in `env`.
#   split.adjust - Yahoo reports split-adjusted dividends; FALSE un-adjusts
#                  them using the symbol's split history.
`getDividends` <-
function(Symbol,from='1970-01-01',to=Sys.Date(),env=parent.frame(),src='yahoo',
         auto.assign=FALSE,auto.update=FALSE,verbose=FALSE,split.adjust=TRUE,...) {
  if(missing(env))
    env <- parent.frame(1)
  if(is.null(env))
    auto.assign <- FALSE
  # Resolve the symbol's name: the unevaluated expression for non-character
  # input, otherwise the string itself.
  Symbol.name <- ifelse(!is.character(Symbol),
                        deparse(substitute(Symbol)),
                        as.character(Symbol))
  from.posix <- .dateToUNIX(from)
  to.posix <- .dateToUNIX(to)
  # Download into a tempfile that is always cleaned up on exit.
  tmp <- tempfile()
  on.exit(unlink(tmp))
  handle <- .getHandle()
  yahoo.URL <- .yahooURL(Symbol.name, from.posix, to.posix,
                         "1d", "div", handle)
  curl::curl_download(yahoo.URL, destfile=tmp, quiet=!verbose, handle=handle$ch)
  fr <- read.csv(tmp)
  fr <- xts(fr[,2],as.Date(fr[,1]))
  colnames(fr) <- paste(Symbol.name,'div',sep='.')
  # dividends from Yahoo are split-adjusted; need to un-adjust
  if(src[1] == "yahoo" && !split.adjust) {
    splits <- getSplits(Symbol.name, from="1900-01-01")
    if(is.xts(splits) && is.xts(fr) && nrow(splits) > 0 && nrow(fr) > 0) {
      fr <- fr / adjRatios(splits=merge(splits, index(fr)))[,1]
    }
  }
  if(is.xts(Symbol)) {
    if(auto.update) {
      # Attach the dividends to the caller's xts object in `env`.
      xtsAttributes(Symbol) <- list(dividends=fr)
      assign(Symbol.name,Symbol,envir=env)
    }
  } else if(auto.assign) {
    assign(paste(Symbol.name,'div',sep='.'),fr,envir=env)
  } else fr
}
|
# ---- pkgdown::deploy_site_github() ----
#
# Follows the steps of deploy_site_github() but renders into
# `preview/pr#` of `gh-pages` branch.
# Intended for CI: requires the PR_NUMBER environment variable and a git
# checkout with an 'origin' remote; uses pkgdown internals (:::).
# Pull gh-pages branch
callr::run("git", c("remote", "set-branches", "--add", "origin", "gh-pages"), echo_cmd = TRUE)
callr::run("git", c("fetch", "origin", "gh-pages"), echo_cmd = TRUE)
local({
  # Setup worktree in tempdir
  dest_dir <- fs::dir_create(fs::file_temp())
  on.exit(unlink(dest_dir, recursive = TRUE), add = TRUE)
  callr::run("git", c("worktree", "add", "--track", "-B", "gh-pages", dest_dir, "origin/gh-pages"), echo_cmd = TRUE)
  # Remove the worktree even if the build fails (add = TRUE stacks handlers).
  on.exit(add = TRUE, {
    callr::run("git", c("worktree", "remove", dest_dir), echo_cmd = TRUE)
  })
  # PR preview is in a preview/pr# subdirectory of gh-pages branch
  dest_preview <- file.path("preview", paste0("pr", Sys.getenv("PR_NUMBER")))
  dest_dir_preview <- fs::dir_create(fs::path(dest_dir, dest_preview))
  # Site URL from the package's pkgdown config; overridden per-preview below.
  url_base <- yaml::read_yaml("pkgdown/_pkgdown.yml")$url
  # Build the preview site in the <gh-pages>/preview/pr#/ directory
  pkgdown:::build_site_github_pages(
    dest_dir = dest_dir_preview,
    override = list(
      url = file.path(url_base, dest_preview)
    ),
    clean = TRUE
  )
  msg <- paste("[preview]", pkgdown:::construct_commit_message("."))
  pkgdown:::github_push(dest_dir, msg, "origin", "gh-pages")
  # Emit a GitHub Actions notice annotation with the preview URL.
  message(
    "::notice title=pkgdown preview::",
    file.path(url_base, dest_preview)
  )
})
|
/.github/pkgdown-pr-preview-build.R
|
permissive
|
rstudio/learnr
|
R
| false
| false
| 1,434
|
r
|
# ---- pkgdown::deploy_site_github() ----
#
# Follows the steps of deploy_site_github() but renders into
# `preview/pr#` of `gh-pages` branch.
# Intended for CI: requires the PR_NUMBER environment variable and a git
# checkout with an 'origin' remote; uses pkgdown internals (:::).
# Pull gh-pages branch
callr::run("git", c("remote", "set-branches", "--add", "origin", "gh-pages"), echo_cmd = TRUE)
callr::run("git", c("fetch", "origin", "gh-pages"), echo_cmd = TRUE)
local({
  # Setup worktree in tempdir
  dest_dir <- fs::dir_create(fs::file_temp())
  on.exit(unlink(dest_dir, recursive = TRUE), add = TRUE)
  callr::run("git", c("worktree", "add", "--track", "-B", "gh-pages", dest_dir, "origin/gh-pages"), echo_cmd = TRUE)
  # Remove the worktree even if the build fails (add = TRUE stacks handlers).
  on.exit(add = TRUE, {
    callr::run("git", c("worktree", "remove", dest_dir), echo_cmd = TRUE)
  })
  # PR preview is in a preview/pr# subdirectory of gh-pages branch
  dest_preview <- file.path("preview", paste0("pr", Sys.getenv("PR_NUMBER")))
  dest_dir_preview <- fs::dir_create(fs::path(dest_dir, dest_preview))
  # Site URL from the package's pkgdown config; overridden per-preview below.
  url_base <- yaml::read_yaml("pkgdown/_pkgdown.yml")$url
  # Build the preview site in the <gh-pages>/preview/pr#/ directory
  pkgdown:::build_site_github_pages(
    dest_dir = dest_dir_preview,
    override = list(
      url = file.path(url_base, dest_preview)
    ),
    clean = TRUE
  )
  msg <- paste("[preview]", pkgdown:::construct_commit_message("."))
  pkgdown:::github_push(dest_dir, msg, "origin", "gh-pages")
  # Emit a GitHub Actions notice annotation with the preview URL.
  message(
    "::notice title=pkgdown preview::",
    file.path(url_base, dest_preview)
  )
})
|
# Fit a Dirichlet Process Mixture of multivariate normals to the rows of y
# and (for 2-column y) evaluate the posterior density on a grid.
# Validates/initialises all prior and MCMC settings in R, then dispatches to
# one of two compiled samplers: a truncated ("blocked Gibbs") sampler or
# Neal (2000) Algorithm 8. `status=FALSE` resumes a previous run from
# `state`; `status=TRUE` starts fresh. Returns the C++ result list with
# timing attached, classed "DPMdensity".
# NOTE(review): is.positive.definite() is not defined here -- presumably
# imported from a package (e.g. matrixcalc) elsewhere in BNPqte; confirm.
DPMdensity = function(y,
                      ngrid=1000L, grid=NULL,
                      method="truncated", nclusters=50L,
                      updateAlpha=TRUE, useHyperpriors=TRUE,
                      status=TRUE, state=NULL,
                      nskip=1000L, ndpost=1000L, keepevery=1L, printevery=1000L,
                      alpha=10.0, a0=10.0, b0=1.0,
                      m=NULL, m0=NULL, S0=NULL,
                      lambda=0.5, gamma1=3.0, gamma2=2.0,
                      nu=NULL, Psi=NULL , nu0=NULL, Psi0=NULL,
                      diag=FALSE,
                      seed = 123
                      ) {
  #----------------------------------------------
  # check and process arguments
  #----------------------------------------------
  ##-----------------------------
  ## y
  ##-----------------------------
  if (is.matrix(y) & (ncol(y) > 1)) {
    n = dim(y)[1]
    d = dim(y)[2]
  } else {
    stop("y is required to be a matrix with more than 1 column.")
  }
  ##-----------------------------
  ## ngrid, grid: only evaluate grid points when d=2
  ##-----------------------------
  if ((d == 2) & ((ngrid > 0) | !is.null(grid))) {
    prediction = TRUE
    if (is.null(grid)) {
      # Default grid: data range padded by half a standard deviation per axis;
      # ngrid is the total cell budget, so each axis gets sqrt(ngrid) points.
      left = right = rep(0, 2)
      for (j in 1:2) {
        left[j] = min(y[, j]) - 0.5 * sd(y[, j])
        right[j] = max(y[, j]) + 0.5 * sd(y[, j])
      }
      ngrid = as.integer(sqrt(ngrid))
      grid1 = seq(left[1], right[1], length.out = ngrid)
      grid2 = seq(left[2], right[2], length.out = ngrid)
    } else {
      if (is.matrix(grid)) {
        ngrid = nrow(grid)
        grid1 = grid[, 1]
        grid2 = grid[, 2]
      } else {
        stop("grid is required to be a matrix or NULL.")
      }
    }
  } else {
    # No grid evaluation for d != 2.
    prediction = FALSE
    ngrid = 0
    grid1 = grid2 = NULL
  }
  ##-----------------------------
  ## method
  ##-----------------------------
  if(!(method %in% c("truncated", "neal")))
    stop("Only two available sampling methods: truncated or neal.")
  ##-----------------------------
  ## state, status
  ##-----------------------------
  if (status == FALSE) {
    ## use previous analysis
    # Resume: every prior setting and the latent state are taken from the
    # saved `state`, overriding the corresponding arguments.
    method = state$method
    nclusters = state$nclusters
    updateAlpha = state$updateAlpha
    a0 = state$a0
    b0 = state$b0
    alpha = state$alpha
    useHyperpriors = state$useHyperpriors
    m0 = state$m0
    S0 = state$S0
    m = state$m
    gamma1 = state$gamma1
    gamma2 = state$gamma2
    lambda = state$lambda
    nu0 = state$nu0
    Psi0 = state$Psi0
    nu = state$nu
    Psi = state$Psi
    Zeta = t(state$Zeta)
    Omega = state$Omega
    kappa = state$kappa
    if(method == "truncated") {
      # Stick-breaking state is only tracked by the truncated sampler.
      lw = state$lw
      a_gd = state$a_gd
      b_gd = state$b_gd
    }
  } else {
    ## start new analysis
    ##-----------------------------
    ## alpha ~ Gamma(a0, b0) or fixed
    ##-----------------------------
    if (updateAlpha) {
      if ((a0 > 0) & (b0 > 0))
        alpha = 1.0 # initialize
      else
        stop("a0 and b0 are required to be positive scalars.")
    } else {
      # Negative a0/b0 signal "alpha fixed" to the C++ code.
      if (alpha > 0)
        a0 = b0 = -1
      else
        stop("alpha is required to be a positive scalar.")
    }
    ##-----------------------------
    ## Hyperpriors for the base distribution (Normal-Inverse-Wishart: N(zeta|m, Omega/lambda)xIW(Omega|nu, Psi))
    ##-----------------------------
    if(is.null(nu)) {
      nu = ncol(y) + 2
    } else {
      if (nu < d)
        stop("nu is required to be a scalar greater than ncol(y)-1.")
    }
    if (useHyperpriors) {
      ### m ~ Normal(m0, S0)
      if(is.null(m0)) {
        m0 = colMeans(y)
      } else {
        if (!(is.vector(m0) & (length(m0) == d)))
          stop("m0 is required to be a vector of length equal to ncol(y).")
      }
      if (is.null(S0))
        # Default S0: diagonal of squared data ranges / 16.
        S0 = diag(apply(y, 2, function(s) (range(s)[2]-range(s)[1])^2/16))
      m = m0 + rnorm(d, 0, 100) # initialize
      ### lambda ~ Gamma(gamma1, gamma2)
      if ((gamma1 > 0) & (gamma2 > 0))
        lambda = rgamma(1, shape = gamma1, rate = gamma2) # initialize
      else
        stop("gamma1 and gamma2 are required to be positive scalars.")
      ### Psi ~ Wishart(nu0, Psi0)
      if(is.null(nu0)) {
        nu0 = ncol(y) + 2
      } else {
        if (nu0 < d)
          stop("nu0 is required to be a scalar greater than ncol(y)-1.")
      }
      if (is.null(Psi0))
        Psi0 = S0 / nu0
      Psi = nu0 * Psi0 # initialize
    } else {
      ### m, lambda and Psi are fixed
      # Negative placeholders tell the C++ code the hyperpriors are disabled.
      if(is.null(m)) {
        m = colMeans(y)
      } else {
        if (is.vector(m) & (length(m) == d)) {
          m0 = rep(-1, d)
          S0 = diag(-1, d)
        } else {
          stop("m is required to be a vector of length equal to ncol(y).")
        }
      }
      if (lambda > 0)
        gamma1 = gamma2 = -1
      else
        stop("lambda is required to be a positive scalar.")
      if (is.null(Psi)) {
        nu0 = -1
        Psi0 = diag(-1, d)
        Psi = diag(apply(y, 2, function(s) (range(s)[2]-range(s)[1])^2/16))
      } else if (!is.positive.definite(Psi)) {
        stop("Psi is required to be a positive definite matrix.")
      }
    }
    if(method == "truncated") {
      # Initial stick-breaking Beta(1, alpha) parameters for each stick.
      a_gd = rep(1.0, (nclusters-1))
      b_gd = rep(alpha, (nclusters-1))
      lw = NULL
    }
    Omega = Zeta = kappa = NULL # will initialize in cpp function
  }
  #----------------------------------------------
  ## print information
  #----------------------------------------------
  cat("*****Into main of DPMM\n")
  cat("*****Data: n, d: ", n, ", ", d, "\n", sep = "")
  if(prediction)
    cat("*****Prediction: ngrid1, ngrid2: ", ngrid, ", ", ngrid, "\n", sep = "")
  else
    cat("*****Prediction: FALSE\n")
  if(method == "truncated") {
    cat("*****Posterior sampling method: Blocked Gibbs Sampling with", nclusters, "clusters\n")
  } else {
    cat("*****Posterior sampling method: Algorithm 8 with m = 1 in Neal (2000)\n")
  }
  cat("*****Prior: updateAlpha, useHyperpriors: ", updateAlpha, ", ", useHyperpriors, "\n", sep="")
  cat("*****MCMC: nskip, ndpost, keepevery, printevery: ", nskip, ", ", ndpost, ", ", keepevery, ", ", printevery, "\n", sep = "")
  if(status)
    cat("*****Start a new MCMC...", "\n", sep = "")
  else
    cat("*****Continue previous MCMC...", "\n", sep = "")
  #----------------------------------------------
  # set random seed
  #----------------------------------------------
  # Note: mutates the caller's global RNG state.
  set.seed(seed = seed)
  #----------------------------------------------
  ## call Cpp function
  #----------------------------------------------
  ptm <- proc.time()
  if(method == "truncated") {
    res = .Call("_BNPqte_cDPMdensity",
                n,
                d,
                y,
                status,
                diag,
                prediction,
                ngrid,
                updateAlpha,
                useHyperpriors,
                a0,
                b0,
                m0,
                S0,
                gamma1,
                gamma2,
                nu0,
                Psi0,
                nu,
                nclusters,
                nskip,
                ndpost,
                keepevery,
                printevery,
                alpha,
                lambda,
                m,
                Psi,
                a_gd,
                b_gd,
                Zeta,
                Omega,
                lw,
                kappa,
                grid1,
                grid2
    )
  } else {
    res = .Call("_BNPqte_cDPMdensityNeal",
                n,
                d,
                y,
                status,
                diag,
                prediction,
                ngrid,
                updateAlpha,
                useHyperpriors,
                a0,
                b0,
                m0,
                S0,
                gamma1,
                gamma2,
                nu0,
                Psi0,
                nu,
                nclusters,
                nskip,
                ndpost,
                keepevery,
                printevery,
                alpha,
                lambda,
                m,
                Psi,
                Zeta,
                Omega,
                kappa,
                grid1,
                grid2
    )
  }
  cat("Finished!", "\n")
  #----------------------------------------------
  # returns
  #----------------------------------------------
  res$proc.time = proc.time() - ptm
  attr(res, 'class') <- 'DPMdensity'
  return(res)
}
|
/R/DPMdensity.R
|
no_license
|
chujiluo/BNPqte
|
R
| false
| false
| 8,632
|
r
|
# Dirichlet Process Mixture model for multivariate density estimation.
#
# Fits a DPM of multivariate normals to the rows of `y` by MCMC, delegating
# all sampling to compiled code (.Call into the BNPqte C++ routines). Two
# samplers are supported via `method`: "truncated" (blocked Gibbs with
# `nclusters` clusters) and "neal" (Algorithm 8 of Neal 2000).
#
# Key arguments:
#   y             : numeric matrix (n x d, d > 1) of observations.
#   ngrid, grid   : density is evaluated on a grid only when d == 2; supply a
#                   cell count (ngrid) or an explicit 2-column grid matrix.
#   updateAlpha   : if TRUE, alpha ~ Gamma(a0, b0); otherwise alpha is fixed
#                   and a0/b0 are overwritten with -1.
#   useHyperpriors: if TRUE, m ~ N(m0, S0), lambda ~ Gamma(gamma1, gamma2),
#                   Psi ~ Wishart(nu0, Psi0); otherwise m/lambda/Psi are fixed
#                   and the unused hyperparameters are overwritten with -1.
#   status, state : status=TRUE starts a fresh chain; status=FALSE resumes
#                   from the `state` list of a previous fit.
#   nskip, ndpost, keepevery, printevery: MCMC control parameters.
#   seed          : RNG seed, applied with set.seed() just before sampling.
#
# Returns the list produced by the C++ sampler with the elapsed `proc.time`
# appended, classed as "DPMdensity".
#
# NOTE(review): is.positive.definite() is not base R (matrixcalc provides
# one) -- assumed available through the package's imports; confirm.
# NOTE(review): the -1 values assigned below presumably act as "unused"
# sentinels for the C++ side -- confirm against the C++ sources.
DPMdensity = function(y,
                      ngrid=1000L, grid=NULL,
                      method="truncated", nclusters=50L,
                      updateAlpha=TRUE, useHyperpriors=TRUE,
                      status=TRUE, state=NULL,
                      nskip=1000L, ndpost=1000L, keepevery=1L, printevery=1000L,
                      alpha=10.0, a0=10.0, b0=1.0,
                      m=NULL, m0=NULL, S0=NULL,
                      lambda=0.5, gamma1=3.0, gamma2=2.0,
                      nu=NULL, Psi=NULL , nu0=NULL, Psi0=NULL,
                      diag=FALSE,
                      seed = 123
) {
  #----------------------------------------------
  # check and process arguments
  #----------------------------------------------
  ##-----------------------------
  ## y
  ##-----------------------------
  # y must be a multivariate data matrix; n = observations, d = dimensions
  if (is.matrix(y) & (ncol(y) > 1)) {
    n = dim(y)[1]
    d = dim(y)[2]
  } else {
    stop("y is required to be a matrix with more than 1 column.")
  }
  ##-----------------------------
  ## ngrid, grid: only evaluate grid points when d=2
  ##-----------------------------
  if ((d == 2) & ((ngrid > 0) | !is.null(grid))) {
    prediction = TRUE
    if (is.null(grid)) {
      # default grid: each margin extended by half a standard deviation
      left = right = rep(0, 2)
      for (j in 1:2) {
        left[j] = min(y[, j]) - 0.5 * sd(y[, j])
        right[j] = max(y[, j]) + 0.5 * sd(y[, j])
      }
      # ngrid points total -> sqrt(ngrid) points per margin
      ngrid = as.integer(sqrt(ngrid))
      grid1 = seq(left[1], right[1], length.out = ngrid)
      grid2 = seq(left[2], right[2], length.out = ngrid)
    } else {
      if (is.matrix(grid)) {
        ngrid = nrow(grid)
        grid1 = grid[, 1]
        grid2 = grid[, 2]
      } else {
        stop("grid is required to be a matrix or NULL.")
      }
    }
  } else {
    # d != 2 (or grid disabled): no density grid is evaluated
    prediction = FALSE
    ngrid = 0
    grid1 = grid2 = NULL
  }
  ##-----------------------------
  ## method
  ##-----------------------------
  if(!(method %in% c("truncated", "neal")))
    stop("Only two available sampling methods: truncated or neal.")
  ##-----------------------------
  ## state, status
  ##-----------------------------
  if (status == FALSE) {
    ## use previous analysis: restore every setting and the sampler state
    ## from the `state` list of an earlier fit
    method = state$method
    nclusters = state$nclusters
    updateAlpha = state$updateAlpha
    a0 = state$a0
    b0 = state$b0
    alpha = state$alpha
    useHyperpriors = state$useHyperpriors
    m0 = state$m0
    S0 = state$S0
    m = state$m
    gamma1 = state$gamma1
    gamma2 = state$gamma2
    lambda = state$lambda
    nu0 = state$nu0
    Psi0 = state$Psi0
    nu = state$nu
    Psi = state$Psi
    Zeta = t(state$Zeta)
    Omega = state$Omega
    kappa = state$kappa
    if(method == "truncated") {
      # stick-breaking state only exists for the blocked Gibbs sampler
      lw = state$lw
      a_gd = state$a_gd
      b_gd = state$b_gd
    }
  } else {
    ## start new analysis
    ##-----------------------------
    ## alpha ~ Gamma(a0, b0) or fixed
    ##-----------------------------
    if (updateAlpha) {
      if ((a0 > 0) & (b0 > 0))
        alpha = 1.0  # initialize
      else
        stop("a0 and b0 are required to be positive scalars.")
    } else {
      if (alpha > 0)
        a0 = b0 = -1  # presumably flags a0/b0 as unused; confirm in C++
      else
        stop("alpha is required to be a positive scalar.")
    }
    ##-----------------------------
    ## Hyperpriors for the base distribution (Normal-Inverse-Wishart: N(zeta|m, Omega/lambda)xIW(Omega|nu, Psi))
    ##-----------------------------
    if(is.null(nu)) {
      nu = ncol(y) + 2
    } else {
      if (nu < d)
        stop("nu is required to be a scalar greater than ncol(y)-1.")
    }
    if (useHyperpriors) {
      ### m ~ Normal(m0, S0)
      if(is.null(m0)) {
        m0 = colMeans(y)
      } else {
        if (!(is.vector(m0) & (length(m0) == d)))
          stop("m0 is required to be a vector of length equal to ncol(y).")
      }
      if (is.null(S0))
        S0 = diag(apply(y, 2, function(s) (range(s)[2]-range(s)[1])^2/16))
      m = m0 + rnorm(d, 0, 100)  # initialize
      ### lambda ~ Gamma(gamma1, gamma2)
      if ((gamma1 > 0) & (gamma2 > 0))
        lambda = rgamma(1, shape = gamma1, rate = gamma2)  # initialize
      else
        stop("gamma1 and gamma2 are required to be positive scalars.")
      ### Psi ~ Wishart(nu0, Psi0)
      if(is.null(nu0)) {
        nu0 = ncol(y) + 2
      } else {
        if (nu0 < d)
          stop("nu0 is required to be a scalar greater than ncol(y)-1.")
      }
      if (is.null(Psi0))
        Psi0 = S0 / nu0
      Psi = nu0 * Psi0  # initialize
    } else {
      ### m, lambda and Psi are fixed
      if(is.null(m)) {
        m = colMeans(y)
      } else {
        if (is.vector(m) & (length(m) == d)) {
          # m supplied: mark its (unused) hyperprior parameters with -1
          m0 = rep(-1, d)
          S0 = diag(-1, d)
        } else {
          stop("m is required to be a vector of length equal to ncol(y).")
        }
      }
      if (lambda > 0)
        gamma1 = gamma2 = -1
      else
        stop("lambda is required to be a positive scalar.")
      if (is.null(Psi)) {
        nu0 = -1
        Psi0 = diag(-1, d)
        # default Psi: diagonal of squared column ranges / 16
        Psi = diag(apply(y, 2, function(s) (range(s)[2]-range(s)[1])^2/16))
      } else if (!is.positive.definite(Psi)) {
        stop("Psi is required to be a positive definite matrix.")
      }
    }
    if(method == "truncated") {
      # stick-breaking beta parameters for the truncated sampler
      a_gd = rep(1.0, (nclusters-1))
      b_gd = rep(alpha, (nclusters-1))
      lw = NULL
    }
    Omega = Zeta = kappa = NULL  # will initialize in cpp function
  }
  #----------------------------------------------
  ## print information
  #----------------------------------------------
  cat("*****Into main of DPMM\n")
  cat("*****Data: n, d: ", n, ", ", d, "\n", sep = "")
  if(prediction)
    cat("*****Prediction: ngrid1, ngrid2: ", ngrid, ", ", ngrid, "\n", sep = "")
  else
    cat("*****Prediction: FALSE\n")
  if(method == "truncated") {
    cat("*****Posterior sampling method: Blocked Gibbs Sampling with", nclusters, "clusters\n")
  } else {
    cat("*****Posterior sampling method: Algorithm 8 with m = 1 in Neal (2000)\n")
  }
  cat("*****Prior: updateAlpha, useHyperpriors: ", updateAlpha, ", ", useHyperpriors, "\n", sep="")
  cat("*****MCMC: nskip, ndpost, keepevery, printevery: ", nskip, ", ", ndpost, ", ", keepevery, ", ", printevery, "\n", sep = "")
  if(status)
    cat("*****Start a new MCMC...", "\n", sep = "")
  else
    cat("*****Continue previous MCMC...", "\n", sep = "")
  #----------------------------------------------
  # set random seed
  #----------------------------------------------
  set.seed(seed = seed)
  #----------------------------------------------
  ## call Cpp function
  #----------------------------------------------
  ptm <- proc.time()
  if(method == "truncated") {
    # blocked Gibbs sampler; passes the stick-breaking state (a_gd, b_gd, lw)
    res = .Call("_BNPqte_cDPMdensity",
                n,
                d,
                y,
                status,
                diag,
                prediction,
                ngrid,
                updateAlpha,
                useHyperpriors,
                a0,
                b0,
                m0,
                S0,
                gamma1,
                gamma2,
                nu0,
                Psi0,
                nu,
                nclusters,
                nskip,
                ndpost,
                keepevery,
                printevery,
                alpha,
                lambda,
                m,
                Psi,
                a_gd,
                b_gd,
                Zeta,
                Omega,
                lw,
                kappa,
                grid1,
                grid2
    )
  } else {
    # Neal (2000) Algorithm 8 sampler; no stick-breaking arguments
    res = .Call("_BNPqte_cDPMdensityNeal",
                n,
                d,
                y,
                status,
                diag,
                prediction,
                ngrid,
                updateAlpha,
                useHyperpriors,
                a0,
                b0,
                m0,
                S0,
                gamma1,
                gamma2,
                nu0,
                Psi0,
                nu,
                nclusters,
                nskip,
                ndpost,
                keepevery,
                printevery,
                alpha,
                lambda,
                m,
                Psi,
                Zeta,
                Omega,
                kappa,
                grid1,
                grid2
    )
  }
  cat("Finished!", "\n")
  #----------------------------------------------
  # returns
  #----------------------------------------------
  res$proc.time = proc.time() - ptm
  attr(res, 'class') <- 'DPMdensity'
  return(res)
}
|
# function [Yraw,yearlab] = transform(ydata,tcode,yearlab)
# %TRANSFORM Transform large dataset to stationarity
# %This code corrects the number of observations lost from transformations
# Yraw = 0*ydata;
# for i=1:size(ydata,2)
# Yraw(:,i) = transx(ydata(:,i),tcode(i));
# end
# end
transform <- function(ydata, tcode, yearlab) {
  # TRANSFORM Transform large dataset to stationarity (port of the MATLAB
  # function in the comment above). Each column of `ydata` is transformed by
  # transx() according to its transformation code.
  #
  # ydata  : numeric matrix/data frame of raw series, one series per column.
  # tcode  : transformation codes; indexed as tcode[i, ], so presumably a
  #          one-column matrix/data frame with one code per series -- TODO
  #          confirm against callers (the MATLAB original uses tcode(i)).
  # yearlab: vector of period labels, passed through unchanged.
  #
  # Returns a list with the transformed matrix `Yraw` and `yearlab`.
  # NOTE(review): this masks base::transform() when sourced.
  Yraw <- 0 * ydata
  # seq_len() instead of 1:ncol(): a zero-column input is then an empty loop
  # rather than an error from the degenerate sequence c(1, 0)
  for (i in seq_len(ncol(ydata))) {
    Yraw[ , i] <- transx(ydata[ , i], tcode[i, ])
  }
  out <- list(Yraw = Yraw, yearlab = yearlab)
  return(out)
}
|
/transform.R
|
no_license
|
Ruangoose/Large-TVP-VAR
|
R
| false
| false
| 653
|
r
|
# function [Yraw,yearlab] = transform(ydata,tcode,yearlab)
# %TRANSFORM Transform large dataset to stationarity
# %This code corrects the number of observations lost from transformations
# Yraw = 0*ydata;
# for i=1:size(ydata,2)
# Yraw(:,i) = transx(ydata(:,i),tcode(i));
# end
# end
transform <- function(ydata, tcode, yearlab) {
  # TRANSFORM Transform large dataset to stationarity (port of the MATLAB
  # function in the comment above). Each column of `ydata` is transformed by
  # transx() according to its transformation code.
  #
  # ydata  : numeric matrix/data frame of raw series, one series per column.
  # tcode  : transformation codes; indexed as tcode[i, ], so presumably a
  #          one-column matrix/data frame with one code per series -- TODO
  #          confirm against callers (the MATLAB original uses tcode(i)).
  # yearlab: vector of period labels, passed through unchanged.
  #
  # Returns a list with the transformed matrix `Yraw` and `yearlab`.
  # NOTE(review): this masks base::transform() when sourced.
  Yraw <- 0 * ydata
  # seq_len() instead of 1:ncol(): a zero-column input is then an empty loop
  # rather than an error from the degenerate sequence c(1, 0)
  for (i in seq_len(ncol(ydata))) {
    Yraw[ , i] <- transx(ydata[ , i], tcode[i, ])
  }
  out <- list(Yraw = Yraw, yearlab = yearlab)
  return(out)
}
|
library(shiny)
library(shinydashboard)
library(knitr)
library(dplyr)
library(sparkline)
library(highcharter)
library(jsonlite)
library(DT)
library(lazyeval)
library(FSA)
library(rstudioapi)
library(shinyWidgets)
library(shinyBS)
library(tidyr)
library(shinyjs)
library(DBI)
library(leaflet)
library(htmltools)
library(httr)
# Reference table for the tracker fleet: one row per tracked user, holding
# identity fields, the per-user channel id and read/write API keys (presumably
# for a telemetry service -- confirm), and live-position fields (longitude,
# latitude, pace) initialised to zero.
Tracker_fleet <- data.frame(
  username   = c('wil_trem', 'joh_deg', 'sea_go'),
  name       = c('William', 'John', 'Sean'),
  surname    = c('Tremendous', 'Degun', 'Gonet'),
  age        = c(23, 23, 22),
  channel_id = c('1180519', '1180520', '1198494'),
  read_key   = c('DFMZL2TN2KEC7YTY', 'W6T1VY3J04MII447', '2ZS9GMFT2VKXDO6Y'),
  write_key  = c('1ZZG6G2TPICBV5CG', 'Z42PSIWFBYOQ75JZ', 'LWV8JQK8SORIKFPJ'),
  longitude  = c(0, 0, 0),
  latitude   = c(0, 0, 0),
  pace       = c(0, 0, 0),
  sex        = c('M', 'M', 'M')
)
|
/app/global.R
|
no_license
|
castafra/traile
|
R
| false
| false
| 977
|
r
|
library(shiny)
library(shinydashboard)
library(knitr)
library(dplyr)
library(sparkline)
library(highcharter)
library(jsonlite)
library(DT)
library(lazyeval)
library(FSA)
library(rstudioapi)
library(shinyWidgets)
library(shinyBS)
library(tidyr)
library(shinyjs)
library(DBI)
library(leaflet)
library(htmltools)
library(httr)
# Reference table for the tracker fleet: one row per tracked user, holding
# identity fields, the per-user channel id and read/write API keys (presumably
# for a telemetry service -- confirm), and live-position fields (longitude,
# latitude, pace) initialised to zero.
Tracker_fleet <- data.frame(
  username   = c('wil_trem', 'joh_deg', 'sea_go'),
  name       = c('William', 'John', 'Sean'),
  surname    = c('Tremendous', 'Degun', 'Gonet'),
  age        = c(23, 23, 22),
  channel_id = c('1180519', '1180520', '1198494'),
  read_key   = c('DFMZL2TN2KEC7YTY', 'W6T1VY3J04MII447', '2ZS9GMFT2VKXDO6Y'),
  write_key  = c('1ZZG6G2TPICBV5CG', 'Z42PSIWFBYOQ75JZ', 'LWV8JQK8SORIKFPJ'),
  longitude  = c(0, 0, 0),
  latitude   = c(0, 0, 0),
  pace       = c(0, 0, 0),
  sex        = c('M', 'M', 'M')
)
|
#' Specifying hierarchical columns with arguments `pattern` or `by`
#'
#' @name specifying_columns
#'
#' @description
#' Within the `hmatch_` group of functions, there are three ways to specify the
#' hierarchical columns to be matched.
#'
#' In all cases, it is assumed that matched columns are already correctly
#' ordered, with the first matched column reflecting the broadest hierarchical
#' level (lowest-resolution, e.g. country) and the last column reflecting the
#' finest level (highest-resolution, e.g. township).
#'
#' @section (1) All column names common to `raw` and `ref`:
#'
#' If neither `pattern` nor `by` are specified (the default), then the
#' hierarchical columns are assumed to be all column names that are common to
#' both `raw` and `ref`.
#'
#' @section (2) Regex pattern:
#'
#' Arguments `pattern` and `pattern_ref` take regex patterns to match the
#' hierarchical columns in `raw` and `ref`, respectively. Argument `pattern_ref`
#' only needs to be specified if it's different from `pattern` (i.e. if the
#' hierarchical columns have different names in `raw` vs. `ref`).
#'
#' For example, if the hierarchical columns in `raw` are "ADM_1", "ADM_2", and
#' "ADM_3", which correspond respectively to columns within `ref` named
#' "REF_ADM_1", "REF_ADM_2", and "REF_ADM_3", then the pattern arguments can be
#' specified as:
#' - `pattern = "^ADM_[[:digit:]]"`
#' - `pattern_ref = "^REF_ADM_[[:digit:]]"`
#'
#' Alternatively, because `pattern_ref` defaults to the same value as
#' `pattern` (unless otherwise specified), one could specify a single regex pattern
#' that matches the hierarchical columns in both `raw` and `ref`, e.g.
#' - `pattern = "ADM_[[:digit:]]"`
#'
#' However, the user should exercise care to ensure that there are no
#' non-hierarchical columns within `raw` or `ref` that may inadvertently be
#' matched by the given pattern.
#'
#' @section (3) Vector of column names:
#'
#' If the hierarchical columns cannot easily be matched with a regex pattern,
#' one can specify the relevant column names in vector form using arguments `by`
#' and `by_ref`. As with `pattern_ref`, argument `by_ref` only needs to be
#' specified if it's different from `by` (i.e. if the hierarchical columns have
#' different names in `raw` vs. `ref`).
#'
#' For example, if the hierarchical columns in `raw` are "state", "county", and
#' "township", which correspond respectively to columns within `ref` named
#' "admin1", "admin2", and "admin3", then the `by` arguments can be specified
#' with:
#'
#' - `by = c("state", "county", "township")`
#' - `by_ref = c("admin1", "admin2", "admin3")`
#'
NULL
|
/R/doc_specifying_columns.R
|
no_license
|
ntncmch/hmatch
|
R
| false
| false
| 2,622
|
r
|
#' Specifying hierarchical columns with arguments `pattern` or `by`
#'
#' @name specifying_columns
#'
#' @description
#' Within the `hmatch_` group of functions, there are three ways to specify the
#' hierarchical columns to be matched.
#'
#' In all cases, it is assumed that matched columns are already correctly
#' ordered, with the first matched column reflecting the broadest hierarchical
#' level (lowest-resolution, e.g. country) and the last column reflecting the
#' finest level (highest-resolution, e.g. township).
#'
#' @section (1) All column names common to `raw` and `ref`:
#'
#' If neither `pattern` nor `by` are specified (the default), then the
#' hierarchical columns are assumed to be all column names that are common to
#' both `raw` and `ref`.
#'
#' @section (2) Regex pattern:
#'
#' Arguments `pattern` and `pattern_ref` take regex patterns to match the
#' hierarchical columns in `raw` and `ref`, respectively. Argument `pattern_ref`
#' only needs to be specified if it's different from `pattern` (i.e. if the
#' hierarchical columns have different names in `raw` vs. `ref`).
#'
#' For example, if the hierarchical columns in `raw` are "ADM_1", "ADM_2", and
#' "ADM_3", which correspond respectively to columns within `ref` named
#' "REF_ADM_1", "REF_ADM_2", and "REF_ADM_3", then the pattern arguments can be
#' specified as:
#' - `pattern = "^ADM_[[:digit:]]"`
#' - `pattern_ref = "^REF_ADM_[[:digit:]]"`
#'
#' Alternatively, because `pattern_ref` defaults to the same value as
#' `pattern` (unless otherwise specified), one could specify a single regex pattern
#' that matches the hierarchical columns in both `raw` and `ref`, e.g.
#' - `pattern = "ADM_[[:digit:]]"`
#'
#' However, the user should exercise care to ensure that there are no
#' non-hierarchical columns within `raw` or `ref` that may inadvertently be
#' matched by the given pattern.
#'
#' @section (3) Vector of column names:
#'
#' If the hierarchical columns cannot easily be matched with a regex pattern,
#' one can specify the relevant column names in vector form using arguments `by`
#' and `by_ref`. As with `pattern_ref`, argument `by_ref` only needs to be
#' specified if it's different from `by` (i.e. if the hierarchical columns have
#' different names in `raw` vs. `ref`).
#'
#' For example, if the hierarchical columns in `raw` are "state", "county", and
#' "township", which correspond respectively to columns within `ref` named
#' "admin1", "admin2", and "admin3", then the `by` arguments can be specified
#' with:
#'
#' - `by = c("state", "county", "township")`
#' - `by_ref = c("admin1", "admin2", "admin3")`
#'
NULL
|
# Tutorial script: logical values and comparison/logical operators in R.
# NOTE(review): rm(list=ls()) clears the entire global environment; acceptable
# in a standalone lesson script, but avoid it in reusable code.
rm(list=ls())
# values of logical type:
# TRUE, T
# FALSE, F
# comparison expressions evaluate to a logical value (printed when run
# interactively):
4 < 5
10 > 100
4 == 5
# logical operators:
# == ... Equal to
# != ... Not equal to
# < ... Less than
# > ... Greater
# <= ... Less or equal to
# >= ... Greater or equal to
# ! ... NOT
# | ... OR
# & ... AND
# isTRUE(var)
# a comparison result can be stored like any other value
result <- 4 < 5
result
typeof(result) # "logical"
# negation with !
result2 <- !TRUE
result2
# combining logical values: | is OR, & is AND
result | result2
result & result2
# isTRUE() is TRUE only for a single non-NA TRUE value
isTRUE(result)
|
/section02/logicaloperators.R
|
no_license
|
AmundsenJunior/r-programming-udemy
|
R
| false
| false
| 403
|
r
|
# Tutorial script: logical values and comparison/logical operators in R.
# NOTE(review): rm(list=ls()) clears the entire global environment; acceptable
# in a standalone lesson script, but avoid it in reusable code.
rm(list=ls())
# values of logical type:
# TRUE, T
# FALSE, F
# comparison expressions evaluate to a logical value (printed when run
# interactively):
4 < 5
10 > 100
4 == 5
# logical operators:
# == ... Equal to
# != ... Not equal to
# < ... Less than
# > ... Greater
# <= ... Less or equal to
# >= ... Greater or equal to
# ! ... NOT
# | ... OR
# & ... AND
# isTRUE(var)
# a comparison result can be stored like any other value
result <- 4 < 5
result
typeof(result) # "logical"
# negation with !
result2 <- !TRUE
result2
# combining logical values: | is OR, & is AND
result | result2
result & result2
# isTRUE() is TRUE only for a single non-NA TRUE value
isTRUE(result)
|
# load libraries ----
library(tidyverse) # install.packages('tidyverse')
library(stringr)
library(rgdal)
library(raster)
library(rasterVis)
library(maps)
library(mapproj)
# resolve name masking from the many attached packages: pin select() to dplyr
# and stack() to raster for the rest of this script
select = dplyr::select
stack = raster::stack
# define functions ----
process_singledir = function(dir_results, dir_simulation, do_csv=T, do_tif=T, do_png=T){
  # Summarize a single model run: read the Connectivity lines from the run's
  # geodatabase, total the larval quantity per source patch, and write the
  # results into dir_results as a csv plus "quantity" and "percent" GeoTIFF
  # and PNG rasters.
  #
  # dir_results   : "<run>_results" folder containing output.gdb
  # dir_simulation: matching "<run>_simulation" folder with PatchData/patch_ids
  # do_csv, do_tif, do_png: switches for each output product
  run_label = str_replace(basename(dir_results), '_results', '')

  # connectivity feature class from the run's geodatabase
  conn_lines = readOGR(file.path(dir_results, 'output.gdb'), 'Connectivity', verbose=F)

  # total quantity per source patch, plus its share of the overall total
  conn_summary = conn_lines@data %>%
    as_tibble() %>%
    group_by(FromPatchID) %>%
    summarize(quantity = sum(Quantity)) %>%
    ungroup() %>%
    mutate(percent = quantity / sum(quantity) * 100) %>%
    arrange(desc(percent))

  if (do_csv) {
    write_csv(conn_summary, sprintf('%s/connectivity.csv', dir_results))
  }

  # patch-id raster; cells whose patch id has no connectivity row become NA
  patch_raster = raster(sprintf('%s/PatchData/patch_ids', dir_simulation))
  missing_cells = !getValues(patch_raster) %in% conn_summary$FromPatchID

  for (metric in c('quantity', 'percent')) {
    # map each patch id onto the metric's value
    ras = reclassify(patch_raster, conn_summary[, c('FromPatchID', metric)])
    ras[missing_cells] = NA
    if (do_tif) {
      writeRaster(ras, sprintf('%s/%s.tif', dir_results, metric), overwrite=T)
    }
    if (do_png) {
      # quick-look preview image
      png(sprintf('%s/%s.png', dir_results, metric))
      print(levelplot(ras, par.settings=viridisTheme, main=sprintf('%s %s', run_label, metric)))
      dev.off()
    }
  }
}
process_sppyr_dirs = function(dir_sppyr, ...){
  # Process every "*_results" run directory under one species/year folder:
  # each run's geodatabase is converted into a connectivity csv plus quantity
  # and percent rasters by process_singledir().
  #
  # dir_sppyr: species/year directory containing <run>_results folders
  # ...      : forwarded to process_singledir() (do_csv, do_tif, do_png)
  #
  # Called for its file-writing side effects; returns NULL invisibly.
  dirs_results = list.files(dir_sppyr, '.*_results$', full.names=T)
  # seq_along() rather than 1:length(): an empty or missing directory is then
  # a harmless no-op instead of an error from iterating over c(1, 0)
  for (i in seq_along(dirs_results)){
    dir_results = dirs_results[i]
    dir_simulation = str_replace(dir_results, '_results', '_simulation')
    cat(sprintf('%03d of %d: %s\n', i, length(dirs_results), basename(dir_results)))
    # process from geodatabase to results csv, tifs, pngs
    process_singledir(dir_results, dir_simulation, ...)
  }
  invisible(NULL)
}
summarize_sppyr = function(dir_sppyr){
  # Combine the per-run quantity.tif rasters under one species/year directory
  # into across-run summary layers: the mean and the coefficient of variation
  # (sd / mean * 100), each written to dir_sppyr as a GeoTIFF with a PNG
  # preview.
  run_dirs = list.files(dir_sppyr, '.*_results$', full.names=T)
  quantity_stack = stack(sprintf('%s/quantity.tif', run_dirs))

  layer_mean = mean(quantity_stack, na.rm=T)
  layer_sd = calc(quantity_stack, fun=function(x) sd(x, na.rm=T))
  summaries = list(mean = layer_mean, cv = layer_sd / layer_mean * 100)

  for (v in names(summaries)) {
    out = summaries[[v]]
    writeRaster(out, sprintf('%s/%s.tif', dir_sppyr, v), overwrite=T)
    png(sprintf('%s/%s.png', dir_sppyr, v))
    print(levelplot(out, par.settings=viridisTheme, main=sprintf('%s %s', basename(dir_sppyr), v)))
    dev.off()
  }
}
summarize_spp = function(dir_root, sp){
  # Average one species' yearly mean rasters (<sp>_YYYY/mean.tif) across all
  # years and write the across-year mean and coefficient of variation into a
  # new <dir_root>/<sp> folder, each as a GeoTIFF with a PNG preview.
  #
  # dir_root: top-level folder holding the <sp>_YYYY directories
  # sp      : species code, e.g. "sp", "rs" or "bsb"
  year_dirs = list.files(dir_root, sprintf('%s_[0-9]{4}$', sp), full.names=T)
  mean_stack = stack(sprintf('%s/mean.tif', year_dirs))

  out_dir = file.path(dir_root, sp)
  if (!file.exists(out_dir)) dir.create(out_dir)

  layer_mean = mean(mean_stack, na.rm=T)
  layer_sd = calc(mean_stack, fun=function(x) sd(x, na.rm=T))
  summaries = list(mean = layer_mean, cv = layer_sd / layer_mean * 100)

  for (v in names(summaries)) {
    out = summaries[[v]]
    writeRaster(out, sprintf('%s/%s.tif', out_dir, v), overwrite=T)
    png(sprintf('%s/%s.png', out_dir, v))
    print(levelplot(out, par.settings=viridisTheme, main=sprintf('%s %s', basename(out_dir), v)))
    dev.off()
  }
}
#summarize_spp('G:/Team_Folders/Steph', sp='bsb')
# Run the per-species across-year summary (the summarize_spp() definition
# immediately above) for every species code. Heavy raster I/O on the G: drive.
for (sp in c('bsb','gg','rs','sp')){
  summarize_spp('G:/Team_Folders/Steph', sp)
}
summarize_spp = function(dir_root='G:/Team_Folders/Steph', spp=c('bsb','gg','rs','sp')){
  # Redefinition of summarize_spp(): average the per-species mean rasters
  # (<sp>/mean.tif) over every species code in `spp`, writing the all-species
  # mean and coefficient of variation into <dir_root>/_allspp, each as a
  # GeoTIFF with a PNG preview.
  sp_dirs = file.path(dir_root, spp)
  mean_stack = stack(sprintf('%s/mean.tif', sp_dirs))

  out_dir = file.path(dir_root, '_allspp')
  if (!file.exists(out_dir)) dir.create(out_dir)

  layer_mean = mean(mean_stack, na.rm=T)
  layer_sd = calc(mean_stack, fun=function(x) sd(x, na.rm=T))
  summaries = list(mean = layer_mean, cv = layer_sd / layer_mean * 100)

  for (v in names(summaries)) {
    out = summaries[[v]]
    writeRaster(out, sprintf('%s/%s.tif', out_dir, v), overwrite=T)
    png(sprintf('%s/%s.png', out_dir, v))
    print(levelplot(out, par.settings=viridisTheme, main=sprintf('%s %s', basename(out_dir), v)))
    dev.off()
  }
}
# All-species summary using the redefined summarize_spp() above (_allspp output)
summarize_spp(dir_root='G:/Team_Folders/Steph', spp=c('bsb','gg','rs','sp'))
#### Mortality runs: same processing as above, but the geodatabase is named ----
#### 'mortality_0.1_A.gdb' instead of 'output.gdb'. NOTE: this redefines
#### process_singledir(), overriding the earlier version once sourced.
process_singledir = function(dir_results, dir_simulation, do_csv=T, do_tif=T, do_png=T){
  # Summarize a single mortality-scenario model run: read the Connectivity
  # lines from the run's geodatabase, total the larval quantity per source
  # patch, and write the results into dir_results as a csv plus "quantity"
  # and "percent" GeoTIFF and PNG rasters.
  #
  # dir_results   : "<run>_results" folder containing mortality_0.1_A.gdb
  # dir_simulation: matching "<run>_simulation" folder with PatchData/patch_ids
  # do_csv, do_tif, do_png: switches for each output product
  run_label = str_replace(basename(dir_results), '_results', '')

  # connectivity feature class from the mortality-scenario geodatabase
  conn_lines = readOGR(file.path(dir_results, 'mortality_0.1_A.gdb'), 'Connectivity', verbose=F)

  # total quantity per source patch, plus its share of the overall total
  conn_summary = conn_lines@data %>%
    as_tibble() %>%
    group_by(FromPatchID) %>%
    summarize(quantity = sum(Quantity)) %>%
    ungroup() %>%
    mutate(percent = quantity / sum(quantity) * 100) %>%
    arrange(desc(percent))

  if (do_csv) {
    write_csv(conn_summary, sprintf('%s/connectivity.csv', dir_results))
  }

  # patch-id raster; cells whose patch id has no connectivity row become NA
  patch_raster = raster(sprintf('%s/PatchData/patch_ids', dir_simulation))
  missing_cells = !getValues(patch_raster) %in% conn_summary$FromPatchID

  for (metric in c('quantity', 'percent')) {
    # map each patch id onto the metric's value
    ras = reclassify(patch_raster, conn_summary[, c('FromPatchID', metric)])
    ras[missing_cells] = NA
    if (do_tif) {
      writeRaster(ras, sprintf('%s/%s.tif', dir_results, metric), overwrite=T)
    }
    if (do_png) {
      # quick-look preview image
      png(sprintf('%s/%s.png', dir_results, metric))
      print(levelplot(ras, par.settings=viridisTheme, main=sprintf('%s %s', run_label, metric)))
      dev.off()
    }
  }
}
##area maps----
library(tidyverse)
library(raster)
library(plotly)
# Cumulative-area analysis of the bsb across-year mean raster: rank cells by
# larval quantity and ask how much area is needed to capture a given share of
# all larvae.
r = raster('G:/Team_Folders/Steph/bsb/mean.tif')
# one row per raster cell; each cell is assigned an area of 8 km2
d = data_frame(
  quantity = raster::getValues(r),
  cellid = 1:length(quantity),
  area_km2 = 8)
# rank non-NA cells by quantity and accumulate percent-of-larvae and area
d2 = d %>%
  filter(!is.na(quantity)) %>%
  arrange(desc(quantity)) %>%
  mutate(
    pct_quantity = quantity/sum(quantity)*100,
    cum_pct_quantity = cumsum(quantity/sum(quantity)*100),
    cum_area_km2 = cumsum(area_km2))
tail(d2) # 7208 km2
tail(d2$cum_area_km2, 1) # 7208 km2
# join the cumulative stats back onto the full cell table (NA cells stay NA)
d3 = d %>%
  left_join(d2, by='cellid')
summary(d3)
# raster of cumulative %-of-larvae per cell, with 10/30/50/80% contours
r2 = setValues(r, d3$cum_pct_quantity)
plot(r2)
x <- rasterToContour(r2, levels=c(10,30,50,80))
x
rgdal::writeOGR(x, "G:/Team_Folders/Steph/contours", layer="contour_bsb_mean", driver="ESRI Shapefile")
plot(r2, col='Spectral')
plot(x, add=TRUE)
# interactive leaflet preview of the cumulative-percent raster
library(leaflet)
binpal <- colorBin("Spectral", seq(0,100), 10, pretty = FALSE, na.color = "transparent")
leaflet() %>%
  addTiles() %>%
  addProviderTiles('Esri.OceanBasemap') %>%
  addRasterImage(r2, colors = binpal, opacity = 0.6) %>%
  addLegend(
    pal = binpal, values = seq(0,100),
    title = "cum % larvae")
# first cell reaching 30% cumulative larvae, marked on the cumulative curve
d_30 = d2 %>% filter(cum_pct_quantity >= 30) %>% head(1)
plot(r)
p = ggplot(d2, aes(y=cum_pct_quantity, x=cum_area_km2)) +
  geom_point() +
  geom_segment(x=0, xend=d_30$cum_area_km2, y=d_30$cum_pct_quantity, yend=d_30$cum_pct_quantity) +
  geom_segment(x=d_30$cum_area_km2, xend=d_30$cum_area_km2, y=0, yend=d_30$cum_pct_quantity) +
  scale_y_continuous(expand = c(0,0)) + scale_x_continuous(expand = c(0,0))
#coord_cartesian(xlim = c(0, tail(d$cum_area_km2, 1)), ylim = c(0, 100))
print(p)
ggplot2::ggsave('test.png', p)
ggplotly(p)
plot(r)
# todo ----
# for (dir in c('sp_2009','sp_2010','sp_2011','sp_2012', 'sp_2013', 'sp_2014', 'sp_2015')){
# summarize_sppyr('G:/Team_Folders/Steph/sp_2009')
# }
# - create github.com/graysreef organization
# - create R package inside github.com/graysreef/mget-conn-process repository
# using http://ucsb-bren.github.io/env-info/wk07_package.html
# - create Dan's plot: x) cumulative percent larvel input vs y) area of included ranked patches
#aggregate csvs ----
# Gather the per-run connectivity tables for one species/year into a single
# data frame. Fixes in this revision:
#  * no setwd() -- paths are built explicitly so the working directory is
#    left untouched;
#  * seq_along() instead of 1:length(), so zero matching dirs is a no-op;
#  * results are accumulated and row-bound (the old loop overwrote my.csv on
#    every iteration, keeping only the last file).
path <- 'G:/Team_Folders/Steph/rs_2015'
my.dirs <- dir(path, pattern = "results", include.dirs = TRUE)
csv_list <- vector("list", length(my.dirs))
for (i in seq_along(my.dirs)) {
  file <- file.path(path, my.dirs[i], "connectivity.csv")
  print(file)
  csv_list[[i]] <- read.csv(file)
}
# all runs stacked into one table (each connectivity.csv shares the same
# columns, written by process_singledir above)
my.csv <- do.call(rbind, csv_list)
# done ----
# process_geodb(
# 'G:/Team_Folders/Steph/bsb_2015/5_4_15_FM_bsb_50day_results',
# 'G:/Team_Folders/Steph/bsb_2015/5_4_15_FM_bsb_50day_simulation')
#process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2015', do_csv=F, do_tif=F, do_png=T)
#summarize_sppyr('G:/Team_Folders/Steph/bsb_2015')
##sensitivities
# Live calls: process the bsb 2009 sensitivity runs (diffusivity and mortality
# variants); heavy geodatabase/raster I/O on the G: drive.
process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009_diffusivity')
process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009_mortality')
# processed speices per Individual year----
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2015')
#
# summarize_sppyr('G:/Team_Folders/Steph/gg_2009')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2010')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2011')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2012')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2013')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2014')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2015')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2015')
#
# summarize_sppyr('G:/Team_Folders/Steph/sp_2009')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2010')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2011')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2012')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2013')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2014')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2015')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2015')
#
# summarize_sppyr('G:/Team_Folders/Steph/rs_2009')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2010')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2011')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2012')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2013')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2014')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2015')
#
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009_all')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2012_all')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2015')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2015_all')
#
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2009')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2009_all')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2010')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2011')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2012')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2012_all')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2013')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2014')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2015')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2015_all')
|
/process_results.R
|
no_license
|
bbest/graysreef
|
R
| false
| false
| 13,480
|
r
|
# load libraries ----
library(tidyverse) # install.packages('tidyverse')
library(stringr)
library(rgdal)
library(raster)
library(rasterVis)
library(maps)
library(mapproj)
# resolve name masking from the many attached packages: pin select() to dplyr
# and stack() to raster for the rest of this script
select = dplyr::select
stack = raster::stack
# define functions ----
process_singledir = function(dir_results, dir_simulation, do_csv=T, do_tif=T, do_png=T){
  # Summarize a single model run: read the Connectivity lines from the run's
  # geodatabase, total the larval quantity per source patch, and write the
  # results into dir_results as a csv plus "quantity" and "percent" GeoTIFF
  # and PNG rasters.
  #
  # dir_results   : "<run>_results" folder containing output.gdb
  # dir_simulation: matching "<run>_simulation" folder with PatchData/patch_ids
  # do_csv, do_tif, do_png: switches for each output product
  run_label = str_replace(basename(dir_results), '_results', '')

  # connectivity feature class from the run's geodatabase
  conn_lines = readOGR(file.path(dir_results, 'output.gdb'), 'Connectivity', verbose=F)

  # total quantity per source patch, plus its share of the overall total
  conn_summary = conn_lines@data %>%
    as_tibble() %>%
    group_by(FromPatchID) %>%
    summarize(quantity = sum(Quantity)) %>%
    ungroup() %>%
    mutate(percent = quantity / sum(quantity) * 100) %>%
    arrange(desc(percent))

  if (do_csv) {
    write_csv(conn_summary, sprintf('%s/connectivity.csv', dir_results))
  }

  # patch-id raster; cells whose patch id has no connectivity row become NA
  patch_raster = raster(sprintf('%s/PatchData/patch_ids', dir_simulation))
  missing_cells = !getValues(patch_raster) %in% conn_summary$FromPatchID

  for (metric in c('quantity', 'percent')) {
    # map each patch id onto the metric's value
    ras = reclassify(patch_raster, conn_summary[, c('FromPatchID', metric)])
    ras[missing_cells] = NA
    if (do_tif) {
      writeRaster(ras, sprintf('%s/%s.tif', dir_results, metric), overwrite=T)
    }
    if (do_png) {
      # quick-look preview image
      png(sprintf('%s/%s.png', dir_results, metric))
      print(levelplot(ras, par.settings=viridisTheme, main=sprintf('%s %s', run_label, metric)))
      dev.off()
    }
  }
}
process_sppyr_dirs = function(dir_sppyr, ...){
  # Process every "*_results" run directory under one species/year folder:
  # each run's geodatabase is converted into a connectivity csv plus quantity
  # and percent rasters by process_singledir().
  #
  # dir_sppyr: species/year directory containing <run>_results folders
  # ...      : forwarded to process_singledir() (do_csv, do_tif, do_png)
  #
  # Called for its file-writing side effects; returns NULL invisibly.
  dirs_results = list.files(dir_sppyr, '.*_results$', full.names=T)
  # seq_along() rather than 1:length(): an empty or missing directory is then
  # a harmless no-op instead of an error from iterating over c(1, 0)
  for (i in seq_along(dirs_results)){
    dir_results = dirs_results[i]
    dir_simulation = str_replace(dir_results, '_results', '_simulation')
    cat(sprintf('%03d of %d: %s\n', i, length(dirs_results), basename(dir_results)))
    # process from geodatabase to results csv, tifs, pngs
    process_singledir(dir_results, dir_simulation, ...)
  }
  invisible(NULL)
}
summarize_sppyr = function(dir_sppyr){
  # Combine the per-run quantity.tif rasters under one species/year directory
  # into across-run summary layers: the mean and the coefficient of variation
  # (sd / mean * 100), each written to dir_sppyr as a GeoTIFF with a PNG
  # preview.
  run_dirs = list.files(dir_sppyr, '.*_results$', full.names=T)
  quantity_stack = stack(sprintf('%s/quantity.tif', run_dirs))

  layer_mean = mean(quantity_stack, na.rm=T)
  layer_sd = calc(quantity_stack, fun=function(x) sd(x, na.rm=T))
  summaries = list(mean = layer_mean, cv = layer_sd / layer_mean * 100)

  for (v in names(summaries)) {
    out = summaries[[v]]
    writeRaster(out, sprintf('%s/%s.tif', dir_sppyr, v), overwrite=T)
    png(sprintf('%s/%s.png', dir_sppyr, v))
    print(levelplot(out, par.settings=viridisTheme, main=sprintf('%s %s', basename(dir_sppyr), v)))
    dev.off()
  }
}
summarize_spp = function(dir_root, sp){
  # Average one species' yearly mean rasters (<sp>_YYYY/mean.tif) across all
  # years and write the across-year mean and coefficient of variation into a
  # new <dir_root>/<sp> folder, each as a GeoTIFF with a PNG preview.
  #
  # dir_root: top-level folder holding the <sp>_YYYY directories
  # sp      : species code, e.g. "sp", "rs" or "bsb"
  year_dirs = list.files(dir_root, sprintf('%s_[0-9]{4}$', sp), full.names=T)
  mean_stack = stack(sprintf('%s/mean.tif', year_dirs))

  out_dir = file.path(dir_root, sp)
  if (!file.exists(out_dir)) dir.create(out_dir)

  layer_mean = mean(mean_stack, na.rm=T)
  layer_sd = calc(mean_stack, fun=function(x) sd(x, na.rm=T))
  summaries = list(mean = layer_mean, cv = layer_sd / layer_mean * 100)

  for (v in names(summaries)) {
    out = summaries[[v]]
    writeRaster(out, sprintf('%s/%s.tif', out_dir, v), overwrite=T)
    png(sprintf('%s/%s.png', out_dir, v))
    print(levelplot(out, par.settings=viridisTheme, main=sprintf('%s %s', basename(out_dir), v)))
    dev.off()
  }
}
#summarize_spp('G:/Team_Folders/Steph', sp='bsb')
# Run the per-species across-year summary for each of the four species codes.
for (sp in c('bsb','gg','rs','sp')){
summarize_spp('G:/Team_Folders/Steph', sp)
}
summarize_spp = function(dir_root='G:/Team_Folders/Steph', spp=c('bsb','gg','rs','sp')){
# NOTE(review): this redefines the per-species summarize_spp() above with an
# all-species variant -- only whichever definition was sourced last is live.
# Consider renaming (e.g. summarize_allspp) to remove the shadowing.
# Given the top-level directory and a vector of species codes, summarize the
# per-species mean rasters (<sp>/mean.tif) across species as _allspp/mean.tif
# and _allspp/cv.tif (percent CV), each with a PNG preview.
dirs_results = file.path(dir_root, spp)
rasters_mean = sprintf('%s/mean.tif', dirs_results)
stack_mean = stack(rasters_mean)
dir_spp = file.path(dir_root, '_allspp')
if (!file.exists(dir_spp)) dir.create(dir_spp)
r_mean = mean(stack_mean, na.rm=T)
r_sd = calc(stack_mean, fun=function(x) sd(x, na.rm=T))
r_cv = r_sd / r_mean * 100
for (v in c('mean','cv')){
# Look up r_mean / r_cv by constructed name.
r = get(sprintf('r_%s',v))
# write to GeoTIFF
writeRaster(r, sprintf('%s/%s.tif', dir_spp, v), overwrite=T)
# plot to PNG for easy preview
png(sprintf('%s/%s.png', dir_spp, v))
p = levelplot(r, par.settings=viridisTheme, main=sprintf('%s %s', basename(dir_spp), v))
print(p)
dev.off()
}
}
# Combine the per-species across-year means into the _allspp/ summaries.
summarize_spp(dir_root='G:/Team_Folders/Steph', spp=c('bsb','gg','rs','sp'))
####Processing for mortality because it has a differently named geodatabase----
process_singledir = function(dir_results, dir_simulation, do_csv=T, do_tif=T, do_png=T){
# Process one run's results for the mortality sensitivity case, whose
# geodatabase is named 'mortality_0.1_A.gdb' (unlike the default runs):
# aggregate larval connectivity per source patch, then optionally write
# connectivity.csv, quantity/percent GeoTIFFs, and PNG previews into
# dir_results.
# NOTE(review): this redefines the process_singledir() used earlier in the
# file; only the most recently sourced definition is live.
# dir_results = 'G:/Team_Folders/Steph/bsb_2015/2_2_15_FM_bsb_50day_results'
# dir_simulation = 'G:/Team_Folders/Steph/bsb_2015/2_2_15_FM_bsb_50day_simulation'
run = str_replace(basename(dir_results), '_results', '')
# read geodatabase
conn_lns = readOGR(file.path(dir_results, 'mortality_0.1_A.gdb'), 'Connectivity', verbose=F)
# aggregate across all ToPatchIDs to Gray's Reef (n=4)
conn_tbl = conn_lns@data %>%
as_tibble() %>%
group_by(FromPatchID) %>%
summarize(
quantity = sum(Quantity)) %>%
ungroup() %>%
mutate(
percent = quantity / sum(quantity) * 100) %>%
arrange(desc(percent))
# write to csv
if(do_csv){
write_csv(conn_tbl, sprintf('%s/connectivity.csv', dir_results))
}
# get patch id raster, and determine which cells are NA
r_id = raster(sprintf('%s/PatchData/patch_ids', dir_simulation)) # plot(r_id)
# Cells whose patch id has no connectivity record get masked out below.
id_NA = !getValues(r_id) %in% conn_tbl$FromPatchID
# create rasters for quantity and percent
for (v in c('quantity','percent')){
# reclassify from patch id to value
r = reclassify(r_id, conn_tbl[,c('FromPatchID', v)])
# set patch ids without a value to NA
r[id_NA] = NA
# write to GeoTIFF
if(do_tif){
writeRaster(r, sprintf('%s/%s.tif', dir_results, v), overwrite=T)
}
# plot to PNG for easy preview
if (do_png){
png(sprintf('%s/%s.png', dir_results, v))
p = levelplot(r, par.settings=viridisTheme, main=sprintf('%s %s', run, v))
print(p)
dev.off()
}
}
}
##area maps----
# Cumulative-larvae area analysis for the bsb mean raster: rank cells by
# larval quantity, accumulate percent-of-total and area, then map contours of
# cumulative percent and plot cumulative % vs cumulative area.
library(tidyverse)
library(raster)
library(plotly)
r = raster('G:/Team_Folders/Steph/bsb/mean.tif')
# NOTE(review): data_frame() is deprecated in favor of tibble(); kept as-is.
# area_km2 = 8 assumes every cell covers 8 km^2 -- TODO confirm cell size.
d = data_frame(
quantity = raster::getValues(r),
cellid = 1:length(quantity),
area_km2 = 8)
# Rank non-NA cells by quantity (descending) and accumulate percent and area.
d2 = d %>%
filter(!is.na(quantity)) %>%
arrange(desc(quantity)) %>%
mutate(
pct_quantity = quantity/sum(quantity)*100,
cum_pct_quantity = cumsum(quantity/sum(quantity)*100),
cum_area_km2 = cumsum(area_km2))
tail(d2) # 7208 km2
tail(d2$cum_area_km2, 1) # 7208 km2
# Re-attach cumulative stats to the full cell table so NA cells stay NA.
d3 = d %>%
left_join(d2, by='cellid')
summary(d3)
r2 = setValues(r, d3$cum_pct_quantity)
plot(r2)
x <- rasterToContour(r2, levels=c(10,30,50,80))
x
rgdal::writeOGR(x, "G:/Team_Folders/Steph/contours", layer="contour_bsb_mean", driver="ESRI Shapefile")
# NOTE(review): col='Spectral' is a palette *name*, not a color vector;
# presumably RColorBrewer::brewer.pal was intended -- verify output.
plot(r2, col='Spectral')
plot(x, add=TRUE)
library(leaflet)
binpal <- colorBin("Spectral", seq(0,100), 10, pretty = FALSE, na.color = "transparent")
leaflet() %>%
addTiles() %>%
addProviderTiles('Esri.OceanBasemap') %>%
addRasterImage(r2, colors = binpal, opacity = 0.6) %>%
addLegend(
pal = binpal, values = seq(0,100),
title = "cum % larvae")
# First cell at which cumulative percent reaches 30% (for annotation lines).
d_30 = d2 %>% filter(cum_pct_quantity >= 30) %>% head(1)
plot(r)
p = ggplot(d2, aes(y=cum_pct_quantity, x=cum_area_km2)) +
geom_point() +
geom_segment(x=0, xend=d_30$cum_area_km2, y=d_30$cum_pct_quantity, yend=d_30$cum_pct_quantity) +
geom_segment(x=d_30$cum_area_km2, xend=d_30$cum_area_km2, y=0, yend=d_30$cum_pct_quantity) +
scale_y_continuous(expand = c(0,0)) + scale_x_continuous(expand = c(0,0))
#coord_cartesian(xlim = c(0, tail(d$cum_area_km2, 1)), ylim = c(0, 100))
print(p)
ggplot2::ggsave('test.png', p)
ggplotly(p)
plot(r)
# todo ----
# for (dir in c('sp_2009','sp_2010','sp_2011','sp_2012', 'sp_2013', 'sp_2014', 'sp_2015')){
# summarize_sppyr('G:/Team_Folders/Steph/sp_2009')
# }
# - create github.com/graysreef organization
# - create R package inside github.com/graysreef/mget-conn-process repository
# using http://ucsb-bren.github.io/env-info/wk07_package.html
# - create Dan's plot: x) cumulative percent larval input vs y) area of included ranked patches
#aggregate csvs ----
# Collect every per-run connectivity.csv under the year directory and bind
# them into one table. BUG FIX: the original loop assigned each file to the
# same variable (my.csv), so only the last file survived and nothing was
# aggregated. Also avoids setwd() by using full paths.
path <- 'G:/Team_Folders/Steph/rs_2015'
result_dirs <- list.files(path, pattern = "results", include.dirs = TRUE, full.names = TRUE)
csv_list <- vector("list", length(result_dirs))
for (i in seq_along(result_dirs)) {
  f <- file.path(result_dirs[i], "connectivity.csv")
  print(f)
  csv_list[[i]] <- read.csv(f)
  # keep provenance so rows can be traced back to their run
  csv_list[[i]]$run <- basename(result_dirs[i])
}
# One data frame with all runs stacked row-wise.
my.csv <- do.call(rbind, csv_list)
# done ----
# process_geodb(
# 'G:/Team_Folders/Steph/bsb_2015/5_4_15_FM_bsb_50day_results',
# 'G:/Team_Folders/Steph/bsb_2015/5_4_15_FM_bsb_50day_simulation')
#process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2015', do_csv=F, do_tif=F, do_png=T)
#summarize_sppyr('G:/Team_Folders/Steph/bsb_2015')
##sensitivities
# Sensitivity experiments: apply the same per-run processing to the
# diffusivity and mortality sensitivity directories.
process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009_diffusivity')
process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009_mortality')
# processed species per individual year----
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/gg_2015')
#
# summarize_sppyr('G:/Team_Folders/Steph/gg_2009')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2010')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2011')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2012')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2013')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2014')
# summarize_sppyr('G:/Team_Folders/Steph/gg_2015')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/sp_2015')
#
# summarize_sppyr('G:/Team_Folders/Steph/sp_2009')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2010')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2011')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2012')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2013')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2014')
# summarize_sppyr('G:/Team_Folders/Steph/sp_2015')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/rs_2015')
#
# summarize_sppyr('G:/Team_Folders/Steph/rs_2009')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2010')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2011')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2012')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2013')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2014')
# summarize_sppyr('G:/Team_Folders/Steph/rs_2015')
#
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2009_all')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2010')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2011')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2012')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2012_all')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2013')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2014')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2015')
# process_sppyr_dirs('G:/Team_Folders/Steph/bsb_2015_all')
#
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2009')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2009_all')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2010')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2011')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2012')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2012_all')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2013')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2014')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2015')
# summarize_sppyr('G:/Team_Folders/Steph/bsb_2015_all')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bertin.r
\name{bertinCluster}
\alias{bertinCluster}
\title{Bertin display with corresponding cluster analysis.}
\usage{
bertinCluster(
x,
dmethod = c("euclidean", "euclidean"),
cmethod = c("ward", "ward"),
p = c(2, 2),
align = TRUE,
trim = NA,
type = c("triangle"),
xsegs = c(0, 0.2, 0.7, 0.9, 1),
ysegs = c(0, 0.1, 0.7, 1),
x.off = 0.01,
y.off = 0.01,
cex.axis = 0.6,
col.axis = grey(0.4),
draw.axis = TRUE,
...
)
}
\arguments{
\item{x}{\code{repgrid} object.}
\item{dmethod}{The distance measure to be used. This must be one of
\code{"euclidean"}, \code{"maximum"}, \code{"manhattan"},
\code{"canberra"}, \code{"binary"}, or \code{"minkowski"}.
Default is \code{"euclidean"}.
Any unambiguous substring can be given (e.g. \code{"euc"}
for \code{"euclidean"}).
A vector of length two can be passed if a different distance measure for
constructs and elements is wanted (e.g.\code{c("euclidean", "manhattan")}).
This will apply euclidean distance to the constructs and
manhattan distance to the elements.
For additional information on the different types see
\code{?dist}.}
\item{cmethod}{The agglomeration method to be used. This should be (an
unambiguous abbreviation of) one of \code{"ward"},
\code{"single"}, \code{"complete"}, \code{"average"},
\code{"mcquitty"}, \code{"median"} or \code{"centroid"}.
Default is \code{"ward"}.
A vector of length two can be passed if a different cluster method for
constructs and elements is wanted (e.g.\code{c("ward", "single")}).
This will apply ward clustering to the constructs and
single linkage clustering to the elements. If only one of either
constructs or elements is to be clustered the value \code{NA}
can be supplied. E.g. to cluster elements only use \code{c(NA, "ward")}.}
\item{p}{The power of the Minkowski distance, in case \code{"minkowski"}
is used as argument for \code{dmethod}. \code{p} can be a vector
of length two if different powers are wanted for constructs and
elements respectively (e.g. \code{c(2,1)}).}
\item{align}{Whether the constructs should be aligned before clustering
(default is \code{TRUE}). If not, the grid matrix is clustered
as is. See Details section in function \code{\link{cluster}} for more information.}
\item{trim}{The number of characters a construct is trimmed to (default is
\code{10}). If \code{NA} no trimming is done. Trimming
simply saves space when displaying the output.}
\item{type}{Type of dendrogram. Either \code{"triangle"} (default)
or \code{"rectangle"} form.}
\item{xsegs}{Numeric vector of normal device coordinates (ndc i.e. 0 to 1) to mark
the widths of the regions for the left labels, for the
bertin display, for the right labels and for the
vertical dendrogram (i.e. for the constructs).}
\item{ysegs}{Numeric vector of normal device coordinates (ndc i.e. 0 to 1) to mark
the heights of the regions for the horizontal dendrogram
(i.e. for the elements), for the bertin display and for
the element names.}
\item{x.off}{Horizontal offset between construct labels and construct dendrogram and
(default is \code{0.01} in normal device coordinates).}
\item{y.off}{Vertical offset between bertin display and element dendrogram and
(default is \code{0.01} in normal device coordinates).}
\item{cex.axis}{\code{cex} for axis labels, default is \code{.6}.}
\item{col.axis}{Color for axis and axis labels, default is \code{grey(.4)}.}
\item{draw.axis}{Whether to draw axis showing the distance metric for the dendrograms
(default is \code{TRUE}).}
\item{...}{additional parameters to be passed to function \code{\link{bertin}}.}
}
\value{
A list of two \code{\link{hclust}} object, for elements and constructs
respectively.
}
\description{
Element columns and
constructs rows are ordered according to cluster criterion. Various
distance measures as well as cluster methods are supported.
}
\examples{
\dontrun{
# default is euclidean distance and ward clustering
bertinCluster(bell2010)
### applying different distance measures and cluster methods
# euclidean distance and single linkage clustering
bertinCluster(bell2010, cmethod="single")
# manhattan distance and single linkage clustering
bertinCluster(bell2010, dmethod="manhattan", cm="single")
# minkowksi distance with power of 2 = euclidean distance
bertinCluster(bell2010, dm="mink", p=2)
### using different methods for constructs and elements
# ward clustering for constructs, single linkage for elements
bertinCluster(bell2010, cmethod=c("ward", "single"))
# euclidean distance measure for constructs, manhatten
# distance for elements
bertinCluster(bell2010, dmethod=c("euclidean", "man"))
# minkowski metric with different powers for constructs and elements
bertinCluster(bell2010, dm="mink", p=c(2,1))
### clustering either constructs or elements only
# euclidean distance and ward clustering for constructs no
# clustering for elements
bertinCluster(bell2010, cmethod=c("ward", NA))
# euclidean distance and single linkage clustering for elements
# no clustering for constructs
bertinCluster(bell2010, cm=c(NA, "single"))
### changing the appearance
# different dendrogram type
bertinCluster(bell2010, type="rectangle")
# no axis drawn for dendrogram
bertinCluster(bell2010, draw.axis=F)
### passing on arguments to bertin function via ...
# grey cell borders in bertin display
bertinCluster(bell2010, border="grey")
# omit printing of grid scores, i.e. colors only
bertinCluster(bell2010, showvalues=FALSE)
### changing the layout
# making the vertical dendrogram bigger
bertinCluster(bell2010, xsegs=c(0, .2, .5, .7, 1))
# making the horizontal dendrogram bigger
bertinCluster(bell2010, ysegs=c(0, .3, .8, 1))
}
}
\seealso{
\code{\link{cluster}}
}
|
/man/bertinCluster.Rd
|
no_license
|
cran/OpenRepGrid
|
R
| false
| true
| 6,318
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bertin.r
\name{bertinCluster}
\alias{bertinCluster}
\title{Bertin display with corresponding cluster analysis.}
\usage{
bertinCluster(
x,
dmethod = c("euclidean", "euclidean"),
cmethod = c("ward", "ward"),
p = c(2, 2),
align = TRUE,
trim = NA,
type = c("triangle"),
xsegs = c(0, 0.2, 0.7, 0.9, 1),
ysegs = c(0, 0.1, 0.7, 1),
x.off = 0.01,
y.off = 0.01,
cex.axis = 0.6,
col.axis = grey(0.4),
draw.axis = TRUE,
...
)
}
\arguments{
\item{x}{\code{repgrid} object.}
\item{dmethod}{The distance measure to be used. This must be one of
\code{"euclidean"}, \code{"maximum"}, \code{"manhattan"},
\code{"canberra"}, \code{"binary"}, or \code{"minkowski"}.
Default is \code{"euclidean"}.
Any unambiguous substring can be given (e.g. \code{"euc"}
for \code{"euclidean"}).
A vector of length two can be passed if a different distance measure for
constructs and elements is wanted (e.g.\code{c("euclidean", "manhattan")}).
This will apply euclidean distance to the constructs and
manhattan distance to the elements.
For additional information on the different types see
\code{?dist}.}
\item{cmethod}{The agglomeration method to be used. This should be (an
unambiguous abbreviation of) one of \code{"ward"},
\code{"single"}, \code{"complete"}, \code{"average"},
\code{"mcquitty"}, \code{"median"} or \code{"centroid"}.
Default is \code{"ward"}.
A vector of length two can be passed if a different cluster method for
constructs and elements is wanted (e.g.\code{c("ward", "single")}).
This will apply ward clustering to the constructs and
single linkage clustering to the elements. If only one of either
constructs or elements is to be clustered the value \code{NA}
can be supplied. E.g. to cluster elements only use \code{c(NA, "ward")}.}
\item{p}{The power of the Minkowski distance, in case \code{"minkowski"}
is used as argument for \code{dmethod}. \code{p} can be a vector
of length two if different powers are wanted for constructs and
elements respectively (e.g. \code{c(2,1)}).}
\item{align}{Whether the constructs should be aligned before clustering
(default is \code{TRUE}). If not, the grid matrix is clustered
as is. See Details section in function \code{\link{cluster}} for more information.}
\item{trim}{The number of characters a construct is trimmed to (default is
\code{10}). If \code{NA} no trimming is done. Trimming
simply saves space when displaying the output.}
\item{type}{Type of dendrogram. Either or \code{"triangle"} (default)
or \code{"rectangle"} form.}
\item{xsegs}{Numeric vector of normal device coordinates (ndc i.e. 0 to 1) to mark
the widths of the regions for the left labels, for the
bertin display, for the right labels and for the
vertical dendrogram (i.e. for the constructs).}
\item{ysegs}{Numeric vector of normal device coordinates (ndc i.e. 0 to 1) to mark
the heights of the regions for the horizontal dendrogram
(i.e. for the elements), for the bertin display and for
the element names.}
\item{x.off}{Horizontal offset between construct labels and construct dendrogram and
(default is \code{0.01} in normal device coordinates).}
\item{y.off}{Vertical offset between bertin display and element dendrogram and
(default is \code{0.01} in normal device coordinates).}
\item{cex.axis}{\code{cex} for axis labels, default is \code{.6}.}
\item{col.axis}{Color for axis and axis labels, default is \code{grey(.4)}.}
\item{draw.axis}{Whether to draw axis showing the distance metric for the dendrograms
(default is \code{TRUE}).}
\item{...}{additional parameters to be passed to function \code{\link{bertin}}.}
}
\value{
A list of two \code{\link{hclust}} object, for elements and constructs
respectively.
}
\description{
Element columns and
constructs rows are ordered according to cluster criterion. Various
distance measures as well as cluster methods are supported.
}
\examples{
\dontrun{
# default is euclidean distance and ward clustering
bertinCluster(bell2010)
### applying different distance measures and cluster methods
# euclidean distance and single linkage clustering
bertinCluster(bell2010, cmethod="single")
# manhattan distance and single linkage clustering
bertinCluster(bell2010, dmethod="manhattan", cm="single")
# minkowksi distance with power of 2 = euclidean distance
bertinCluster(bell2010, dm="mink", p=2)
### using different methods for constructs and elements
# ward clustering for constructs, single linkage for elements
bertinCluster(bell2010, cmethod=c("ward", "single"))
# euclidean distance measure for constructs, manhatten
# distance for elements
bertinCluster(bell2010, dmethod=c("euclidean", "man"))
# minkowski metric with different powers for constructs and elements
bertinCluster(bell2010, dm="mink", p=c(2,1))
### clustering either constructs or elements only
# euclidean distance and ward clustering for constructs no
# clustering for elements
bertinCluster(bell2010, cmethod=c("ward", NA))
# euclidean distance and single linkage clustering for elements
# no clustering for constructs
bertinCluster(bell2010, cm=c(NA, "single"))
### changing the appearance
# different dendrogram type
bertinCluster(bell2010, type="rectangle")
# no axis drawn for dendrogram
bertinCluster(bell2010, draw.axis=F)
### passing on arguments to bertin function via ...
# grey cell borders in bertin display
bertinCluster(bell2010, border="grey")
# omit printing of grid scores, i.e. colors only
bertinCluster(bell2010, showvalues=FALSE)
### changing the layout
# making the vertical dendrogram bigger
bertinCluster(bell2010, xsegs=c(0, .2, .5, .7, 1))
# making the horizontal dendrogram bigger
bertinCluster(bell2010, ysegs=c(0, .3, .8, 1))
}
}
\seealso{
\code{\link{cluster}}
}
|
#Exploratory Data Analysis:-
# Quick profiling of a user-chosen CSV using funModeling / Hmisc helpers.
# install.packages("tidyverse")
# install.packages("funModeling")
# install.packages("Hmisc")
library(funModeling)
library(tidyverse)
library(Hmisc)
# file.choose() opens an interactive picker, so this script is not batch-safe.
dirty_csv = read.csv(file.choose())
view(dirty_csv)
dim(dirty_csv)
#Observing the data and looking at its summary
glimpse(dirty_csv)
status(dirty_csv)
#Analyzing categorical variables
freq(dirty_csv)
#Analyzing numerical variable Graphically
plot_num(dirty_csv)
#Analyzing numerical variable Quantitatively
data_prof=profiling_num(dirty_csv)
#Analyzing numerical and categorical data at the same time
# describe() here resolves to Hmisc's version (loaded last) -- verify.
describe(dirty_csv)
|
/EDA.R
|
no_license
|
shivmistry605/MAST90106-Data-Science-Project-Group-3
|
R
| false
| false
| 646
|
r
|
#Exploratory Data Analysis:-
# Quick profiling of a user-chosen CSV using funModeling / Hmisc helpers.
# install.packages("tidyverse")
# install.packages("funModeling")
# install.packages("Hmisc")
library(funModeling)
library(tidyverse)
library(Hmisc)
# file.choose() opens an interactive picker, so this script is not batch-safe.
dirty_csv = read.csv(file.choose())
view(dirty_csv)
dim(dirty_csv)
#Observing the data and looking at its summary
glimpse(dirty_csv)
status(dirty_csv)
#Analyzing categorical variables
freq(dirty_csv)
#Analyzing numerical variable Graphically
plot_num(dirty_csv)
#Analyzing numerical variable Quantitatively
data_prof=profiling_num(dirty_csv)
#Analyzing numerical and categorical data at the same time
# describe() here resolves to Hmisc's version (loaded last) -- verify.
describe(dirty_csv)
|
makeCacheMatrix <- function(y = matrix()) {
  # Create a special "matrix" object that can cache its inverse.
  # Returns a list of four closures sharing the matrix `y` and cache `inv`:
  #   set(z)            -- replace the stored matrix and clear the cache
  #   get()             -- return the stored matrix
  #   setInverse(inverse) -- store a computed inverse in the cache
  #   getInverse()      -- return the cached inverse (NULL if not computed)
  inv <- NULL
  set <- function(z) {
    y <<- z
    inv <<- NULL
  }
  # BUG FIX: the getter previously returned `x`, a name never defined in
  # this closure, so get() fell through to (or failed on) a global `x`
  # instead of returning the stored matrix `y`.
  get <- function() y
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
cacheSolve <- function(y, ...) {
  ## Return a matrix that is the inverse of the matrix stored in `y`,
  ## a cache object created by makeCacheMatrix(). If an inverse is already
  ## cached it is returned directly; otherwise it is computed with solve(),
  ## stored back in the cache, and returned. Extra arguments `...` are
  ## passed through to solve().
  inv <- y$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- y$get()
  inv <- solve(mat, ...)
  # BUG FIX: previously called x$setInverse(inv); `x` is undefined in this
  # function, so the computed inverse was never cached (and the call errored
  # unless a global `x` happened to exist).
  y$setInverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
adithyap17067/ProgrammingAssignment2
|
R
| false
| false
| 574
|
r
|
makeCacheMatrix <- function(y = matrix()) {
  # Create a special "matrix" object that can cache its inverse.
  # Returns a list of four closures sharing the matrix `y` and cache `inv`:
  #   set(z)            -- replace the stored matrix and clear the cache
  #   get()             -- return the stored matrix
  #   setInverse(inverse) -- store a computed inverse in the cache
  #   getInverse()      -- return the cached inverse (NULL if not computed)
  inv <- NULL
  set <- function(z) {
    y <<- z
    inv <<- NULL
  }
  # BUG FIX: the getter previously returned `x`, a name never defined in
  # this closure, so get() fell through to (or failed on) a global `x`
  # instead of returning the stored matrix `y`.
  get <- function() y
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
cacheSolve <- function(y, ...) {
  ## Return a matrix that is the inverse of the matrix stored in `y`,
  ## a cache object created by makeCacheMatrix(). If an inverse is already
  ## cached it is returned directly; otherwise it is computed with solve(),
  ## stored back in the cache, and returned. Extra arguments `...` are
  ## passed through to solve().
  inv <- y$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- y$get()
  inv <- solve(mat, ...)
  # BUG FIX: previously called x$setInverse(inv); `x` is undefined in this
  # function, so the computed inverse was never cached (and the call errored
  # unless a global `x` happened to exist).
  y$setInverse(inv)
  inv
}
|
# K-nearest-neighbours classification of the glass dataset: standardize the
# 9 predictors, split 70/30, tune k over 1..10, refit with the best k.
# NOTE(review): install.packages() on every run is slow and fragile; prefer
# installing once and keeping only the library() calls in the script.
install.packages('caTools') #for train and test data split
install.packages('dplyr') #for Data Manipulation
install.packages('ggplot2') #for Data Visualization
install.packages('class') #KNN
install.packages('caret') #Confusion Matrix
install.packages('corrplot') #Correlation Plot
library(caTools)
library(dplyr)
library(ggplot2)
library(caret)
library(class)
library(corrplot)
glass <- read.csv("C:/Users/tussh/Documents/KNN/glass.csv")
View(glass)
# Standardize predictors (zero mean, unit sd) so distance-based KNN is not
# dominated by features with large scales.
standard.features <- scale(glass[,1:9])
#Join the standardized data with the target column
data <- cbind(standard.features,glass[10])
#Check if there are any missing values to impute.
anyNA(data)
head(data)
corrplot(cor(data))
#We use caTools() to split the datainto train and test datasets with a SplitRatio = 0.70.
# Seed fixed so the split (and reported accuracies) are reproducible.
set.seed(101)
sample <- sample.split(data$Type,SplitRatio = 0.70)
train <- subset(data,sample==TRUE)
test <- subset(data,sample==FALSE)
#We use knn() to predict our target variable Type of the test dataset with k=1.
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=1)
#Error in prediction
error <- mean(predicted.type!=test$Type)
#Confusion Matrix
confusionMatrix(predicted.type,as.factor(test$Type))
#The above results reveal that our model achieved an accuracy of 72.3076923 %. Lets try different values of k and assess our model.
predicted.type <- NULL
error.rate <- NULL
# Misclassification rate for each candidate k in 1..10.
for (i in 1:10) {
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=i)
error.rate[i] <- mean(predicted.type!=test$Type)
}
knn.error <- as.data.frame(cbind(k=1:10,error.type =error.rate))
#Lets plot error.type vs k using ggplot.
ggplot(knn.error,aes(k,error.type))+
geom_point()+
geom_line() +
scale_x_continuous(breaks=1:10)+
theme_bw() +
xlab("Value of K") +
ylab('Error')
#The above plot reveals that error is lowest when k=3 and then jumps back high revealing that k=3 is the optimum value.
#Now lets build our model using k=3 and assess it.
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=3)
#Error in prediction
error <- mean(predicted.type!=test$Type)
#Confusion Matrix
confusionMatrix(predicted.type,as.factor(test$Type))
#The Above Model gave us an accuracy of 78.4615385 %.
# NOTE(review): the k=3 fit below duplicates the block above verbatim.
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=3)
#Error in prediction
error <- mean(predicted.type!=test$Type)
#Confusion Matrix
confusionMatrix(predicted.type,as.factor(test$Type))
|
/glass.R
|
no_license
|
sowmyatushar/R-Programing-Language
|
R
| false
| false
| 2,454
|
r
|
# K-nearest-neighbours classification of the glass dataset: standardize the
# 9 predictors, split 70/30, tune k over 1..10, refit with the best k.
# NOTE(review): install.packages() on every run is slow and fragile; prefer
# installing once and keeping only the library() calls in the script.
install.packages('caTools') #for train and test data split
install.packages('dplyr') #for Data Manipulation
install.packages('ggplot2') #for Data Visualization
install.packages('class') #KNN
install.packages('caret') #Confusion Matrix
install.packages('corrplot') #Correlation Plot
library(caTools)
library(dplyr)
library(ggplot2)
library(caret)
library(class)
library(corrplot)
glass <- read.csv("C:/Users/tussh/Documents/KNN/glass.csv")
View(glass)
# Standardize predictors (zero mean, unit sd) so distance-based KNN is not
# dominated by features with large scales.
standard.features <- scale(glass[,1:9])
#Join the standardized data with the target column
data <- cbind(standard.features,glass[10])
#Check if there are any missing values to impute.
anyNA(data)
head(data)
corrplot(cor(data))
#We use caTools() to split the datainto train and test datasets with a SplitRatio = 0.70.
# Seed fixed so the split (and reported accuracies) are reproducible.
set.seed(101)
sample <- sample.split(data$Type,SplitRatio = 0.70)
train <- subset(data,sample==TRUE)
test <- subset(data,sample==FALSE)
#We use knn() to predict our target variable Type of the test dataset with k=1.
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=1)
#Error in prediction
error <- mean(predicted.type!=test$Type)
#Confusion Matrix
confusionMatrix(predicted.type,as.factor(test$Type))
#The above results reveal that our model achieved an accuracy of 72.3076923 %. Lets try different values of k and assess our model.
predicted.type <- NULL
error.rate <- NULL
# Misclassification rate for each candidate k in 1..10.
for (i in 1:10) {
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=i)
error.rate[i] <- mean(predicted.type!=test$Type)
}
knn.error <- as.data.frame(cbind(k=1:10,error.type =error.rate))
#Lets plot error.type vs k using ggplot.
ggplot(knn.error,aes(k,error.type))+
geom_point()+
geom_line() +
scale_x_continuous(breaks=1:10)+
theme_bw() +
xlab("Value of K") +
ylab('Error')
#The above plot reveals that error is lowest when k=3 and then jumps back high revealing that k=3 is the optimum value.
#Now lets build our model using k=3 and assess it.
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=3)
#Error in prediction
error <- mean(predicted.type!=test$Type)
#Confusion Matrix
confusionMatrix(predicted.type,as.factor(test$Type))
#The Above Model gave us an accuracy of 78.4615385 %.
# NOTE(review): the k=3 fit below duplicates the block above verbatim.
predicted.type <- knn(train[1:9],test[1:9],train$Type,k=3)
#Error in prediction
error <- mean(predicted.type!=test$Type)
#Confusion Matrix
confusionMatrix(predicted.type,as.factor(test$Type))
|
# O-stats plots with better formatting.
# Author: QDR
# Project: NEON ITV
# Created: 19 Oct 2016
# Last modified: 02 Dec 2016
# Modified 2 Dec: plots with Harvard removed, and work on formatting.
# Modified 7 Nov: Improve scatterplots
# Modified 30 Oct: add continental
# Modified 20 Oct: change axis labels
source('code/vis/loadplotdat.r')
# Jitter plots ------------------------------------------------------------
library(reshape2)
# Find "significance"
o2015goodsites <- o2015goodsites %>%
mutate(local_significant = ostat_norm_localnull_ses < ostat_norm_localnull_seslower | ostat_norm_localnull_ses > ostat_norm_localnull_sesupper,
reg_significant = ostat_norm_regnull_ses < ostat_norm_regnull_seslower | ostat_norm_regnull_ses > ostat_norm_regnull_sesupper)
jitterplotdat <- o2015goodsites %>%
filter(trait == 'logweight') %>%
select(siteID, ostat_norm_localnull_ses, ostat_norm_regnull_ses, local_significant, reg_significant)
jitterplotdat <- with(jitterplotdat, data.frame(siteID=siteID,
ses = c(ostat_norm_localnull_ses, ostat_norm_regnull_ses),
significant = c(local_significant, reg_significant),
nullmodel = rep(c('Local','Regional'), each=nrow(jitterplotdat))))
jitterplottext <- data.frame(lab = c('More partitioning\nthan expected', 'Neutral', 'More overlap\nthan expected'),
x = c(1.5, 1.5, 1.5),
y = c(-10, 1, 10))
pj <- ggplot(jitterplotdat, aes(x=nullmodel,y=ses)) +
geom_hline(yintercept=0, linetype='dotted', color = 'blue', size=1) +
geom_jitter(aes(color=significant), height=0, width=0.25) +
geom_text(aes(x,y,label=lab), data=jitterplottext, family = 'Helvetica') +
scale_x_discrete(name = 'Null model', labels = c('Local','Regional')) +
scale_y_continuous(name = expression(paste('SES of NO'[local]))) +
scale_color_manual(values = c('gray75', 'black')) +
theme_john + theme(legend.position = c(0.88,0.1))
ggsave('figs/msfigs/ostat_jitterplot.png', pj, height=5, width=5, dpi=400)
# Density plots -----------------------------------------------------------
# Local
good_sites <- unique(o2015$siteID)
sites_temporder <- neonsitedata %>% arrange(bio6) %>% dplyr::select(siteID, bio6)
# Per-species body-mass density curves (one translucent black polygon per taxonID),
# faceted by site, with the site's overlap statistic and MTCM printed on each panel.
# Sites HEAL, DELA, DSNY are excluded throughout this script.
pdensshade <- ggplot(filter(mam_capture_sitemerge, year == 2015, !siteID %in% c('HEAL','DELA','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
stat_density(adjust = 2, size = 1, aes(x = log10(weight), group = taxonID), fill = 'black', alpha = 0.25, geom='polygon', position = 'identity') + facet_wrap(~ siteID) +
scale_y_continuous(name = 'probability density', expand = c(0,0)) +
# x axis is log10(grams); breaks at 1,2,3 are relabelled 10,100,1000 g.
scale_x_continuous(name = 'body mass (g)', breaks = c(1, 2, 3), labels = c(10, 100, 1000), limits = c(0.5, 3.1)) +
geom_text(aes(label = paste0('Overlap = ', round(ostat_norm,3)), x = 1.2, y = 13), color = 'black', data = o2015 %>% filter(!siteID %in% c('HEAL','DELA','DSNY'), trait %in% 'logweight') %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste0('MTCM = ', round(bio6, 1), '°C'), x = 1.2, y = 15), color = 'black', data = neonsitedata %>% filter(siteID %in% good_sites) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
theme_john + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), strip.background = element_blank())
source('~/GitHub/NEON/code/facetadjust.r')
# NOTE(review): hardcoded user-specific Windows path — not portable across machines.
png('C:/Users/Q/google_drive/NEON_EAGER/Figures/msfigs2017jan/figs5.png', height = 10, width = 12, res=400, units='in')
#facetAdjust(pdensshade)
# NOTE(review): bare `pdensshade` auto-prints only when run interactively; under
# source() with default options this line draws nothing — confirm intended usage.
pdensshade
dev.off()
# Regional
# Build one long data frame of regional-pool individuals: one row per simulated
# individual, tagged with its site and log10 body mass.
# seq_along() (not 1:length()) is safe if the pool list is ever empty, and the
# list is preallocated instead of grown.
regpoolplotdat <- vector("list", length(siteregpoolsp_mam15_iucn))
for (i in seq_along(siteregpoolsp_mam15_iucn)) {
  regpoolplotdat[[i]] <- data.frame(siteID = names(siteregpoollist_mam15_iucn)[i],
                                    taxonID = siteregpoolsp_mam15_iucn[[i]],
                                    logweight = log10(siteregpoollist_mam15_iucn[[i]]$weight))
}
regpoolplotdat <- do.call('rbind', regpoolplotdat)
# Classify each site's regional-null result: the SES falling below the lower
# envelope bound means significant partitioning, above the upper bound means
# significant overlap; otherwise the site is labelled 'neutral'.
o2015regstat <- o2015goodsites %>%
  filter(trait == 'logweight') %>%
  arrange(bio1) %>%
  transmute(siteID,
            reg_significant_partition = ostat_norm_regnull_ses < ostat_norm_regnull_seslower,
            reg_significant_overlap = ostat_norm_regnull_ses > ostat_norm_regnull_sesupper)
site_labels <- rep('neutral', nrow(o2015regstat))
site_labels[o2015regstat$reg_significant_partition] <- 'significantly partitioned'
site_labels[o2015regstat$reg_significant_overlap] <- 'significantly overlapping'
o2015regstat$stattext <- site_labels
# Drop the three excluded sites, then flag regional O-stats whose SES falls
# outside either null envelope (two-sided significance for each null model).
or2015goodsites <- filter(or2015, !siteID %in% c('DELA','DSNY','HEAL'))
or2015goodsites <- mutate(or2015goodsites,
                          allpool_significant = ostat_reg_allpoolnull_ses < ostat_reg_allpoolnull_seslower | ostat_reg_allpoolnull_ses > ostat_reg_allpoolnull_sesupper,
                          bysp_significant = ostat_reg_byspnull_ses < ostat_reg_byspnull_seslower | ostat_reg_byspnull_ses > ostat_reg_byspnull_sesupper)
# One-sided flags per null model, used to print 'filtered' / 'overdispersed' /
# 'neutral' labels on the density panels below.
or2015regstat <- or2015goodsites %>%
  filter(trait == 'logweight') %>%
  arrange(bio1) %>%
  transmute(siteID, ostat_reg,
            bysp_significant_filter = ostat_reg_byspnull_ses < ostat_reg_byspnull_seslower,
            bysp_significant_overdisperse = ostat_reg_byspnull_ses > ostat_reg_byspnull_sesupper,
            allpool_significant_filter = ostat_reg_allpoolnull_ses < ostat_reg_allpoolnull_seslower,
            allpool_significant_overdisperse = ostat_reg_allpoolnull_ses > ostat_reg_allpoolnull_sesupper)
bysp_label <- rep('neutral', nrow(or2015regstat))
bysp_label[or2015regstat$bysp_significant_filter] <- 'filtered'
bysp_label[or2015regstat$bysp_significant_overdisperse] <- 'overdispersed'
allpool_label <- rep('neutral', nrow(or2015regstat))
allpool_label[or2015regstat$allpool_significant_filter] <- 'filtered'
allpool_label[or2015regstat$allpool_significant_overdisperse] <- 'overdispersed'
or2015regstat$stattextbysp <- bysp_label
or2015regstat$stattextallpool <- allpool_label
# Regional species pool (black) vs. local community (skyblue) body-mass density
# per site, annotated with the outcome of the two regional null models (NM1 =
# all-pool, NM2 = by-species) and the regional O-stat value.
pdenslabels <- ggplot(filter(mam_capture_sitemerge, year == 2015, !siteID %in% c('HEAL','DELA','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID), logweight=log10(weight))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'black', alpha = 1, geom = 'polygon', position = 'identity', data = regpoolplotdat %>% filter(!siteID %in% c('HEAL','DELA','DSNY'))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'skyblue', alpha = 0.75, geom='polygon', position = 'identity') + facet_wrap(~ siteID) +
geom_text(aes(label = paste('NM1:',stattextallpool), x = 2, y = 5), data = or2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste('NM2:',stattextbysp), x = 2, y = 4), data = or2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = round(ostat_reg,3), x = 2, y = 3), data = or2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
scale_x_continuous(breaks = c(1, 2), labels = c(10, 100), name = expression(paste('log'[10], ' body mass'))) +
theme_john + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
#qSubtitle('Regional species pools (black) and local communities (blue)', 'Significance of "regional overlap stat" shown, sites ordered by temp')
# facetAdjust (sourced helper) repositions x-axes on incomplete facet rows.
source('~/GitHub/NEON/code/facetadjust.r')
png('figs/msfigs/regionalspeciespoolsdensity_withlabels.png', height=10, width=12, res=400, units='in')
facetAdjust(pdenslabels)
dev.off()
# Continental pool (black, all captures pooled across sites) vs. local community
# (skyblue) body-mass densities per site, labelled with null-model outcomes and
# the regional O-stat. Dropping siteID from the pooled data makes every facet
# show the same continental curve.
pdensconti <- ggplot(filter(mam_capture_sitemerge, year == 2015, !siteID %in% c('HEAL','DELA','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID), logweight=log10(weight))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'black', alpha = 1, geom = 'polygon', position = 'identity', data = filter(mam_capture_sitemerge, !siteID %in% c('HEAL','DELA','DSNY')) %>% select(-siteID) %>% mutate(logweight=log10(weight))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'skyblue', alpha = 0.75, geom='polygon', position = 'identity') + facet_wrap(~ siteID) +
geom_text(aes(label = paste('NM1:',stattextallpool), x = 1.5, y = 5), data = orc2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste('NM2:',stattextbysp), x = 1.5, y = 4), data = orc2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
# family = 'Helvetica' added here for font consistency with the two labels above.
geom_text(aes(label = round(ostat_reg,3), x = 1.5, y = 3), data = orc2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
scale_x_continuous(breaks = c(1, 2), labels = c(10, 100), name = expression(paste('log'[10], ' body mass'))) +
theme_john + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
#qSubtitle('Continental species pools (black) and local communities (blue)', 'Significance of "regional overlap stat" shown, sites ordered by temp')
png('figs/msfigs/continentalspeciespoolsdensity_withlabels.png', height=10, width=12, res=400, units='in')
facetAdjust(pdensconti)
dev.off()
# Scatter plots -----------------------------------------------------------
# Local
# Each plot below: median niche overlap (ostat_norm) vs. one environmental
# predictor, with the local-null 95% envelope as a skyblue segment per site.
# The regional-null segments are retained but commented out.
# Overlap vs. mean annual temperature (bio1).
porawtemp <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio1)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = parse(text=bioclimnames[1])) +
theme_john
#qSubtitle('Overlap statistics for 2015 NEON mammals versus MAT', 'Raw values, local and regional nulls')
# Overlap vs. Chao1 species richness.
porawchao <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=chao1)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Species Richness (Chao1)') +
theme_john
#qSubtitle('Overlap statistics for 2015 NEON mammals versus Richness', 'Raw values, local and regional nulls')
# Overlap vs. mean pairwise phylogenetic distance (SES).
porawmpd <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=mpd_z)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Mean Pairwise Distance SES') +
theme_john
#qSubtitle('Overlap statistics for 2015 NEON mammals versus MPD', 'Raw values, local and regional nulls')
# Overlap vs. annual precipitation (bio12).
porawprecip <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio12)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = bioclimnames[12]) +
theme_john
# Overlap vs. temperature seasonality (bio4).
porawtempseas <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio4)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = bioclimnames[4]) +
theme_john
# Overlap vs. precipitation seasonality (bio15).
porawprecipseas <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio15)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = bioclimnames[15]) +
theme_john
# Overlap vs. among-year temperature CV.
porawtempcv <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=cv_bio1)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Among-year temperature CV') +
theme_john
# Overlap vs. among-year precipitation CV.
porawprecipcv <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=cv_bio12)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Among-year precipitation CV') +
theme_john
library(directlabels)
# Direct site-ID labels drawn just above each point (shared across the plots below).
sitelab <- geom_dl(aes(label = siteID, y = ostat_norm), method = list('top.bumptwice', cex = 0.75, vjust = -0.5, fontfamily = 'Helvetica'))
fp <- 'C:/Users/Q/Google Drive/NEON_EAGER/Figures/msfigs2017jan/figs6'
# Save each labelled local scatterplot as a 5 x 5 in, 400 dpi PNG.
local_scatter_files <- list(scatterlocaltemp.png = porawtemp,
                            scatterlocalprecip.png = porawprecip,
                            scatterlocalrichness.png = porawchao,
                            scatterlocalmpd.png = porawmpd,
                            scatterlocaltempseas.png = porawtempseas,
                            scatterlocalprecipseas.png = porawprecipseas,
                            scatterlocaltempcv.png = porawtempcv,
                            scatterlocalprecipcv.png = porawprecipcv)
for (fn in names(local_scatter_files)) {
  ggsave(file.path(fp, fn), local_scatter_files[[fn]] + sitelab, height = 5, width = 5, dpi = 400)
}
# Regional
# Each plot below: regional O-stat (ostat_reg) vs. one environmental predictor,
# with both regional null envelopes drawn as segments per site
# (seagreen = all-pool null, plum = by-species null).
# Regional O-stat vs. mean annual temperature (bio1).
porrawtemp <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = parse(text=bioclimnames[1])) +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus MAT', 'Raw values, all-pool and by-species nulls')
# Regional O-stat vs. annual precipitation (bio12).
porrawprecip <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio12)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = bioclimnames[12]) +
theme_john
# Regional O-stat vs. Chao1 species richness.
porrawchao <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Chao1 Species Richness') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus richness', 'Raw values, all-pool and by-species nulls')
# Regional O-stat vs. mean pairwise phylogenetic distance (SES).
porrawmpd <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Mean Pairwise Distance SES') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus mpd', 'Raw values, all-pool and by-species nulls')
# Regional O-stat vs. temperature seasonality (bio4).
porrawtempseas <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio4)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = bioclimnames[4]) +
theme_john
# Regional O-stat vs. precipitation seasonality (bio15).
porrawprecipseas <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio15)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = bioclimnames[15]) +
theme_john
# Regional O-stat vs. among-year temperature CV.
porrawtempcv <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=cv_bio1)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Among-year temperature CV') +
theme_john
# Regional O-stat vs. among-year precipitation CV.
porrawprecipcv <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=cv_bio12)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Among-year precipitation CV') +
theme_john
# Direct site-ID labels for the regional scatterplots (labels the ostat_reg points).
sitelabreg <- geom_dl(aes(label = siteID, y = ostat_reg), method = list('top.bumptwice', cex = 0.75, vjust = -0.5, fontfamily = 'Helvetica'))
# Save each labelled regional scatterplot as a 5 x 5 in, 400 dpi PNG.
regional_scatter_files <- list(scatterregionaltemp.png = porrawtemp,
                               scatterregionalprecip.png = porrawprecip,
                               scatterregionalrichness.png = porrawchao,
                               scatterregionalmpd.png = porrawmpd,
                               scatterregionaltempseas.png = porrawtempseas,
                               scatterregionalprecipseas.png = porrawprecipseas,
                               scatterregionaltempcv.png = porrawtempcv,
                               scatterregionalprecipcv.png = porrawprecipcv)
for (fn in names(regional_scatter_files)) {
  ggsave(file.path('figs/msfigs', fn), regional_scatter_files[[fn]] + sitelabreg, height = 5, width = 5, dpi = 400)
}
# 27 Oct: simpler plots with logistic line --------------------------------
# Inverse-logit (logistic) curve used for fitted lines on the overlap plots.
# plogis(q) is exactly 1 / (1 + exp(-q)), evaluated on b0 + b1 * x.
fx <- function(x, b0, b1) plogis(b0 + b1 * x)
# Fitted coefficients (intercept, slope) for overlap ~ MAT and overlap ~ Chao1
# richness, fed to fx() to draw the logistic curves below.
# NOTE(review): `$coeff` partial-matches an element of the summary object
# (presumably `coefficients`), and `$mean` suggests a beta-regression-style fit —
# confirm against where reglocalbio / reglocalchao are fitted.
tempco <- summary(reglocalbio)$coeff$mean[,1]
chaoco <- summary(reglocalchao)$coeff$mean[,1]
# Shared two-level color scale: gray = not significant, black = significant.
csc <- scale_color_manual(values = c('gray75','black'))
# Simpler local scatterplots: points colored by local-null significance plus a
# fitted logistic curve (fx with the coefficients extracted above). These
# reassign porawtemp / porawchao / porawmpd, replacing the envelope versions.
porawtemp <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
#geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'skyblue') +
#geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm, color=local_significant), size = 2) +
stat_function(geom='line', fun = fx, args=list(b0 = tempco[1], b1 = tempco[2]), color = 'blue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = parse(text=bioclimnames[1])) +
theme_john + theme(legend.position = 'none') + csc
#qSubtitle('Overlap statistics for 2015 NEON mammals versus MAT', 'Raw values, local and regional nulls')
porawchao <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
#geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'skyblue') +
#geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm, color=local_significant), size = 2) +
stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'blue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = 'Species Richness (Chao1)') +
theme_john + theme(legend.position = 'none') + csc
# MPD plot has no fitted line (no coefficient object extracted for it here).
porawmpd <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
#geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'skyblue') +
#geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm, color=local_significant), size = 2) +
labs(y = 'Niche Overlap', x = 'Mean Pairwise Distance SES') +
theme_john+ theme(legend.position = 'none') + csc
ggsave('figs/msfigs/simplescatterlocaltemp.png', porawtemp + theme(aspect.ratio=1), height=5, width=5, dpi=400)
ggsave('figs/msfigs/simplescatterlocalrichness.png', porawchao + theme(aspect.ratio=1), height=5, width=5, dpi=400)
# Make these into figures a and b
# Only the left panel keeps its y axis; the others drop theirs before binding.
panela <- porawtemp# + theme(aspect.ratio=1)
panelb <- porawchao + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
panelc <- porawmpd + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
library(gridExtra)
grid.arrange(panela, panelb, nrow=1, widths=c(1.05, 0.95))
library(grid)
# Three-panel composite: gtables bound side by side, heights taken from the last.
png('figs/msfigs/simplescatter3panels.png', height=4, width=12, res=400, units='in')
grid.newpage()
grid.draw(cbind(ggplotGrob(panela), ggplotGrob(panelb), ggplotGrob(panelc), size = "last"))
dev.off()
# Logit scale scatterplots ------------------------------------------------
library(scales)
# Custom axis transformations: inverse-logit squeezes toward [0,1]; logit
# stretches values near 0 and 1 apart (plogis/qlogis are the transform pairs).
inverse_logit_trans <- trans_new("inverse logit",
transform = plogis,
inverse = qlogis)
logit_trans <- trans_new("logit",
transform = qlogis,
inverse = plogis)
# Quick interactive preview on the inverse-logit y scale (auto-prints only when
# run interactively, not under source()).
porawtemp + scale_y_continuous(trans = inverse_logit_trans)
# Rebuild the two local scatterplots with a logit-transformed y coordinate so
# the fitted logistic curve appears as (approximately) a straight line.
porawtemp <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
geom_point(aes(y = ostat_norm, color=local_significant), size = 3) +
# geom_abline(intercept = tempco[1], slope = tempco[2], color = 'dodgerblue', size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = tempco[1], b1 = tempco[2]), color = 'dodgerblue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = parse(text=bioclimnames[1])) +
theme_john + theme(legend.position = 'none') + csc +
coord_trans(y = logit_trans) +
scale_x_continuous(expand = c(0,0), breaks = c(0,10,20), labels=c(0,10,20), limits=c(-0.5,21.5))
porawchao <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
geom_point(aes(y = ostat_norm, color=local_significant), size = 3) +
#geom_abline(intercept = chaoco[1], slope = chaoco[2], color = 'dodgerblue', size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'dodgerblue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = 'Species Richness (Chao1)') +
theme_john + theme(legend.position = 'none') + csc +
coord_trans(y = logit_trans) +
scale_x_continuous(expand = c(0,0), breaks = c(5,10,15), labels=c(5,10,15), limits=c(4,16))
# Regional
# Coefficients for the regional fits; note this overwrites the local `chaoco`.
# NOTE(review): `$coeff` partial-matches into the summary object — confirm
# against where regregionalmpd / regregionalchao are fitted.
mpdco <- summary(regregionalmpd)$coeff$mean[,1]
chaoco <- summary(regregionalchao)$coeff$mean[,1]
# Regional O-stat vs. MAT (no fitted line for this predictor).
porrawtemp <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = parse(text=bioclimnames[1])) +
theme_john
# Regional O-stat vs. richness, with fitted logistic curve.
porrawchao <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Chao1 Species Richness') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus richness', 'Raw values, all-pool and by-species nulls')
# Regional O-stat vs. MPD SES, with fitted logistic curve.
porrawmpd <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = mpdco[1], b1 = mpdco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Mean Pairwise Distance SES') +
theme_john
# Three-panel composite; only the left panel keeps its y axis.
panela <- porrawtemp# + theme(aspect.ratio=1)
panelb <- porrawchao + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
panelc <- porrawmpd + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
library(grid)
png('figs/msfigs/simplescatter3panels_regional.png', height=5, width=15, res=400, units='in')
grid.newpage()
grid.draw(cbind(ggplotGrob(panela), ggplotGrob(panelb), ggplotGrob(panelc), size = "last"))
dev.off()
# Continental
# Same three-panel layout for the continental-pool O-stats; fitted lines are
# commented out here (no continental regression coefficients extracted).
pocrawtemp <- ggplot(orc2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = parse(text=bioclimnames[1])) +
theme_john
pocrawchao <- ggplot(orc2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
#stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Chao1 Species Richness') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus richness', 'Raw values, all-pool and by-species nulls')
pocrawmpd <- ggplot(orc2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
# stat_function(geom='line', fun = fx, args=list(b0 = mpdco[1], b1 = mpdco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Mean Pairwise Distance SES') +
theme_john
# Three-panel composite; only the left panel keeps its y axis.
panela <- pocrawtemp# + theme(aspect.ratio=1)
panelb <- pocrawchao + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
panelc <- pocrawmpd + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
library(grid)
png('figs/msfigs/simplescatter3panels_continental.png', height=5, width=15, res=400, units='in')
grid.newpage()
grid.draw(cbind(ggplotGrob(panela), ggplotGrob(panelb), ggplotGrob(panelc), size = "last"))
dev.off()
|
/code/vis/formattedplots.r
|
no_license
|
NEON-biodiversity/mammalitv
|
R
| false
| false
| 30,233
|
r
|
# O-stats plots with better formatting.
# Author: QDR
# Project: NEON ITV
# Created: 19 Oct 2016
# Last modified: 02 Dec 2016
# Modified 2 Dec: plots with Harvard removed, and work on formatting.
# Modified 7 Nov: Improve scatterplots
# Modified 30 Oct: add continental
# Modified 20 Oct: change axis labels
# Load the shared plotting objects used throughout this script
# (o2015, or2015, neonsitedata, mam_capture_sitemerge, theme_john, ...).
source('code/vis/loadplotdat.r')
# Jitter plots ------------------------------------------------------------
library(reshape2)
# Find "significance"
# Flag sites whose local / regional null-model SES falls outside the null envelope.
o2015goodsites <- mutate(o2015goodsites,
                         local_significant = ostat_norm_localnull_ses < ostat_norm_localnull_seslower | ostat_norm_localnull_ses > ostat_norm_localnull_sesupper,
                         reg_significant = ostat_norm_regnull_ses < ostat_norm_regnull_seslower | ostat_norm_regnull_ses > ostat_norm_regnull_sesupper)
# Stack the two null models into long format for the jitter plot.
jitterplotdat <- o2015goodsites %>%
  filter(trait == 'logweight') %>%
  select(siteID, ostat_norm_localnull_ses, ostat_norm_regnull_ses, local_significant, reg_significant)
n_sites <- nrow(jitterplotdat)
jitterplotdat <- data.frame(siteID = jitterplotdat$siteID,
                            ses = c(jitterplotdat$ostat_norm_localnull_ses, jitterplotdat$ostat_norm_regnull_ses),
                            significant = c(jitterplotdat$local_significant, jitterplotdat$reg_significant),
                            nullmodel = rep(c('Local','Regional'), each = n_sites))
# Annotation text marking the partitioned / neutral / overlapping regions of the y axis.
jitterplottext <- data.frame(lab = c('More partitioning\nthan expected', 'Neutral', 'More overlap\nthan expected'),
                             x = rep(1.5, 3),
                             y = c(-10, 1, 10))
# Jittered SES values per null model: gray = not significant, black = significant,
# with a dotted reference line at SES = 0 and region labels between the columns.
pj <- ggplot(jitterplotdat, aes(x=nullmodel,y=ses)) +
geom_hline(yintercept=0, linetype='dotted', color = 'blue', size=1) +
# height = 0 jitters horizontally only, so the SES values are not distorted.
geom_jitter(aes(color=significant), height=0, width=0.25) +
geom_text(aes(x,y,label=lab), data=jitterplottext, family = 'Helvetica') +
scale_x_discrete(name = 'Null model', labels = c('Local','Regional')) +
scale_y_continuous(name = expression(paste('SES of NO'[local]))) +
scale_color_manual(values = c('gray75', 'black')) +
theme_john + theme(legend.position = c(0.88,0.1))
ggsave('figs/msfigs/ostat_jitterplot.png', pj, height=5, width=5, dpi=400)
# Density plots -----------------------------------------------------------
# Local
# Sites present in the 2015 overlap results; used to subset the climate table.
good_sites <- unique(o2015$siteID)
# Order sites by minimum temperature of the coldest month (bio6) for facet order.
sites_temporder <- neonsitedata %>% arrange(bio6) %>% dplyr::select(siteID, bio6)
# Per-species body-mass density curves per site, annotated with overlap and MTCM.
pdensshade <- ggplot(filter(mam_capture_sitemerge, year == 2015, !siteID %in% c('HEAL','DELA','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
stat_density(adjust = 2, size = 1, aes(x = log10(weight), group = taxonID), fill = 'black', alpha = 0.25, geom='polygon', position = 'identity') + facet_wrap(~ siteID) +
scale_y_continuous(name = 'probability density', expand = c(0,0)) +
# x axis is log10(grams); breaks at 1,2,3 are relabelled 10,100,1000 g.
scale_x_continuous(name = 'body mass (g)', breaks = c(1, 2, 3), labels = c(10, 100, 1000), limits = c(0.5, 3.1)) +
geom_text(aes(label = paste0('Overlap = ', round(ostat_norm,3)), x = 1.2, y = 13), color = 'black', data = o2015 %>% filter(!siteID %in% c('HEAL','DELA','DSNY'), trait %in% 'logweight') %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste0('MTCM = ', round(bio6, 1), '°C'), x = 1.2, y = 15), color = 'black', data = neonsitedata %>% filter(siteID %in% good_sites) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
theme_john + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), strip.background = element_blank())
source('~/GitHub/NEON/code/facetadjust.r')
# NOTE(review): hardcoded user-specific Windows path — not portable.
png('C:/Users/Q/google_drive/NEON_EAGER/Figures/msfigs2017jan/figs5.png', height = 10, width = 12, res=400, units='in')
#facetAdjust(pdensshade)
# NOTE(review): bare `pdensshade` auto-prints only interactively; under source()
# this draws nothing into the device — confirm intended usage.
pdensshade
dev.off()
# Regional
# Build one long data frame of regional-pool individuals: one row per simulated
# individual, tagged with its site and log10 body mass.
# seq_along() (not 1:length()) is safe if the pool list is ever empty, and the
# list is preallocated instead of grown.
regpoolplotdat <- vector("list", length(siteregpoolsp_mam15_iucn))
for (i in seq_along(siteregpoolsp_mam15_iucn)) {
  regpoolplotdat[[i]] <- data.frame(siteID = names(siteregpoollist_mam15_iucn)[i],
                                    taxonID = siteregpoolsp_mam15_iucn[[i]],
                                    logweight = log10(siteregpoollist_mam15_iucn[[i]]$weight))
}
regpoolplotdat <- do.call('rbind', regpoolplotdat)
# Classify each site's regional-null result: SES below the lower envelope bound
# means significant partitioning, above the upper bound means significant
# overlap; otherwise the site is labelled 'neutral'.
o2015regstat <- o2015goodsites %>%
  filter(trait == 'logweight') %>%
  arrange(bio1) %>%
  transmute(siteID,
            reg_significant_partition = ostat_norm_regnull_ses < ostat_norm_regnull_seslower,
            reg_significant_overlap = ostat_norm_regnull_ses > ostat_norm_regnull_sesupper)
site_labels <- rep('neutral', nrow(o2015regstat))
site_labels[o2015regstat$reg_significant_partition] <- 'significantly partitioned'
site_labels[o2015regstat$reg_significant_overlap] <- 'significantly overlapping'
o2015regstat$stattext <- site_labels
# Drop the three excluded sites, then flag regional O-stats whose SES falls
# outside either null envelope (two-sided significance for each null model).
or2015goodsites <- filter(or2015, !siteID %in% c('DELA','DSNY','HEAL'))
or2015goodsites <- mutate(or2015goodsites,
                          allpool_significant = ostat_reg_allpoolnull_ses < ostat_reg_allpoolnull_seslower | ostat_reg_allpoolnull_ses > ostat_reg_allpoolnull_sesupper,
                          bysp_significant = ostat_reg_byspnull_ses < ostat_reg_byspnull_seslower | ostat_reg_byspnull_ses > ostat_reg_byspnull_sesupper)
# One-sided flags per null model, used to print 'filtered' / 'overdispersed' /
# 'neutral' labels on the density panels below.
or2015regstat <- or2015goodsites %>%
  filter(trait == 'logweight') %>%
  arrange(bio1) %>%
  transmute(siteID, ostat_reg,
            bysp_significant_filter = ostat_reg_byspnull_ses < ostat_reg_byspnull_seslower,
            bysp_significant_overdisperse = ostat_reg_byspnull_ses > ostat_reg_byspnull_sesupper,
            allpool_significant_filter = ostat_reg_allpoolnull_ses < ostat_reg_allpoolnull_seslower,
            allpool_significant_overdisperse = ostat_reg_allpoolnull_ses > ostat_reg_allpoolnull_sesupper)
bysp_label <- rep('neutral', nrow(or2015regstat))
bysp_label[or2015regstat$bysp_significant_filter] <- 'filtered'
bysp_label[or2015regstat$bysp_significant_overdisperse] <- 'overdispersed'
allpool_label <- rep('neutral', nrow(or2015regstat))
allpool_label[or2015regstat$allpool_significant_filter] <- 'filtered'
allpool_label[or2015regstat$allpool_significant_overdisperse] <- 'overdispersed'
or2015regstat$stattextbysp <- bysp_label
or2015regstat$stattextallpool <- allpool_label
# Regional species pool (black) vs. local community (skyblue) body-mass density
# per site, annotated with the outcome of the two regional null models (NM1 =
# all-pool, NM2 = by-species) and the regional O-stat value.
pdenslabels <- ggplot(filter(mam_capture_sitemerge, year == 2015, !siteID %in% c('HEAL','DELA','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID), logweight=log10(weight))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'black', alpha = 1, geom = 'polygon', position = 'identity', data = regpoolplotdat %>% filter(!siteID %in% c('HEAL','DELA','DSNY'))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'skyblue', alpha = 0.75, geom='polygon', position = 'identity') + facet_wrap(~ siteID) +
geom_text(aes(label = paste('NM1:',stattextallpool), x = 2, y = 5), data = or2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste('NM2:',stattextbysp), x = 2, y = 4), data = or2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = round(ostat_reg,3), x = 2, y = 3), data = or2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
scale_x_continuous(breaks = c(1, 2), labels = c(10, 100), name = expression(paste('log'[10], ' body mass'))) +
theme_john + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
#qSubtitle('Regional species pools (black) and local communities (blue)', 'Significance of "regional overlap stat" shown, sites ordered by temp')
# facetAdjust (sourced helper) repositions x-axes on incomplete facet rows.
source('~/GitHub/NEON/code/facetadjust.r')
png('figs/msfigs/regionalspeciespoolsdensity_withlabels.png', height=10, width=12, res=400, units='in')
facetAdjust(pdenslabels)
dev.off()
pdensconti <- ggplot(filter(mam_capture_sitemerge, year == 2015, !siteID %in% c('HEAL','DELA','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID), logweight=log10(weight))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'black', alpha = 1, geom = 'polygon', position = 'identity', data = filter(mam_capture_sitemerge, !siteID %in% c('HEAL','DELA','DSNY')) %>% select(-siteID) %>% mutate(logweight=log10(weight))) +
stat_density(adjust = 1, size = 1, aes(x = logweight), fill = 'skyblue', alpha = 0.75, geom='polygon', position = 'identity') + facet_wrap(~ siteID) +
geom_text(aes(label = paste('NM1:',stattextallpool), x = 1.5, y = 5), data = orc2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste('NM2:',stattextbysp), x = 1.5, y = 4), data = orc2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = round(ostat_reg,3), x = 1.5, y = 3), data = orc2015regstat %>% filter(!siteID %in% c('DELA','HEAL','DSNY')) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
scale_x_continuous(breaks = c(1, 2), labels = c(10, 100), name = expression(paste('log'[10], ' body mass'))) +
theme_john + theme(axis.text.y = element_blank(), axis.ticks.y = element_blank())
#qSubtitle('Continental species pools (black) and local communities (blue)', 'Significance of "regional overlap stat" shown, sites ordered by temp')
png('figs/msfigs/continentalspeciespoolsdensity_withlabels.png', height=10, width=12, res=400, units='in')
facetAdjust(pdensconti)
dev.off()
# Scatter plots -----------------------------------------------------------
# Local
# Local-scale scatter plots: median niche overlap (ostat_norm) vs. each
# environmental predictor, with the local-null confidence band drawn as a
# vertical segment behind each point (regional-null bands left commented out).
# NOTE(review): relies on globals o2015, bioclimnames and theme_john defined
# earlier in the file.
porawtemp <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio1)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = parse(text=bioclimnames[1])) +
theme_john
#qSubtitle('Overlap statistics for 2015 NEON mammals versus MAT', 'Raw values, local and regional nulls')
porawchao <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=chao1)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Species Richness (Chao1)') +
theme_john
#qSubtitle('Overlap statistics for 2015 NEON mammals versus Richness', 'Raw values, local and regional nulls')
porawmpd <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=mpd_z)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Mean Pairwise Distance SES') +
theme_john
#qSubtitle('Overlap statistics for 2015 NEON mammals versus MPD', 'Raw values, local and regional nulls')
porawprecip <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio12)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = bioclimnames[12]) +
theme_john
porawtempseas <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio4)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = bioclimnames[4]) +
theme_john
porawprecipseas <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=bio15)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = bioclimnames[15]) +
theme_john
porawtempcv <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=cv_bio1)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Among-year temperature CV') +
theme_john
porawprecipcv <- ggplot(o2015 %>% filter(trait=='logweight'), aes(x=cv_bio12)) +
geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'skyblue') +
# geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm), size = 1.5) +
labs(y = 'median overlap', x = 'Among-year precipitation CV') +
theme_john
# Direct site labels added on top of each panel, then save all eight figures
# to a local (Windows) figure directory.
library(directlabels)
sitelab <- geom_dl(aes(label = siteID, y = ostat_norm), method = list('top.bumptwice', cex = 0.75, vjust = -0.5, fontfamily = 'Helvetica'))
fp <- 'C:/Users/Q/Google Drive/NEON_EAGER/Figures/msfigs2017jan/figs6'
ggsave(file.path(fp,'scatterlocaltemp.png'), porawtemp + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocalprecip.png'), porawprecip + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocalrichness.png'), porawchao + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocalmpd.png'), porawmpd + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocaltempseas.png'), porawtempseas + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocalprecipseas.png'), porawprecipseas + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocaltempcv.png'), porawtempcv + sitelab, height=5, width=5, dpi=400)
ggsave(file.path(fp,'scatterlocalprecipcv.png'), porawprecipcv + sitelab, height=5, width=5, dpi=400)
# Regional
# Regional-scale scatter plots: regional O-stat (ostat_reg) vs. each predictor,
# with both null-model bands drawn as segments (green = all-pool null,
# plum = by-species null). Mirrors the local-scale block above.
# NOTE(review): relies on globals or2015goodsites, bioclimnames, theme_john.
porrawtemp <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = parse(text=bioclimnames[1])) +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus MAT', 'Raw values, all-pool and by-species nulls')
porrawprecip <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio12)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio12), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = bioclimnames[12]) +
theme_john
porrawchao <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Chao1 Species Richness') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus richness', 'Raw values, all-pool and by-species nulls')
porrawmpd <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Mean Pairwise Distance SES') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus mpd', 'Raw values, all-pool and by-species nulls')
porrawtempseas <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio4)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio4), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = bioclimnames[4]) +
theme_john
porrawprecipseas <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio15)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio15), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = bioclimnames[15]) +
theme_john
porrawtempcv <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=cv_bio1)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=cv_bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Among-year temperature CV') +
theme_john
porrawprecipcv <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=cv_bio12)) +
geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'seagreen3') +
geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=cv_bio12), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = 'Among-year precipitation CV') +
theme_john
# Direct site labels (regional y variable) and save all eight figures.
sitelabreg <- geom_dl(aes(label = siteID, y = ostat_reg), method = list('top.bumptwice', cex = 0.75, vjust = -0.5, fontfamily = 'Helvetica'))
ggsave('figs/msfigs/scatterregionaltemp.png', porrawtemp + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionalprecip.png', porrawprecip + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionalrichness.png', porrawchao + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionalmpd.png', porrawmpd + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionaltempseas.png', porrawtempseas + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionalprecipseas.png', porrawprecipseas + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionaltempcv.png', porrawtempcv + sitelabreg, height=5, width=5, dpi=400)
ggsave('figs/msfigs/scatterregionalprecipcv.png', porrawprecipcv + sitelabreg, height=5, width=5, dpi=400)
# 27 Oct: simpler plots with logistic line --------------------------------
# Inverse-logit (logistic) curve with intercept b0 and slope b1; used by
# stat_function() below to overlay fitted logistic regression lines.
fx <- function(x, b0, b1) stats::plogis(b0 + b1 * x)
# Simpler local-scale scatter plots with a fitted logistic line overlaid.
# tempco/chaoco pull intercept+slope from previously fitted models
# (reglocalbio, reglocalchao); points colored by local-null significance.
# NOTE(review): $coeff$mean indexing suggests beta-regression (betareg-style)
# summaries — confirm against where reglocalbio is fitted.
tempco <- summary(reglocalbio)$coeff$mean[,1]
chaoco <- summary(reglocalchao)$coeff$mean[,1]
csc <- scale_color_manual(values = c('gray75','black'))
porawtemp <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
#geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'skyblue') +
#geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm, color=local_significant), size = 2) +
stat_function(geom='line', fun = fx, args=list(b0 = tempco[1], b1 = tempco[2]), color = 'blue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = parse(text=bioclimnames[1])) +
theme_john + theme(legend.position = 'none') + csc
#qSubtitle('Overlap statistics for 2015 NEON mammals versus MAT', 'Raw values, local and regional nulls')
porawchao <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
#geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'skyblue') +
#geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm, color=local_significant), size = 2) +
stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'blue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = 'Species Richness (Chao1)') +
theme_john + theme(legend.position = 'none') + csc
porawmpd <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
#geom_segment(aes(y = ostat_norm_localnull_lower, yend = ostat_norm_localnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'skyblue') +
#geom_segment(aes(y = ostat_norm_regnull_lower, yend = ostat_norm_regnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'goldenrod') +
geom_point(aes(y = ostat_norm, color=local_significant), size = 2) +
labs(y = 'Niche Overlap', x = 'Mean Pairwise Distance SES') +
theme_john+ theme(legend.position = 'none') + csc
ggsave('figs/msfigs/simplescatterlocaltemp.png', porawtemp + theme(aspect.ratio=1), height=5, width=5, dpi=400)
ggsave('figs/msfigs/simplescatterlocalrichness.png', porawchao + theme(aspect.ratio=1), height=5, width=5, dpi=400)
# Make these into figures a and b
panela <- porawtemp# + theme(aspect.ratio=1)
panelb <- porawchao + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
panelc <- porawmpd + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
library(gridExtra)
grid.arrange(panela, panelb, nrow=1, widths=c(1.05, 0.95))
library(grid)
# Three panels glued edge-to-edge by binding the gtables (shared y axis on panel a).
png('figs/msfigs/simplescatter3panels.png', height=4, width=12, res=400, units='in')
grid.newpage()
grid.draw(cbind(ggplotGrob(panela), ggplotGrob(panelb), ggplotGrob(panelc), size = "last"))
dev.off()
# Logit scale scatterplots ------------------------------------------------
library(scales)
# Custom axis transformations: plotting on a logit-stretched y axis so
# overlap values near 0 and 1 are spread out. plogis/qlogis are the
# inverse-logit/logit pair from stats.
inverse_logit_trans <- trans_new("inverse logit",
transform = plogis,
inverse = qlogis)
logit_trans <- trans_new("logit",
transform = qlogis,
inverse = plogis)
porawtemp + scale_y_continuous(trans = inverse_logit_trans)
# Rebuild the local scatter plots with the y axis drawn on the logit scale
# (coord_trans transforms after stats, so the fitted curve stays correct).
porawtemp <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
geom_point(aes(y = ostat_norm, color=local_significant), size = 3) +
# geom_abline(intercept = tempco[1], slope = tempco[2], color = 'dodgerblue', size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = tempco[1], b1 = tempco[2]), color = 'dodgerblue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = parse(text=bioclimnames[1])) +
theme_john + theme(legend.position = 'none') + csc +
coord_trans(y = logit_trans) +
scale_x_continuous(expand = c(0,0), breaks = c(0,10,20), labels=c(0,10,20), limits=c(-0.5,21.5))
porawchao <- ggplot(o2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
geom_point(aes(y = ostat_norm, color=local_significant), size = 3) +
#geom_abline(intercept = chaoco[1], slope = chaoco[2], color = 'dodgerblue', size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'dodgerblue', size = 1.5, n=9999) +
labs(y = 'Niche Overlap', x = 'Species Richness (Chao1)') +
theme_john + theme(legend.position = 'none') + csc +
coord_trans(y = logit_trans) +
scale_x_continuous(expand = c(0,0), breaks = c(5,10,15), labels=c(5,10,15), limits=c(4,16))
# Regional
# Regional-scale simple scatter plots with logistic fits (models
# regregionalmpd / regregionalchao fitted earlier).
# NOTE(review): chaoco is overwritten here — the local-scale plots above must
# be rendered before this point or they will pick up the regional coefficients.
mpdco <- summary(regregionalmpd)$coeff$mean[,1]
chaoco <- summary(regregionalchao)$coeff$mean[,1]
porrawtemp <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = parse(text=bioclimnames[1])) +
theme_john
porrawchao <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Chao1 Species Richness') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus richness', 'Raw values, all-pool and by-species nulls')
porrawmpd <- ggplot(or2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
stat_function(geom='line', fun = fx, args=list(b0 = mpdco[1], b1 = mpdco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Mean Pairwise Distance SES') +
theme_john
panela <- porrawtemp# + theme(aspect.ratio=1)
panelb <- porrawchao + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
panelc <- porrawmpd + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
library(grid)
png('figs/msfigs/simplescatter3panels_regional.png', height=5, width=15, res=400, units='in')
grid.newpage()
grid.draw(cbind(ggplotGrob(panela), ggplotGrob(panelb), ggplotGrob(panelc), size = "last"))
dev.off()
# Continental-pool version (orc2015goodsites); no fitted lines overlaid.
pocrawtemp <- ggplot(orc2015goodsites %>% filter(trait=='logweight'), aes(x=bio1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=bio1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
labs(y = expression(NO[regional]), x = parse(text=bioclimnames[1])) +
theme_john
pocrawchao <- ggplot(orc2015goodsites %>% filter(trait=='logweight'), aes(x=chao1)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=chao1), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
#stat_function(geom='line', fun = fx, args=list(b0 = chaoco[1], b1 = chaoco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Chao1 Species Richness') +
theme_john
#qSubtitle('Regional O-stats for 2015 NEON mammals versus richness', 'Raw values, all-pool and by-species nulls')
pocrawmpd <- ggplot(orc2015goodsites %>% filter(trait=='logweight'), aes(x=mpd_z)) +
# geom_segment(aes(y = ostat_reg_allpoolnull_lower, yend = ostat_reg_allpoolnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'seagreen3') +
# geom_segment(aes(y = ostat_reg_byspnull_lower, yend = ostat_reg_byspnull_upper, xend=mpd_z), alpha = 0.5, size = 1.5, color = 'plum2') +
geom_point(aes(y = ostat_reg), size = 1.5) +
# stat_function(geom='line', fun = fx, args=list(b0 = mpdco[1], b1 = mpdco[2]), color = 'blue', size = 1.5) +
labs(y = expression(NO[regional]), x = 'Mean Pairwise Distance SES') +
theme_john
panela <- pocrawtemp# + theme(aspect.ratio=1)
panelb <- pocrawchao + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
panelc <- pocrawmpd + theme(axis.text.y=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank())
library(grid)
png('figs/msfigs/simplescatter3panels_continental.png', height=5, width=15, res=400, units='in')
grid.newpage()
grid.draw(cbind(ggplotGrob(panela), ggplotGrob(panelb), ggplotGrob(panelc), size = "last"))
dev.off()
|
## --------------------------------------------- ##
# #
# sE.progress #
# fE.progress #
# #
## --------------------------------------------- ##
## ------------------------------------------------------------------------------- ##
# sE.progress(So, time, Km, Vm, unit_S, unit_t, I, Kic, Kiu, replicates, ...) #
## ------------------------------------------------------------------------------- ##
#' Progress Curve for Enzyme-Catalyzed Reaction
#' @description Simulates the evolution of the substrate concentration along time.
#' @usage sE.progress(So, time, Km, Vm, unit_S = 'mM', unit_t = 'min', I = 0, Kic = Inf, Kiu = Inf, replicates = 3, error = 'a', sd = 0.005, plot = TRUE)
#' @param So initial substrate concentration.
#' @param time reaction timespan.
#' @param Km Michaelis constant.
#' @param Vm maximal velocity.
#' @param unit_S concentration unit.
#' @param unit_t time unit.
#' @param I inhibitor concentration.
#' @param Kic competitive inhibition constant.
#' @param Kiu uncompetitive inhibition constant.
#' @param replicates number of replicates for the dependent variable.
#' @param error error model; one of c('a', 'absolute', 'r', 'relative').
#' @param sd standard deviation of the error.
#' @param plot logical. If TRUE, the progress curve is plotted.
#' @details When sd is different from 0, a normally distributed error (absolute
#' or relative, depending on the 'error' argument) is added to the variable St.
#' @return Returns a dataframe whose first two columns are time and St (without
#' error), followed by one column per replicate; the last two columns are the
#' mean and sd of the replicated St values.
#' @author Juan Carlos Aledo
#' @examples
#' @seealso fE.progress()
#' @importFrom VGAM lambertW
#' @export

sE.progress <- function(So, time, Km, Vm,
                        unit_S = 'mM', unit_t = 'min',
                        I = 0, Kic = Inf, Kiu = Inf,
                        replicates = 3,
                        error = 'a',
                        sd = 0.005,
                        plot = TRUE){
  ## ------------------------- Input validation ----------------------- ##
  # Fail fast: below 2*sd the signal would be indistinguishable from the
  # simulated noise (the same threshold used to truncate the curve below).
  if (So < 2*sd) {stop ('So lower than twice the SD')}
  ## -------------- Km and Vm apparent when I is present -------------- ##
  # Mixed-inhibition apparent parameters; with I = 0 they reduce to Km, Vm.
  Km_a <- Km*( (1 + I/Kic)/(1 + I/Kiu) )
  Vm_a <- Vm/(1 + I/Kic)
  time <- seq(from = 0, to = time, by = (time/100))  # 101 evenly spaced points
  ## ---------------- Formatting the output dataframe ----------------- ##
  output <- as.data.frame(matrix(rep(NA, length(time)*(replicates + 2)),
                                 ncol = replicates + 2))
  # Columns: time, noise-free substrate, then one column per replicate.
  # seq_len() (instead of 1:replicates) keeps replicates = 0 well-behaved.
  names(output) <- c('t', 'St', LETTERS[seq_len(replicates)])
  output$t <- time
  ## -------- Computing the variable substrate as function of t ------- ##
  set.seed(123)  # reproducible simulated noise
  counter <- 0
  for (t in time){
    counter <- counter + 1
    # Closed-form integrated Michaelis-Menten (Schnell-Mendoza):
    # S(t) = Km * W((So/Km) * exp((So - Vm*t)/Km)), W = Lambert W function.
    argument <- (So/Km_a)*exp((-Vm_a*t + So)/Km_a)
    w <- VGAM::lambertW(argument)
    St <- Km_a*w # Substrate at time t
    output$St[counter] <- St
    for (j in seq_len(replicates)){
      if (error == 'r' | error == 'relative'){
        Se <- St * rnorm(1, mean = 1, sd = sd)  # proportional error
        output[counter, j+2] <- Se
      } else if (error == 'a' | error == 'absolute'){
        Se <- St + rnorm(1, mean = 0, sd = sd)  # additive error
        output[counter, j+2] <- Se
      }
    }
  }
  ## -------------- Stop when St drops below a threshold -------------- ##
  output[1, -1] <- So  # at t = 0 substrate is exactly So (no noise)
  output <- output[output$St > 2*sd, ]  # drop points lost in the noise floor
  output[output < 0] <- 0  # concentrations cannot be negative
  ## --------------- Computing mean and sd if required ---------------- ##
  if (ncol(output) > 3){
    Substrate <- output[,-c(1,2)]
    output$S_mean <- apply(Substrate, MARGIN = 1, mean)
    output$S_sd <- apply(Substrate, MARGIN = 1, sd)
  } else if (ncol(output) == 3){
    # Single replicate: its column is named 'A'.
    output$S_mean <- output$A
    output$S_sd <- 0
  } else {
    # No replicates: fall back to the noise-free values.
    output$S_mean <- output$St
    output$S_sd <- 0
  }
  ## ---------------------- Plotting the results ---------------------- ##
  if (plot){
    plot(output$t, output$S_mean, ty = 'l', col = 'blue',
         xlab = paste("Time(", unit_t, ")", sep = ""), ylab = paste('[S]', unit_S))
  }
  return(output)
}
## ------------------------------------------------------------------------------- ##
# fE.progress(data) #
## ------------------------------------------------------------------------------- ##
#' Progress Curve for Enzyme-Catalyzed Reaction
#' @description Fits the progress curve of an enzyme-catalyzed reaction.
#' @usage fE.progress(data, unit_S = 'mM', unit_t = 'min')
#' @param data a dataframe whose first column is time and whose second column
#' is substrate concentration; the first row gives the initial concentration.
#' @param unit_S concentration unit.
#' @param unit_t time unit.
#' @details The data are fitted by non-linear least squares (nls) to the
#' integrated Michaelis-Menten equation expressed with the Lambert W function;
#' the linearization performed by int.MM() provides the starting values.
#' @return Returns a list with two elements: 'parameters', a named vector with
#' the fitted Km and Vm, and 'data', the input dataframe with an extra column
#' of fitted St values.
#' @author Juan Carlos Aledo
#' @examples
#' @references Biochem Mol Biol Educ.39:117-25 (10.1002/bmb.20479).
#' @seealso sE.progress(), int.MM()
#' @importFrom VGAM lambertW
#' @export

fE.progress <- function(data, unit_S = 'mM', unit_t = 'min'){
  names(data) <- c('t', 'St')
  So <- data$St[1]  # initial substrate concentration
  ## ----------------------- Estimating the seed ----------------------- ##
  # Linearized fit supplies starting values for the non-linear regression.
  # (Not named 't' so it cannot be confused with the time column referenced
  # inside the nls formula.)
  linear_fit <- int.MM(data)
  seed <- list(Km = unname(linear_fit$parameters[1]),
               Vm = unname(linear_fit$parameters[2]))
  ## ------------------------ Fitting the curve ------------------------ ##
  # Integrated Michaelis-Menten: St = Km * W((So/Km) * exp((So - Vm*t)/Km));
  # 't' in the formula resolves to the time column of 'data'.
  model <- nls(St ~ (Km * VGAM::lambertW((So/Km)*exp((-Vm*t + So)/Km))),
               data = data, start = seed, trace = TRUE)
  Km <- round(summary(model)$coefficient[1,1], 3)
  sd_Km <- round(summary(model)$coefficient[1,2], 3)
  Vm <- round(summary(model)$coefficient[2,1], 3)
  sd_Vm <- round(summary(model)$coefficient[2,2], 3)  # rounded like sd_Km
  ## ------------------------ Fitted St values ------------------------- ##
  argument <- (So/Km)*exp((-Vm*data$t + So)/Km)
  w <- VGAM::lambertW(argument)
  fitted_St <- Km*w # Substrate at time t according to the fitted curve
  data$fitted_St <- fitted_St
  ## --------------------------- Plotting data ------------------------- ##
  parameters <- paste('Km: ', Km, ' Vm: ', Vm, sep = "")
  plot(data$t, data$St, ty = 'p', col = 'red', pch = 20,
       xlab = paste("time (", unit_t, ")", sep = ""),
       ylab = paste("[S] (", unit_S, ")", sep = ""),
       main = parameters)
  points(data$t, data$fitted_St, ty = 'l', col = 'blue')
  ## ------------------------------- Output ---------------------------- ##
  KmVm <- c(Km, Vm)
  names(KmVm) <- c('Km', 'Vm')
  output <- list(KmVm, data)
  names(output) <- c('parameters', 'data')
  return(output)
}
|
/R/Progress.R
|
no_license
|
jcaledo/renz
|
R
| false
| false
| 6,407
|
r
|
## --------------------------------------------- ##
# #
# sE.progress #
# fE.progress #
# #
## --------------------------------------------- ##
## ------------------------------------------------------------------------------- ##
# sE.progress(So, time, Km, Vm, unit_S, unit_t, I, Kic, Kiu, replicates, ...) #
## ------------------------------------------------------------------------------- ##
#' Progress Curve for Enzyme-Catalyzed Reaction
#' @description Simulates the evolution of the substrate concentration along time.
#' @usage sE.progress(So, time, Km, Vm, unit_S = 'mM', unit_t = 'min', I = 0, Kic = Inf, Kiu = Inf, replicates = 3, error = 'a', sd = 0.005, plot = TRUE)
#' @param So initial substrate concentration.
#' @param time reaction timespan.
#' @param Km Michaelis constant.
#' @param Vm maximal velocity.
#' @param unit_S concentration unit.
#' @param unit_t time unit.
#' @param I inhibitor concentration.
#' @param Kic competitive inhibition constant.
#' @param Kiu uncompetitive inhibition constant.
#' @param replicates number of replicates for the dependent variable.
#' @param error error model; one of c('a', 'absolute', 'r', 'relative').
#' @param sd standard deviation of the error.
#' @param plot logical. If TRUE, the progress curve is plotted.
#' @details When sd is different from 0, a normally distributed error (absolute
#' or relative, depending on the 'error' argument) is added to the variable St.
#' @return Returns a dataframe whose first two columns are time and St (without
#' error), followed by one column per replicate; the last two columns are the
#' mean and sd of the replicated St values.
#' @author Juan Carlos Aledo
#' @examples
#' @seealso fE.progress()
#' @importFrom VGAM lambertW
#' @export

sE.progress <- function(So, time, Km, Vm,
                        unit_S = 'mM', unit_t = 'min',
                        I = 0, Kic = Inf, Kiu = Inf,
                        replicates = 3,
                        error = 'a',
                        sd = 0.005,
                        plot = TRUE){
  ## ------------------------- Input validation ----------------------- ##
  # Fail fast: below 2*sd the signal would be indistinguishable from the
  # simulated noise (the same threshold used to truncate the curve below).
  if (So < 2*sd) {stop ('So lower than twice the SD')}
  ## -------------- Km and Vm apparent when I is present -------------- ##
  # Mixed-inhibition apparent parameters; with I = 0 they reduce to Km, Vm.
  Km_a <- Km*( (1 + I/Kic)/(1 + I/Kiu) )
  Vm_a <- Vm/(1 + I/Kic)
  time <- seq(from = 0, to = time, by = (time/100))  # 101 evenly spaced points
  ## ---------------- Formatting the output dataframe ----------------- ##
  output <- as.data.frame(matrix(rep(NA, length(time)*(replicates + 2)),
                                 ncol = replicates + 2))
  # Columns: time, noise-free substrate, then one column per replicate.
  # seq_len() (instead of 1:replicates) keeps replicates = 0 well-behaved.
  names(output) <- c('t', 'St', LETTERS[seq_len(replicates)])
  output$t <- time
  ## -------- Computing the variable substrate as function of t ------- ##
  set.seed(123)  # reproducible simulated noise
  counter <- 0
  for (t in time){
    counter <- counter + 1
    # Closed-form integrated Michaelis-Menten (Schnell-Mendoza):
    # S(t) = Km * W((So/Km) * exp((So - Vm*t)/Km)), W = Lambert W function.
    argument <- (So/Km_a)*exp((-Vm_a*t + So)/Km_a)
    w <- VGAM::lambertW(argument)
    St <- Km_a*w # Substrate at time t
    output$St[counter] <- St
    for (j in seq_len(replicates)){
      if (error == 'r' | error == 'relative'){
        Se <- St * rnorm(1, mean = 1, sd = sd)  # proportional error
        output[counter, j+2] <- Se
      } else if (error == 'a' | error == 'absolute'){
        Se <- St + rnorm(1, mean = 0, sd = sd)  # additive error
        output[counter, j+2] <- Se
      }
    }
  }
  ## -------------- Stop when St drops below a threshold -------------- ##
  output[1, -1] <- So  # at t = 0 substrate is exactly So (no noise)
  output <- output[output$St > 2*sd, ]  # drop points lost in the noise floor
  output[output < 0] <- 0  # concentrations cannot be negative
  ## --------------- Computing mean and sd if required ---------------- ##
  if (ncol(output) > 3){
    Substrate <- output[,-c(1,2)]
    output$S_mean <- apply(Substrate, MARGIN = 1, mean)
    output$S_sd <- apply(Substrate, MARGIN = 1, sd)
  } else if (ncol(output) == 3){
    # Single replicate: its column is named 'A'.
    output$S_mean <- output$A
    output$S_sd <- 0
  } else {
    # No replicates: fall back to the noise-free values.
    output$S_mean <- output$St
    output$S_sd <- 0
  }
  ## ---------------------- Plotting the results ---------------------- ##
  if (plot){
    plot(output$t, output$S_mean, ty = 'l', col = 'blue',
         xlab = paste("Time(", unit_t, ")", sep = ""), ylab = paste('[S]', unit_S))
  }
  return(output)
}
## ------------------------------------------------------------------------------- ##
# fE.progress(data) #
## ------------------------------------------------------------------------------- ##
#' Progress Curve for Enzyme-Catalyzed Reaction
#' @description Fits the progress curve of an enzyme-catalyzed reaction.
#' @usage fE.progress(data, unit_S = 'mM', unit_t = 'min')
#' @param data a dataframe whose first column is time and second column is the substrate concentration.
#' @param unit_S unit in which the substrate concentration is expressed.
#' @param unit_t unit in which the time is expressed.
#' @details The fit is carried out by non-linear least squares (nls) on the integrated Michaelis-Menten equation, using the parameters estimated by int.MM() as the starting seed.
#' @return A list with two elements: 'parameters' (named vector with the fitted Km and Vm) and 'data' (the input data plus a column of fitted substrate concentrations).
#' @author Juan Carlos Aledo
#' @examples
#' ## fE.progress(my_progress_curve_dataframe)
#' @references Biochem Mol Biol Educ.39:117-25 (10.1002/bmb.20479).
#' @seealso sE.progress(), int.MM()
#' @importFrom VGAM lambertW
#' @export
fE.progress <- function(data, unit_S = 'mM', unit_t = 'min'){
  ## Fits the integrated Michaelis-Menten equation
  ##   St = Km * W((So/Km) * exp((So - Vm*t)/Km))
  ## (W = Lambert W function) to an observed substrate progress curve.
  ## Returns the fitted Km/Vm and the data augmented with fitted values.
  names(data) <- c('t', 'St')
  So <- data$St[1] # initial substrate concentration
  ## ----------------------- Estimating the seed ------------------------ ##
  ## int.MM() gives starting values for the non-linear fit. The result is
  ## named 'mm' (the original used 't', which shadowed the 't' column that
  ## the nls formula below resolves from 'data').
  mm <- int.MM(data)
  seed <- list(Km = unname(mm$parameters[1]), Vm = unname(mm$parameters[2]))
  ## ------------------------ Fitting the curve ------------------------- ##
  model <- nls(St ~ (Km * VGAM::lambertW((So/Km)*exp((-Vm*t + So)/Km))),
               data = data, start = seed, trace = TRUE)
  Km <- round(summary(model)$coefficient[1,1], 3)
  sd_Km <- round(summary(model)$coefficient[1,2], 3)
  Vm <- round(summary(model)$coefficient[2,1], 3)
  ## rounded to 3 decimals for consistency with sd_Km (was left unrounded)
  sd_Vm <- round(summary(model)$coefficient[2,2], 3)
  ## ------------------------ Fitted St values ------------------------- ##
  argument <- (So/Km)*exp((-Vm*data$t + So)/Km)
  w <- VGAM::lambertW(argument)
  fitted_St <- Km*w # Substrate at time t according to the fitted curve
  data$fitted_St <- fitted_St
  ## --------------------------- Plotting data ------------------------- ##
  parameters <- paste('Km: ', Km, ' Vm: ', Vm, sep = "")
  plot(data$t, data$St, ty = 'p', col = 'red', pch = 20,
       xlab = paste("time (", unit_t, ")", sep = ""),
       ylab = paste("[S] (", unit_S, ")", sep = ""),
       main = parameters)
  points(data$t, data$fitted_St, ty = 'l', col = 'blue')
  ## ------------------------------- Output ---------------------------- ##
  KmVm <- c(Km, Vm)
  names(KmVm) <- c('Km', 'Vm')
  output <- list(parameters = KmVm, data = data)
  return(output)
}
|
# Team FINANCE 3
# Project Deliverable 2 - Perform analysis on the dataset and build graphical representations, predictions, etc.
# DATA SET - https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews
# The OBJECTIVE is to perform exploratory analysis, predict the rating of the clothes, and do clustering analysis.
# Line 49 - SECTION 1: Exploratory Analysis of different variables
# Line 120 - SECTION 2: Exploratory Analysis of text column 'Review.Text' and numerical column 'Rating'
# Line 246 - SECTION 3: Sentiment Analysis on text column 'Review.Text', formation of Wordclouds
# Line 403 - SECTION 4: Data Preparation for Predictive Modelling (TF, TF-IDF of text columns 'Review.Text' and 'Title'),
#                       and Exploratory Analysis from Corpus for 'Review.Text'
# Line 611 - SECTION 5: Predictive Modelling (CART and Regression) using only text columns 'Review.Text' and 'Title'
# Line 734 - SECTION 6: Clustering and Predictive Modelling using clustering techniques for non-text columns, dendrogram for text columns
# Line 926 - SECTION 7: Looking at Future, what else we could have done.
# Pin the pre-3.6.0 sampling RNG so set.seed() reproduces earlier results.
# RNGversion() requires a character string: the original numeric 3.6 errors
# inside strsplit() with "non-character argument", so it must be quoted.
RNGversion(vstr = "3.6")
rm(list=ls())
# Load all necessary libraries
library(ggplot2); library(ggthemes); library(tidyr); library(dplyr)
library(cluster); library(mclust)
library(stringr); library(corrplot);
library(tidytext);library(janeaustenr); library(gridExtra)
# Read the cleaned data set from Project 1.1
# NOTE: hard-coded, machine-specific path — adjust before running elsewhere.
getwd();
data = read.csv('/Users/zhouqiao/Desktop/Clean_Womens_Reviews_Simple.csv', stringsAsFactors = F)
# Evaluate the structure and contents of the dataset
str(data)
summary(data)
# Check column names
names(data)
# The first column 'X' is the original (given) serial number for the rows. We rename it to 'id' for simplicity
names(data)[1] = "id"
dim(data)
# Cleaned dataset with 19662 rows and 11 columns
###############################################################################################################
###############################################################################################################
## SECTION 1: Exploratory Analysis of different variables
# Part 1: Ratings - Number of Reviewers by Age (Age Group)
# Bucket Age into five 20-year bins; the new 'bins' factor column on the
# global 'data' frame is reused by later sections.
data$bins = cut(data$Age, breaks = c(0,20,40,60,80,100), labels = c("Centennials(0-20)","Young Adults(21-40)",
                                                                   "Adults(41-60)","Retired(61-80)","Traditionalists(81-100)"))
age_groups = data %>% select(bins,Age) %>% group_by(bins) %>% summarise(count = n())
ggplot(data=age_groups,aes(x=bins,y=count)) + geom_bar(stat = "identity",fill="blue") +
  labs(x = 'Age Groups', y = 'Number of Reviews')
## - Age groups 21-40 are the users who use e-commerce the most, hence they have given the most reviews
## - The lowest raters are the ones below 20 years, reasons maybe limited access to internet or devices
## - See visualization graph
######################################
# Part 2: Distribution of Departments where each Age Group tends to shop the most
age_groups_dept = data %>% select(bins,Class.Name, id) %>% group_by(Class.Name, bins) %>% summarise(count = n())
ggplot(age_groups_dept, aes(x = bins, y = count,fill=Class.Name)) + geom_bar(stat='identity') +
  labs(x = 'Age Groups', y = 'Number of Reviews') + theme(axis.text.x = element_text(angle = 90, hjust = 1))
## - 'Dresses' are the most common, and are shopped by age groups 21 to 60
## - See visualization graph
######################################
# Part 3: Most Reviewed Products by 'Class.Name'
most_reviewed_products <- data %>% select(Class.Name) %>% group_by(Class.Name) %>% summarise(count = n()) %>% arrange(desc(count)) %>% head(10)
colnames(most_reviewed_products)[1] = "Class of Product"
colnames(most_reviewed_products)[2] = "Number of Reviews"
#install.packages('gridExtra')
library(gridExtra)
# Render the top-10 table as a grid graphic instead of console output
table1 = tableGrob(most_reviewed_products)
grid.arrange(table1,ncol=1)
## - We see that 'Dresses' top the list followed by 'Knits' and 'Blouses'
## - See visualization table
######################################
# Part 4: Understanding the distribution of 'Rating' by 'Department.Name'
# prop.table(table(...)) gives the share of reviews per department; *100 -> %
ggplot(data.frame(prop.table(table(data$Department.Name))), aes(x=Var1, y = Freq*100)) + geom_bar(stat = 'identity') +
  xlab('Department Name') + ylab('Percentage of Reviews/Ratings (%)') + geom_text(aes(label=round(Freq*100,2)), vjust=-0.25) +
  ggtitle('Percentage of Reviews By Department')
## - 'Tops' have the highest percentage of reviews and ratings in this dataset, followed by 'dresses'.
## - Items in the 'Jackets' and 'Trend' department received the lowest number of reviews.
## - See visualization graph
###############################################################################################################
###############################################################################################################
## SECTION 2: Exploratory Analysis of text column 'Review.Text' and numerical column 'Rating'
# Explore the numeric column 'Rating' and the text column 'Review.Text' and understand their statistical features and distribution
# Part 1: Ratings - mean and median
# Mean and Median Ratings
data %>%
  summarize(Average_rating = mean(Rating), Median_rating = median(Rating))
# Distribution of Ratings
ggplot(data = data, aes(x = Rating)) + geom_histogram(fill = 'black') + theme_grey() + coord_flip()
## - Average Rating = 4.18 and Median Rating = 5
## - Indicates most of the customers have rated all the different products positively, with higher ratings for most reviews
## - See visualization graph
######################################
# Part 2: Review.Text - Character, Words and Sentences counts for all Reviews
# Characters
mean_characters = mean(nchar(data$Review.Text));
median_characters = median(nchar(data$Review.Text))
# Words
# '\\S+' counts whitespace-delimited tokens (runs of non-whitespace)
mean_words = mean(str_count(string = data$Review.Text,pattern = '\\S+'));
median_words = median(str_count(string = data$Review.Text,pattern = '\\S+'))
# Sentences
# regex approximates a sentence as a text run terminated by '.', '?' or '!'
mean_sentences = mean(str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]"));
median_sentences = median(str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]"))
counts = data.frame(Variables = c("Characters", "Words", "Sentences"),
                    Mean = round(c(mean_characters, mean_words, mean_sentences),2),
                    Median = round(c(median_characters, median_words, median_sentences),2))
counts
## - The counts for each are more or less similar in their own mean and median
## - Implies that the counts distribution is highly symmetric and the skewness is low across the individual counts.
######################################
# Part 3: Review.Text length and Ratings - correlation
# Characters
cor(nchar(data$Review.Text),data$Rating)
cor.test(nchar(data$Review.Text),data$Rating)
# Words
cor(str_count(string = data$Review.Text,pattern = '\\S+'),data$Rating)
cor.test(str_count(string = data$Review.Text,pattern = '\\S+'),data$Rating)
# Sentences
cor(str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]"),data$Rating)
cor.test(str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]"),data$Rating)
## - Cor for: Characters = -0.05478506, Words = -0.05622374, Sentences = 0.01813276
## - Low correlations for all three variables
## - Implies that the length of the 'Review.Text' do not really impact the 'Rating' given.
######################################
# Part 4: 'Review.Text' text characteristics and Ratings - correlation
# Screaming Reviews - Upper Case Letters
# Share of upper-case characters per review, normalized by review length
proportionUpper = str_count(data$Review.Text,pattern='[A-Z]')/nchar(data$Review.Text)
cor(proportionUpper,data$Rating)
cor.test(proportionUpper,data$Rating)
## - Low correlations for all parameters
## - Implies that the Upper Case letters in 'Review.Text' do not really impact the 'Ratings'
# Exclamation Marks
summary(str_count(data$Review.Text,pattern='!'))
proportionExclamation = str_count(data$Review.Text,pattern='!')/nchar(data$Review.Text)
cor(proportionExclamation,data$Rating)
cor.test(proportionExclamation,data$Rating)
## - Cor for: Upper Case = 0.05779606, Exclamation Marks = 0.1776584
## - Low correlations for both variables
## - Implies that the Exclamation Marks in 'Review.Text' do not greatly impact the 'Ratings'
## - But it has more impact than Upper case letter as its correlation is higher than Upper Case letters
######################################
# Part 5: 'Review.Text' - most common words
# Most common words, out of all words
library(qdap)
freq_terms(text.var = data$Review.Text,top = 10)
plot(freq_terms(text.var = data$Review.Text,top = 10))
## - The most commonly used words are - the, i, and
## - But this is irrelevant. We need to remove stop words before computing this
## - See visualization for graph
# Most common words, excluding stop words
# Top200Words is qdap's built-in list of the 200 most frequent English words
freq_terms(text.var=data$Review.Text,top=10,stopwords = Top200Words)
plot(freq_terms(text.var=data$Review.Text,top=10,stopwords = Top200Words))
## - The top used words are - dress, size, love
## - See visualization for graph
## - (Check Section 3, Part 5 (Line 367) below for wordcloud of common words)
## - (Check Section 4, Part 5 (Line 595) below for wordcloud from corpus, which removes stop words, punctuations, sparse terms, etc)
###############################################################################################################
###############################################################################################################
## SECTION 3: Sentiment Analysis on text column 'Review.Text', formation of Wordclouds
# Conduct Sentiment Analysis using the various Lexicons, and bag of words, and word clouds
# Part 1: Binary Sentiment (positive/negative) - Bing Lexicon
# Tokenize each review to one word per row, keep only words in the Bing
# lexicon, then compute the positive/negative split.
data %>% select(id,Review.Text)%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% ungroup()%>% inner_join(get_sentiments('bing'))%>%
  group_by(sentiment)%>% summarize(n = n())%>% mutate(proportion = n/sum(n))
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(get_sentiments('bing'))%>% group_by(sentiment)%>%
  count()%>% ggplot(aes(x=sentiment,y=n,fill=sentiment))+geom_col()+theme_economist()+guides(fill=F)+ coord_flip()
## - Positive words = 90474 and Negative words - 22938
## - Approx 80% words are positive in the entire reviews set, which justifies the higher review 'Ratings' as seen before
## - See visualization graph
# Correlation between Positive Words and Review helpfulness
# positivity = share of a review's lexicon words that are positive
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(get_sentiments('bing'))%>% group_by(id,Rating)%>%
  summarize(positivity = sum(sentiment=='positive')/n())%>% ungroup()%>% summarize(correlation = cor(positivity,Rating))
## - The correlation is around 36%, which indicates that a lot of positive words doesn't directly imply a good Rating, but does to a limited extent.
######################################
# Part 2: NRC Sentiment Polarity Table - Lexicon
library(lexicon)
# hash_sentiment_nrc maps words (column 'x') to polarity -1/+1 (column 'y')
data %>% select(id, Review.Text)%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(y = hash_sentiment_nrc,by = c('word'='x'))%>%
  ungroup()%>% group_by(y)%>% summarize(count = n())%>% ungroup()
## - Count of '-1' words = 31221 and '1' words = 63759
## - Approx 67% words are in the '1' category
######################################
# Part 3: Emotion Lexicon - NRC Emotion Lexicon
nrc = get_sentiments('nrc')
# The result above is immediately replaced with a copy fetched from GitHub
# — presumably to avoid the interactive download/license prompt for the
# NRC lexicon; verify the mirror stays in sync with the official lexicon.
nrc = read.table(file = 'https://raw.githubusercontent.com/pseudorational/data/master/nrc_lexicon.txt',
                 header = F,
                 col.names = c('word','sentiment','num'),
                 sep = '\t',
                 stringsAsFactors = F)
# Keep only (word, sentiment) pairs actually flagged (num != 0)
nrc = nrc[nrc$num!=0,]
nrc$num = NULL
# Counts of emotions
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(sentiment)%>%count()
# Plot of emotions
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(sentiment)%>% count()%>%
  ggplot(aes(x=reorder(sentiment,X = n),y=n,fill=sentiment))+geom_col()+guides(fill=F)+coord_flip()+theme_wsj()
## - 'positive' has the highest count, followed by trust
## - See visualization graph
# Ratings of each Review based on Emotions Expressed
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()
# Ratings of all Reviews based on Emotion Expressed
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
  group_by(sentiment, Rating)%>% summarize(n = mean(n))%>% data.frame()
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
  group_by(sentiment, Rating)%>% summarize(n = mean(n))%>% ungroup()%>% ggplot(aes(x=Rating,y=n,fill=Rating))+ geom_col()+
  facet_wrap(~sentiment)+ guides(fill=F)+coord_flip()
## - See visualization graph, shows distribution of 'Rating' across different emotions
# Correlation between emotion expressed and review rating
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
  ungroup()%>% group_by(sentiment)%>% summarize(correlation = cor(n,Rating))
# Scatterplot of relationship
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
  ungroup()%>% group_by(sentiment)%>% ggplot(aes(x=Rating,y=n))+geom_point()+facet_wrap(~sentiment)+geom_smooth(method='lm',se=F)
## - There is a rise in the number of 'joy' and 'positive' words as the 'Rating' goes up.
## - And a drop in the number of 'negative' and 'disgust' words as the 'Rating' goes up.
## - See visualization graph
######################################
# Part 4: Sentiment score Lexicons - afinn Lexicon
nc = get_sentiments('afinn')
# As with nrc above, the lexicon is re-read from a GitHub mirror
afinn = read.table('https://raw.githubusercontent.com/pseudorational/data/master/AFINN-111.txt',
                   header = F,
                   quote="",
                   sep = '\t',
                   col.names = c('word','value'),
                   encoding='UTF-8',
                   stringsAsFactors = F)
# Per-review mean AFINN score (-5..+5), then distribution summary
data %>% select(id,Review.Text)%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% inner_join(afinn)%>%
  summarize(reviewSentiment = mean(value))%>% ungroup()%>%
  summarize(min=min(reviewSentiment),max=max(reviewSentiment),median=median(reviewSentiment),mean=mean(reviewSentiment))
data %>% select(id,Review.Text)%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% inner_join(afinn)%>%
  summarize(reviewSentiment = mean(value))%>% ungroup()%>% ggplot(aes(x=reviewSentiment,fill=reviewSentiment>0))+ geom_histogram(binwidth = 0.1)+
  scale_x_continuous(breaks=seq(-5,5,1))+scale_fill_manual(values=c('tomato','seagreen'))+ guides(fill=F)+ theme_wsj()
## - The lowest sentiment score for any 'Review.Text' is -3 and the maximum is 5.
## - The mean sentiment score is 1.71 and the median is 1.85
## - See visualization graph, shows distribution of sentiment scores and their counts
######################################
# Part 5: Wordcloud of 150 words (except stop words)
library(wordcloud)
wordcloudData = data%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% anti_join(stop_words)%>% group_by(word)%>%
  summarize(freq = n())%>% arrange(desc(freq))%>% ungroup()%>% data.frame()
set.seed(123)
wordcloud(words = wordcloudData$word,wordcloudData$freq,scale=c(3,1),max.words = 150,colors=brewer.pal(11,"Spectral"))
## - See visualization wordcloud
## - (Check Line 592 for wordcloud from corpus, which removes stop words, punctuations, sparse terms, etc)
######################################
# Part 6: Wordcloud of 100 Positive vs Negative words (except stop words)
# spread() pivots to one row per word with 'positive'/'negative' counts
wordcloudData = data%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% anti_join(stop_words)%>%
  inner_join(get_sentiments('bing'))%>% ungroup()%>% count(sentiment,word,sort=T)%>% spread(key=sentiment,value = n,fill=0)%>% data.frame()
rownames(wordcloudData) = wordcloudData[,'word']
wordcloudData = wordcloudData[,c('positive','negative')]
set.seed(123)
comparison.cloud(term.matrix = wordcloudData,scale = c(2.5,0.8),max.words = 100, rot.per=0)
## - See visualization wordcloud, Green = Positive words, Red = Negative words
###############################################################################################################
###############################################################################################################
## SECTION 4: Data Preparation for Predictive Modelling (TF, TF-IDF of text columns 'Review.Text' and 'Title'),
##            and Exploratory Analysis from Corpus for 'Review.Text'
# Re-run the steps for data preparation - tokenization, as was outlined in the previous Project 1.1 file (Line 113 of Project 1.1).
# Part 1: Data Preparation - Tokenization, for both 'Review.Text' and 'Title'
# for Review.Text
# 1 -- Create a corpus from the variable 'Review.Text'
# install.packages('tm')
library(tm)
corpus = Corpus(VectorSource(data$Review.Text))
# 2 -- Use tm_map to
#(a) transform text to lower case,
corpus = tm_map(corpus,FUN = content_transformer(tolower))
#(b) remove URLs
corpus = tm_map(corpus, FUN = content_transformer(FUN = function(x)gsub(pattern = 'http[[:alnum:][:punct:]]*',
                                                                        replacement = ' ',x = x)))
#(c) remove punctuation,
corpus = tm_map(corpus,FUN = removePunctuation)
#(d) remove English stopwords using the following dictionary tm::stopwords('english')
corpus = tm_map(corpus,FUN = removeWords,c(stopwords('english')))
#(e) remove whitespace
corpus = tm_map(corpus,FUN = stripWhitespace)
# 3 -- Create a dictionary
# built from the RAW (uncleaned, unstemmed) text so stemCompletion() can
# later map stems back to full words
dict = findFreqTerms(DocumentTermMatrix(Corpus(VectorSource(data$Review.Text))), lowfreq = 0)
dict_corpus = Corpus(VectorSource(dict))
# 4 -- Use tm_map to stem words
corpus = tm_map(corpus,FUN = stemDocument)
# 5 -- Create a DocumentTermMatrix
dtm = DocumentTermMatrix(corpus)
inspect(dtm)
dim(dtm)
## - 19662 documents with a total of 13633 terms
# for Title
# 1 -- Create a corpus from the variable 'Title' (same pipeline as above)
corpus2 = Corpus(VectorSource(data$Title))
# 2 -- Use tm_map to
#(a) transform text to lower case,
corpus2 = tm_map(corpus2,FUN = content_transformer(tolower))
#(b) remove URLs
corpus2 = tm_map(corpus2,FUN = content_transformer(FUN = function(x)gsub(pattern = 'http[[:alnum:][:punct:]]*',
                                                                         replacement = ' ',x = x)))
#(c) remove punctuation,
corpus2 = tm_map(corpus2,FUN = removePunctuation)
#(d) remove English stopwords using the following dictionary tm::stopwords('english')
corpus2 = tm_map(corpus2,FUN = removeWords,c(stopwords('english')))
#(e) remove whitespace
corpus2 = tm_map(corpus2,FUN = stripWhitespace)
# 3 -- Create a dictionary (from the raw 'Title' text, for stem completion)
dict2 = findFreqTerms(DocumentTermMatrix(Corpus(VectorSource(data$Title))),lowfreq = 0)
dict_corpus2 = Corpus(VectorSource(dict2))
# 4 -- Use tm_map to stem words
corpus2 = tm_map(corpus2,FUN = stemDocument)
# 5 -- Create a DocumentTermMatrix
dtm2 = DocumentTermMatrix(corpus2)
inspect(dtm2)
dim(dtm2)
## - 19662 documents with a total of 3204 terms
####################
# Remove Sparse Terms - We will remove those words which appear in less than 3% of the reviews
# for Review.Text
xdtm = removeSparseTerms(dtm,sparse = 0.97)
xdtm
xdtm_cluster = xdtm # to be used later for clustering
# for Title
xdtm2 = removeSparseTerms(dtm2,sparse = 0.97)
xdtm2; xdtm2_cluster = xdtm2
####################
# Complete Stems and Sort Tokens
# for Review.Text
xdtm = as.data.frame(as.matrix(xdtm))
colnames(xdtm) = stemCompletion(x = colnames(xdtm),
                                dictionary = dict_corpus,
                                type='prevalent')
colnames(xdtm) = make.names(colnames(xdtm))
sort(colSums(xdtm),decreasing = T)
## - sort to see most common terms
# for Title
xdtm2 = as.data.frame(as.matrix(xdtm2))
# FIX: complete Title stems against the Title dictionary (dict_corpus2),
# not the Review.Text dictionary (dict_corpus) used by mistake before.
# This matches the TF-IDF branch below, which already uses dict_corpus2.
colnames(xdtm2) = stemCompletion(x = colnames(xdtm2),
                                 dictionary = dict_corpus2,
                                 type='prevalent')
colnames(xdtm2) = make.names(colnames(xdtm2))
sort(colSums(xdtm2),decreasing = T)
## - sort to see most common terms
######################################
# Part 2: Document Term Matrix using Inverse Document Frequency - tfidf
# for Review.Text
# weightTfIdf with normalize=F down-weights terms occurring in most docs
dtm_tfidf = DocumentTermMatrix(x=corpus,
                               control = list(weighting=function(x) weightTfIdf(x,normalize=F)))
xdtm_tfidf = removeSparseTerms(dtm_tfidf,sparse = 0.97)
xdtm_tfidf = as.data.frame(as.matrix(xdtm_tfidf))
colnames(xdtm_tfidf) = stemCompletion(x = colnames(xdtm_tfidf),
                                      dictionary = dict_corpus,
                                      type='prevalent')
colnames(xdtm_tfidf) = make.names(colnames(xdtm_tfidf))
sort(colSums(xdtm_tfidf),decreasing = T)
## - sort to see most common terms
# for Title
dtm_tfidf2 = DocumentTermMatrix(x=corpus2,
                                control = list(weighting=function(x) weightTfIdf(x,normalize=F)))
xdtm_tfidf2 = removeSparseTerms(dtm_tfidf2,sparse = 0.97)
xdtm_tfidf2 = as.data.frame(as.matrix(xdtm_tfidf2))
colnames(xdtm_tfidf2) = stemCompletion(x = colnames(xdtm_tfidf2),
                                       dictionary = dict_corpus2,
                                       type='prevalent')
colnames(xdtm_tfidf2) = make.names(colnames(xdtm_tfidf2))
sort(colSums(xdtm_tfidf2),decreasing = T)
## - sort to see most common terms
######################################
# Part 3: Compare both DTM methods' results using graph
# for Review.Text
data.frame(term = colnames(xdtm),tf = colMeans(xdtm), tfidf = colMeans(xdtm_tfidf))%>%
  arrange(desc(tf))%>%
  top_n(9)%>%
  gather(key=weighting_method,value=weight,2:3)%>%
  ggplot(aes(x=term,y=weight,fill=weighting_method))+
  geom_col(position='dodge')+
  coord_flip()+
  theme_economist()
## - the term dress was assigned a much higher weight in the tf method, because it occurred in most of the reviews
## - but was assigned a lower weight in the tfidf method, because it has little diagnostic value, since it occurs in most reviews.
## - See visualization graph
# for Title
data.frame(term = colnames(xdtm2),tf = colMeans(xdtm2), tfidf = colMeans(xdtm_tfidf2))%>%
  arrange(desc(tf))%>%
  top_n(10)%>%
  gather(key=weighting_method,value=weight,2:3)%>%
  ggplot(aes(x=term,y=weight,fill=weighting_method))+
  geom_col(position='dodge')+
  coord_flip()+
  theme_economist()
## - the terms love and great were assigned a much higher weight in the tf method, because they occurred in most of the titles
## - but were assigned a lower weight in the tfidf method, because they have little diagnostic value, since they occur in most titles
## - See visualization graph
######################################
# Part 4: Add Rating back to dataframe of features
# for Review.Text
clothes_data = cbind(Rating = data$Rating, xdtm)
clothes_data_tfidf = cbind(Rating = data$Rating, xdtm_tfidf)
# for Title
clothes_data2 = cbind(Rating = data$Rating,xdtm2)
clothes_data_tfidf2 = cbind(Rating = data$Rating,xdtm_tfidf2)
######################################
# Part 5: WordCloud from the prepared corpus set (removing all stop words, punctuations, etc)
# for Review.Text
set.seed(123)
wordcloud(corpus, scale=c(6,0.5), max.words=170, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, 'Dark2'))
## - See visualization wordcloud
###############################################################################################################
###############################################################################################################
## SECTION 5: Predictive Modelling (CART and Regression) using only text columns 'Review.Text' and 'Title'
# Part 1: Predictive Models (using TF)
# for Review.Text
# 75/25 train/test split on row indices
set.seed(617)
split = sample(1:nrow(clothes_data), size = 0.75*nrow(clothes_data))
train = clothes_data[split,]
test = clothes_data[-split,]
# CART Method
library(rpart); library(rpart.plot)
tree = rpart(Rating~.,train)
rpart.plot(tree)
pred_tree = predict(tree,newdata=test)
rmse_tree = round(sqrt(mean((pred_tree - test$Rating)^2)),5); rmse_tree
## - See visualization Tree
## - RMSE = 1.009915
# Regression Method
reg = lm(Rating~.,train)
pred_reg = predict(reg, newdata=test)
rmse_reg = round(sqrt(mean((pred_reg-test$Rating)^2)),5); rmse_reg
## - RMSE = 0.9013822
# for Title
set.seed(617)
split = sample(1:nrow(clothes_data2), size = 0.75*nrow(clothes_data2))
train2 = clothes_data2[split,]
test2 = clothes_data2[-split,]
# CART Method
tree2 = rpart(Rating~.,train2)
rpart.plot(tree2)
pred_tree2 = predict(tree2,newdata=test2)
rmse_tree2 = round(sqrt(mean((pred_tree2 - test2$Rating)^2)),5); rmse_tree2
## - See visualization Tree
## - RMSE = 1.075686
# Regression Method
reg2 = lm(Rating~.,train2)
pred_reg2 = predict(reg2, newdata=test2)
rmse_reg2 = round(sqrt(mean((pred_reg2-test2$Rating)^2)),5); rmse_reg2
## - RMSE = 1.06697
## - Title is also not a bad predictor as well, the rmse lies within close range of Review.Text. But Review.Text gives the lowest rmse.
######################################
# Part 2: Predictive Models (using TF-IDF)
# Same procedure and seed as Part 1, on the TF-IDF feature matrices.
# NOTE: 'split', 'train', 'test', 'tree' and 'reg' are reused/overwritten
# here; only the rmse_* variables carry Part 1's results forward.
# for Review.Text
set.seed(617)
split = sample(1:nrow(clothes_data_tfidf), size = 0.75*nrow(clothes_data_tfidf))
train = clothes_data_tfidf[split,]
test = clothes_data_tfidf[-split,]
# CART Method
tree = rpart(Rating~.,train)
rpart.plot(tree)
pred_tree = predict(tree,newdata=test)
rmse_tree_idf = round(sqrt(mean((pred_tree - test$Rating)^2)),5); rmse_tree_idf
## - RMSE = 1.009915
## - See visualization Tree
# Regression Method
reg = lm(Rating~.,train)
pred_reg = predict(reg, newdata=test)
rmse_reg_idf = round(sqrt(mean((pred_reg-test$Rating)^2)),5); rmse_reg_idf
## - RMSE = 0.9013822
# for Title
set.seed(617)
split = sample(1:nrow(clothes_data_tfidf2), size = 0.75*nrow(clothes_data_tfidf2))
train2 = clothes_data_tfidf2[split,]
test2 = clothes_data_tfidf2[-split,]
# CART Method
tree2 = rpart(Rating~.,train2)
rpart.plot(tree2)
pred_tree2 = predict(tree2,newdata=test2)
rmse_tree2_idf = round(sqrt(mean((pred_tree2 - test2$Rating)^2)),5); rmse_tree2_idf
## - RMSE = 1.075686
## - See visualization Tree
# Regression Method
reg2 = lm(Rating~.,train2)
pred_reg2 = predict(reg2, newdata=test2)
rmse_reg2_idf = round(sqrt(mean((pred_reg2-test2$Rating)^2)),5); rmse_reg2_idf
## - RMSE = 1.06697
# Summary tables comparing TF vs TF-IDF RMSEs for each text column
rmse_review_text_df = data.frame(for_Review.Text = c("Method", "TF", "TF-IDF"),CART_RMSE = c(" ", rmse_tree, rmse_tree_idf),
                                 Regression_RMSE = c(" ", rmse_reg, rmse_reg_idf))
rmse_title_df = data.frame(for_Title = c("Method", "TF", "TF-IDF"),CART_RMSE = c(" ", rmse_tree2, rmse_tree2_idf),
                           Regression_RMSE = c(" ", rmse_reg2, rmse_reg2_idf))
rmse_review_text_df
rmse_title_df
## - Both methods, i.e., TF and TF-IDF give the exact same RMSE for both 'Review.Text' and 'Title'.
## - 'Review.Text' always gives lower rmse than any method used for 'Title'. So we should use 'Review.Text' going forward.
## - For best rmse, we need to use the regression method of predictive modelling, but might need to compare results from TF and TF-IDF methods.
###############################################################################################################
###############################################################################################################
# SECTION 6: Clustering and Predictive Modelling using clustering techniques, except all text columns, dendrogram for text columns clustering
# Part 1: Prepare Data for Cluster Analysis
library(caret)
set.seed(617)
# Stratified 75/25 split on Rating (groups=100 controls the stratification)
split = createDataPartition(y=data$Rating,p = 0.75,list = F,groups = 100)
train = data[split,]
test = data[-split,]
# Drop id, text and categorical columns — clustering uses numeric columns only
train = subset(train, select = -c(id, Clothing.ID, Title, Review.Text, Division.Name, Department.Name, Class.Name, bins))
test = subset(test, select = -c(id, Clothing.ID, Title, Review.Text, Division.Name, Department.Name, Class.Name, bins))
# Simple Regression (baseline model on the full training set)
linear = lm(Rating~.,train)
summary(linear)
sseLinear = sum(linear$residuals^2); sseLinear
predLinear = predict(linear,newdata=test)
# sseLinear is reassigned: now the out-of-sample SSE used for comparison later
sseLinear = sum((predLinear-test$Rating)^2); sseLinear
# Cluster and Regression
trainMinusDV = subset(train,select=-c(Rating))
testMinusDV = subset(test,select=-c(Rating))
# Prepare Data for Clustering - Cluster Analysis is sensitive to scale. Normalizing the data.
# preProcess is fit on train only; the same transform is applied to test
preproc = preProcess(trainMinusDV)
trainNorm = predict(preproc,trainMinusDV)
testNorm = predict(preproc,testMinusDV)
######################################
# Part 2: Hierarchical and k-means Cluster Analysis
# Hierarchical
distances = dist(trainNorm,method = 'euclidean')
clusters = hclust(d = distances,method = 'ward.D2')
library(dendextend)
plot(color_branches(cut(as.dendrogram(clusters), h = 20)$upper), k = 3, groupLabels = F) # displaying clusters with tree above 20
rect.hclust(tree=clusters,k = 3,border='red')
## - Based on the plot, a 3 cluster solution looks good.
# NOTE(review): the comment above says 3 clusters, but cutree uses k=2 —
# presumably so the %match with the 2-center kmeans below is comparable;
# confirm this is intentional.
clusterGroups = cutree(clusters,k=2)
# install.packages('psych')
# visualize
library(psych)
# Project onto 2 varimax-rotated factors for a 2-D view of the clusters
temp = data.frame(cluster = factor(clusterGroups),
                  factor1 = fa(trainNorm,nfactors = 2,rotate = 'varimax')$scores[,1],
                  factor2 = fa(trainNorm,nfactors = 2,rotate = 'varimax')$scores[,2])
ggplot(temp,aes(x=factor1,y=factor2,col=cluster))+
  geom_point()
## - See visualization graph
# k-means clustering
set.seed(617)
km = kmeans(x = trainNorm,centers = 2,iter.max=10000,nstart=100)
km$centers
mean(km$cluster==clusterGroups) # %match between results of hclust and kmeans
# Total within sum of squares Plot (elbow method over k = 1..10)
within_ss = sapply(1:10,FUN = function(x) kmeans(x = trainNorm,centers = x,iter.max = 1000,nstart = 25)$tot.withinss)
ggplot(data=data.frame(cluster = 1:10,within_ss),aes(x=cluster,y=within_ss))+ geom_line(col='steelblue',size=1.2)+
  geom_point()+ scale_x_continuous(breaks=seq(1,10,1))
# Ratio Plot (between-SS / total-SS for k = 1..10)
ratio_ss = sapply(1:10,FUN = function(x) {km = kmeans(x = trainNorm,centers = x,iter.max = 1000,nstart = 25)
km$betweenss/km$totss} )
ggplot(data=data.frame(cluster = 1:10,ratio_ss),aes(x=cluster,y=ratio_ss))+ geom_line(col='steelblue',size=1.2)+
  geom_point()+ scale_x_continuous(breaks=seq(1,10,1))
# Silhouette Plot
library(cluster)
silhoette_width = sapply(2:10,FUN = function(x) pam(x = trainNorm,k = x)$silinfo$avg.width)
#ggplot(data=data.frame(cluster = 2:10,silhoette_width),aes(x=cluster,y=silhoette_width))+   # takes too much time
#  geom_line(col='steelblue',size=1.2)+ geom_point()+ scale_x_continuous(breaks=seq(2,10,1))
######################################
# Part 3: Apply to test, and Compare Results
# Set the centers as 3
set.seed(617)
km = kmeans(x = trainNorm,centers = 3,iter.max=10000,nstart=100)
# install.packages('flexclust')
library(flexclust)
km_kcca = as.kcca(km,trainNorm) # flexclust uses objects of the classes kcca
clusterTrain = predict(km_kcca)
clusterTest = predict(km_kcca,newdata=testNorm)
table(clusterTrain)
table(clusterTest)
# Split train and test based on cluster membership
# NOTE(review): kmeans was fit with 3 centers, but only clusters 1 and 2 are
# subset below — observations assigned to cluster 3 are silently excluded from
# the models and from the SSE comparison. Confirm whether that is intended.
train1 = subset(train,clusterTrain==1)
train2 = subset(train,clusterTrain==2)
test1 = subset(test,clusterTest==1)
test2 = subset(test,clusterTest==2)
# Predict for each Cluster then Combine: one linear model per cluster.
lm1 = lm(Rating~.,train1)
lm2 = lm(Rating~.,train2)
pred1 = predict(lm1,newdata=test1)
pred2 = predict(lm2,newdata=test2)
sse1 = sum((test1$Rating-pred1)^2); sse1
sse2 = sum((test2$Rating-pred2)^2); sse2
predOverall = c(pred1,pred2)
RatingOverall = c(test1$Rating,test2$Rating)
sseOverall = sum((predOverall - RatingOverall)^2); sseOverall
# Compare Results
paste('SSE for model on entire data',sseLinear)
paste('SSE for model on clusters',sseOverall)
## - SSE on Entire data = 2262.3200502617, SSE on Clusters = 1643.99972478085
## - Prediction using clusters is more accurate, as the standard error is less.
######################################
# Part 4: Predict Using Tree, and Compare Results
# Simple Tree fit on the full training set as a baseline.
library(rpart); library(rpart.plot)
tree = rpart(Rating~.,train,minbucket=10)
predTree = predict(tree,newdata=test)
sseTree = sum((predTree - test$Rating)^2); sseTree
# Cluster Then Predict Using Tree: one tree per cluster, same minbucket.
tree1 = rpart(Rating~.,train1,minbucket=10)
tree2 = rpart(Rating~.,train2,minbucket=10)
pred1 = predict(tree1,newdata=test1)
pred2 = predict(tree2,newdata=test2)
sse1 = sum((test1$Rating-pred1)^2); sse1
sse2 = sum((test2$Rating-pred2)^2); sse2
predTreeCombine = c(pred1,pred2)
RatingOverall = c(test1$Rating,test2$Rating)
sseTreeCombine = sum((predTreeCombine - RatingOverall)^2); sseTreeCombine
# Compare Results
paste('SSE for model on entire data',sseTree)
paste('SSE for model on clusters',sseTreeCombine)
## - SSE on Entire data = 2262.07769316003, SSE on Clusters = 1643.2592670892
## - Prediction using clusters is more accurate, as the standard error is less.
## - Lowest Error is when we Cluster with Tree and predict
# Part 5: Clustering, and Dendrogram from cleaned corpus, of 'Review.Text' and 'Title'
# We had defined 'xdtm_cluster' as the cleaned corpus earlier in Line 478
# 'Review.Text' — left commented out: pairwise distances over the full
# review-term matrix are too slow/large to compute routinely.
#hc = hclust(d = dist(xdtm_cluster, method = "euclidean"), method = "complete") # this takes massive time to run
#plot(hc)
# 'Title' — the title-term matrix is much smaller, so this one is run.
hc = hclust(d = dist(xdtm2_cluster, method = "euclidean"), method = "complete")
plot(hc)
## - See visualization graph
###############################################################################################################
###############################################################################################################
# SECTION 7: Looking at Future, what else we could have done.
# 1: In-depth Cluster Analysis of text columns, using detailed scatterplots
# For clustering and prediction Modelling using the text column 'Review.Text', the following code can be used.
# Source 1 - https://gist.github.com/luccitan/b74c53adfe3b6dad1764af1cdc1f08b7
# Source 2 - https://medium.com/@SAPCAI/text-clustering-with-r-an-introduction-for-data-scientists-c406e7454e76
# We had defined 'xdtm_cluster' as the cleaned corpus earlier in Line 478, which will be used here for converting to matrix, etc... as per the code given.
######################################
# 2: Further detailed exploratory analysis
# Source - https://www.kaggle.com/dubravkodolic/reviews-of-clothings-analyzed-by-sentiments
# Source - https://www.kaggle.com/cosinektheta/mining-the-women-s-clothing-reviews
######################################
# 3: More prediction models, to evaluate better rmse measures
# Source - https://www.kaggle.com/ankitppn/logistic-regression-and-random-forest-models/output
#################################### T H E E N D ####################################
|
/Women's clothes.R
|
no_license
|
qiaozhou-qz/Ecommerce
|
R
| false
| false
| 35,466
|
r
|
# Team FINANCE 3
# Project Deliverable 2 - Perform analysis on the dataset and build graphical representations, predictions, etc.
# DATA SET - https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews
# The OBJECTIVE is to perform exploratory analysis, predict the rating of the clothes, and do clustering analysis.
# Line 49 - SECTION 1: Exploratory Analysis of different variables
# Line 120 - SECTION 2: Exploratory Analysis of text column 'Review.Text' and numerical column 'Rating'
# Line 246 - SECTION 3: Sentiment Analysis on text column 'Review.Text', formation of Wordclouds
# Line 403 - SECTION 4: Data Preparation for Predictive Modelling (TF, TF-IDF of text columns 'Review.Text' and 'Title'),
# and Exploratory Analysis from Corpus for 'Review.Text'
# Line 611 - SECTION 5: Predictive Modelling (CART and Regression) using only text columns 'Review.Text' and 'Title'
# Line 734 - SECTION 6: Clustering and Predictive Modelling using clustering techniques for non-text columns, dendrogram for text columns
# Line 926 - SECTION 7: Looking at Future, what else we could have done.
# Pin the random number generator behaviour to that of R 3.6 so set.seed()
# reproduces the project's documented results on newer R versions.
# BUG FIX: RNGversion() requires a *character* version string; the original
# passed the numeric 3.6, which errors inside strsplit() ("non-character
# argument") before the RNG is ever configured.
RNGversion(vstr = "3.6")
# Clear the workspace (script convention kept from the original; note this
# wipes any objects the user had loaded).
rm(list=ls())
# Load all necessary libraries
library(ggplot2); library(ggthemes); library(tidyr); library(dplyr)
library(cluster); library(mclust)
library(stringr); library(corrplot);
library(tidytext);library(janeaustenr); library(gridExtra)
# Read the cleaned data set from Project 1.1
# NOTE(review): absolute, machine-specific path — not portable; consider a
# relative path or file.choose() when sharing the script.
getwd();
data = read.csv('/Users/zhouqiao/Desktop/Clean_Womens_Reviews_Simple.csv', stringsAsFactors = FALSE)
# Evaluate the structure and contents of the dataset
str(data)
summary(data)
# Check column names
names(data)
# The first column 'X' is the original (given) serial number for the rows. We rename it to 'id' for simplicity
names(data)[1] = "id"
dim(data)
# Cleaned dataset with 19662 rows and 11 columns
###############################################################################################################
###############################################################################################################
## SECTION 1: Exploratory Analysis of different variables
# Part 1: Ratings - Number of Reviewers by Age (Age Group)
# Bin Age into five labelled generation groups; cut() defaults to right-closed
# intervals, i.e. (0,20], (20,40], ...
data$bins = cut(data$Age, breaks = c(0,20,40,60,80,100), labels = c("Centennials(0-20)","Young Adults(21-40)",
"Adults(41-60)","Retired(61-80)","Traditionalists(81-100)"))
age_groups = data %>% select(bins,Age) %>% group_by(bins) %>% summarise(count = n())
ggplot(data=age_groups,aes(x=bins,y=count)) + geom_bar(stat = "identity",fill="blue") +
labs(x = 'Age Groups', y = 'Number of Reviews')
## - Age groups 21-40 are the users who use e-commerce the most, hence they have given the most reviews
## - The lowest raters are the ones below 20 years, reasons maybe limited access to internet or devices
## - See visualization graph
######################################
# Part 2: Distribution of Departments where each Age Group tends to shop the most
age_groups_dept = data %>% select(bins,Class.Name, id) %>% group_by(Class.Name, bins) %>% summarise(count = n())
ggplot(age_groups_dept, aes(x = bins, y = count,fill=Class.Name)) + geom_bar(stat='identity') +
labs(x = 'Age Groups', y = 'Number of Reviews') + theme(axis.text.x = element_text(angle = 90, hjust = 1))
## - 'Dresses' are the most common, and are shopped by age groups 21 to 60
## - See visualization graph
######################################
# Part 3: Most Reviewed Products by 'Class.Name' (top 10 classes by count)
most_reviewed_products <- data %>% select(Class.Name) %>% group_by(Class.Name) %>% summarise(count = n()) %>% arrange(desc(count)) %>% head(10)
colnames(most_reviewed_products)[1] = "Class of Product"
colnames(most_reviewed_products)[2] = "Number of Reviews"
#install.packages('gridExtra')
library(gridExtra)
table1 = tableGrob(most_reviewed_products)
grid.arrange(table1,ncol=1)
## - We see that 'Dresses' top the list followed by 'Knits' and 'Blouses'
## - See visualization table
######################################
# Part 4: Understanding the distribution of 'Rating' by 'Department.Name'
# prop.table(table(...)) gives the share of reviews per department; *100 -> %.
ggplot(data.frame(prop.table(table(data$Department.Name))), aes(x=Var1, y = Freq*100)) + geom_bar(stat = 'identity') +
xlab('Department Name') + ylab('Percentage of Reviews/Ratings (%)') + geom_text(aes(label=round(Freq*100,2)), vjust=-0.25) +
ggtitle('Percentage of Reviews By Department')
## - 'Tops' have the highest percentage of reviews and ratings in this dataset, followed by 'dresses'.
## - Items in the 'Jackets' and 'Trend' department received the lowest number of reviews.
## - See visualization graph
###############################################################################################################
###############################################################################################################
## SECTION 2: Exploratory Analysis of text column 'Review.Text' and numerical column 'Rating'
# Explore the numeric column 'Rating' and the text column 'Review.Text' and understand their statistical features and distribution
# Part 1: Ratings - mean and median
# Mean and Median Ratings
data %>%
summarize(Average_rating = mean(Rating), Median_rating = median(Rating))
# Distribution of Ratings (histogram flipped so ratings run along the y axis)
ggplot(data = data, aes(x = Rating)) + geom_histogram(fill = 'black') + theme_grey() + coord_flip()
## - Average Rating = 4.18 and Median Rating = 5
## - Indicates most of the customers have rated all the different products positively, with higher ratings for most reviews
## - See visualization graph
# Part 2: Review.Text - character, word and sentence counts for all reviews.
# Compute each count vector once, then summarise it by mean and median.
# Words are runs of non-whitespace; sentences are spans ending in . ? or !.
char_counts = nchar(data$Review.Text)
word_counts = str_count(string = data$Review.Text,pattern = '\\S+')
sentence_counts = str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]")
# Characters
mean_characters = mean(char_counts)
median_characters = median(char_counts)
# Words
mean_words = mean(word_counts)
median_words = median(word_counts)
# Sentences
mean_sentences = mean(sentence_counts)
median_sentences = median(sentence_counts)
# Assemble a small summary table, rounded to 2 decimals.
counts = data.frame(Variables = c("Characters", "Words", "Sentences"),
Mean = round(c(mean_characters, mean_words, mean_sentences),2),
Median = round(c(median_characters, median_words, median_sentences),2))
counts
## - The counts for each are more or less similar in their own mean and median
## - Implies that the counts distribution is highly symmetric and the skewness is low across the individual counts.
######################################
# Part 3: Review.Text length and Ratings - correlation
# Characters
cor(nchar(data$Review.Text),data$Rating)
cor.test(nchar(data$Review.Text),data$Rating)
# Words (runs of non-whitespace)
cor(str_count(string = data$Review.Text,pattern = '\\S+'),data$Rating)
cor.test(str_count(string = data$Review.Text,pattern = '\\S+'),data$Rating)
# Sentences (spans ending in . ? or !)
cor(str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]"),data$Rating)
cor.test(str_count(string = data$Review.Text,pattern = "[A-Za-z,;'\"\\s]+[^.!?]*[.?!]"),data$Rating)
## - Cor for: Characters = -0.05478506, Words = -0.05622374, Sentences = 0.01813276
## - Low correlations for all three variables
## - Implies that the length of the 'Review.Text' do not really impact the 'Rating' given.
######################################
# Part 4: 'Review.Text' text characteristics and Ratings - correlation
# Screaming Reviews - share of upper-case letters per review
proportionUpper = str_count(data$Review.Text,pattern='[A-Z]')/nchar(data$Review.Text)
cor(proportionUpper,data$Rating)
cor.test(proportionUpper,data$Rating)
## - Low correlations for all parameters
## - Implies that the Upper Case letters in 'Review.Text' do not really impact the 'Ratings'
# Exclamation Marks - share of '!' characters per review
summary(str_count(data$Review.Text,pattern='!'))
proportionExclamation = str_count(data$Review.Text,pattern='!')/nchar(data$Review.Text)
cor(proportionExclamation,data$Rating)
cor.test(proportionExclamation,data$Rating)
## - Cor for: Upper Case = 0.05779606, Exclamation Marks = 0.1776584
## - Low correlations for both variables
## - Implies that the Exclamation Marks in 'Review.Text' do not greatly impact the 'Ratings'
## - But it has more impact than Upper case letters as its correlation is higher than Upper Case letters
######################################
# Part 5: 'Review.Text' - most common words
# Most common words, out of all words
library(qdap)
freq_terms(text.var = data$Review.Text,top = 10)
plot(freq_terms(text.var = data$Review.Text,top = 10))
## - The most common used words are - the, i, and
## - But this is irrelevant. We need to remove stop words before computing this
## - See visualization for graph
# Most common words, excluding qdap's 200 most frequent English words
freq_terms(text.var=data$Review.Text,top=10,stopwords = Top200Words)
plot(freq_terms(text.var=data$Review.Text,top=10,stopwords = Top200Words))
## - The top used words are - dress, size, love
## - See visualization for graph
## - (Check Section 3, Part 5 (Line 367) below for wordcloud of common words)
## - (Check Section 4, Part 5 (Line 595) below for wordcloud from corpus, which removes stop words, punctuations, sparse terms, etc)
###############################################################################################################
###############################################################################################################
## SECTION 3: Sentiment Analysis on text column 'Review.Text', formation of Wordclouds
# Conduct Sentiment Analysis using the various Lexicons, and bag of words, and word clouds
# Part 1: Binary Sentiment (positive/negative) - Bing Lexicon
# Tokenize each review into words and inner-join against the bing lexicon;
# words not in the lexicon drop out of the counts.
data %>% select(id,Review.Text)%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% ungroup()%>% inner_join(get_sentiments('bing'))%>%
group_by(sentiment)%>% summarize(n = n())%>% mutate(proportion = n/sum(n))
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(get_sentiments('bing'))%>% group_by(sentiment)%>%
count()%>% ggplot(aes(x=sentiment,y=n,fill=sentiment))+geom_col()+theme_economist()+guides(fill=F)+ coord_flip()
## - Positive words = 90474 and Negative words - 22938
## - Approx 80% words are positive in the entire reviews set, which justifies the higher review 'Ratings' as seen before
## - See visualization graph
# Correlation between Positive Words and Review helpfulness
# positivity = share of a review's lexicon-matched words that are positive.
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(get_sentiments('bing'))%>% group_by(id,Rating)%>%
summarize(positivity = sum(sentiment=='positive')/n())%>% ungroup()%>% summarize(correlation = cor(positivity,Rating))
## - The correlation is around 36%, which indicates that a lot of positive words doesn't directly imply a good Rating, but does to a limited extent.
######################################
# Part 2: NRC Sentiment Polarity Table - Lexicon
library(lexicon)
data %>% select(id, Review.Text)%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(y = hash_sentiment_nrc,by = c('word'='x'))%>%
ungroup()%>% group_by(y)%>% summarize(count = n())%>% ungroup()
## - Count of '-1' words = 31221 and '1' words = 63759
## - Approx 67% words are in the '1' category
# Part 3: Emotion Lexicon - NRC Emotion Lexicon
# Load the NRC word-emotion lexicon from a hosted text copy.
# NOTE: the original first assigned nrc = get_sentiments('nrc') and then
# immediately overwrote it with this read.table; that dead call (which can
# trigger an interactive download/license prompt in tidytext) was removed.
nrc = read.table(file = 'https://raw.githubusercontent.com/pseudorational/data/master/nrc_lexicon.txt',
header = FALSE,
col.names = c('word','sentiment','num'),
sep = '\t',
stringsAsFactors = FALSE)
# Keep only word-sentiment pairs that are actually flagged (num != 0), then
# drop the indicator column, leaving (word, sentiment) rows.
nrc = nrc[nrc$num!=0,]
nrc$num = NULL
# Counts of emotions: tokenize reviews, join to the NRC lexicon, tally per emotion.
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(sentiment)%>%count()
# Plot of emotions, ordered by frequency
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(sentiment)%>% count()%>%
ggplot(aes(x=reorder(sentiment,X = n),y=n,fill=sentiment))+geom_col()+guides(fill=F)+coord_flip()+theme_wsj()
## - 'positive' has the highest count, followed by trust
## - See visualization graph
# Ratings of each Review based on Emotions Expressed
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()
# Ratings of all Reviews based on Emotion Expressed (mean word count per emotion/rating)
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
group_by(sentiment, Rating)%>% summarize(n = mean(n))%>% data.frame()
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
group_by(sentiment, Rating)%>% summarize(n = mean(n))%>% ungroup()%>% ggplot(aes(x=Rating,y=n,fill=Rating))+ geom_col()+
facet_wrap(~sentiment)+ guides(fill=F)+coord_flip()
## - See visualization graph, shows distribution of 'Rating' across different emotions
# Correlation between emotion expressed and review rating
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
ungroup()%>% group_by(sentiment)%>% summarize(correlation = cor(n,Rating))
# Scatterplot of relationship, one facet per emotion with a linear fit
data%>% group_by(id)%>% unnest_tokens(output = word, input = Review.Text)%>% inner_join(nrc)%>% group_by(id,sentiment,Rating)%>% count()%>%
ungroup()%>% group_by(sentiment)%>% ggplot(aes(x=Rating,y=n))+geom_point()+facet_wrap(~sentiment)+geom_smooth(method='lm',se=F)
## - There is a rise in the number of 'joy' and 'positive' words as the 'Rating' goes up.
## - And a drop in the number of 'negative' and 'disgust' words as the 'Rating' goes up.
## - See visualization graph
# Part 4: Sentiment score Lexicons - afinn Lexicon
afinn = get_sentiments('afinn')
afinn = read.table('https://raw.githubusercontent.com/pseudorational/data/master/AFINN-111.txt',
header = F,
quote="",
sep = '\t',
col.names = c('word','value'),
encoding='UTF-8',
stringsAsFactors = F)
# Mean AFINN score per review, then summary statistics across all reviews.
data %>% select(id,Review.Text)%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% inner_join(afinn)%>%
summarize(reviewSentiment = mean(value))%>% ungroup()%>%
summarize(min=min(reviewSentiment),max=max(reviewSentiment),median=median(reviewSentiment),mean=mean(reviewSentiment))
data %>% select(id,Review.Text)%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% inner_join(afinn)%>%
summarize(reviewSentiment = mean(value))%>% ungroup()%>% ggplot(aes(x=reviewSentiment,fill=reviewSentiment>0))+ geom_histogram(binwidth = 0.1)+
scale_x_continuous(breaks=seq(-5,5,1))+scale_fill_manual(values=c('tomato','seagreen'))+ guides(fill=F)+ theme_wsj()
## - The lowest sentiment score for any 'Review.Text' is -3 and the maximum is 5.
## - The mean sentiment score is 1.71 and the median is 1.85
## - See visualization graph, shows distribution of sentiment scores and their counts
######################################
# Part 5: Wordcloud of 150 words (except stop words)
library(wordcloud)
# Word frequencies after removing tidytext's stop_words list.
wordcloudData = data%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% anti_join(stop_words)%>% group_by(word)%>%
summarize(freq = n())%>% arrange(desc(freq))%>% ungroup()%>% data.frame()
# Seed fixed because wordcloud() places words randomly.
set.seed(123)
wordcloud(words = wordcloudData$word,wordcloudData$freq,scale=c(3,1),max.words = 150,colors=brewer.pal(11,"Spectral"))
## - See visualization wordcloud
## - (Check Line 592 for wordcloud from corpus, which removes stop words, punctuations, sparse terms, etc)
######################################
# Part 6: Wordcloud of 100 Positive vs Negative words (except stop words)
# Build a word x {positive, negative} count matrix for comparison.cloud().
wordcloudData = data%>% group_by(id)%>% unnest_tokens(output=word,input=Review.Text)%>% anti_join(stop_words)%>%
inner_join(get_sentiments('bing'))%>% ungroup()%>% count(sentiment,word,sort=T)%>% spread(key=sentiment,value = n,fill=0)%>% data.frame()
rownames(wordcloudData) = wordcloudData[,'word']
wordcloudData = wordcloudData[,c('positive','negative')]
set.seed(123)
comparison.cloud(term.matrix = wordcloudData,scale = c(2.5,0.8),max.words = 100, rot.per=0)
## - See visualization wordcloud, Green = Positive words, Red = Negative words
###############################################################################################################
###############################################################################################################
## SECTION 4: Data Preparation for Predictive Modelling (TF, TF-IDF of text columns 'Review.Text' and 'Title'),
## and Exploratory Analysis from Corpus for 'Review.Text'
# Re-run the steps for data preparation - tokenization, as was outlined in the previous Project 1.1 file (Line 113 of Project 1.1).
# Part 1: Data Preparation - Tokenization, for both 'Review.Text' and 'Title'
# for Review.Text
# 1 -- Create a corpus from the variable 'Review.Text'
# install.packages('tm')
library(tm)
corpus = Corpus(VectorSource(data$Review.Text))
# 2 -- Use tm_map to
#(a) transform text to lower case,
corpus = tm_map(corpus,FUN = content_transformer(tolower))
#(b) remove URLs
corpus = tm_map(corpus, FUN = content_transformer(FUN = function(x)gsub(pattern = 'http[[:alnum:][:punct:]]*',
replacement = ' ',x = x)))
#(c) remove punctuation,
corpus = tm_map(corpus,FUN = removePunctuation)
#(d) remove English stopwords using the following dictionary tm::stopwords('english')
corpus = tm_map(corpus,FUN = removeWords,c(stopwords('english')))
#(e) remove whitespace
corpus = tm_map(corpus,FUN = stripWhitespace)
# 3 -- Create a dictionary of every term in the *unprocessed* text (lowfreq = 0
#      keeps all terms); used later by stemCompletion to restore readable words.
dict = findFreqTerms(DocumentTermMatrix(Corpus(VectorSource(data$Review.Text))), lowfreq = 0)
dict_corpus = Corpus(VectorSource(dict))
# 4 -- Use tm_map to stem words
corpus = tm_map(corpus,FUN = stemDocument)
# 5 -- Create a DocumentTermMatrix
dtm = DocumentTermMatrix(corpus)
inspect(dtm)
dim(dtm)
## - 19662 documents with a total of 13633 terms
# for Title — same pipeline as for 'Review.Text' above.
# 1 -- Create a corpus from the variable 'Title'
corpus2 = Corpus(VectorSource(data$Title))
# 2 -- Use tm_map to
#(a) transform text to lower case,
corpus2 = tm_map(corpus2,FUN = content_transformer(tolower))
#(b) remove URLs
corpus2 = tm_map(corpus2,FUN = content_transformer(FUN = function(x)gsub(pattern = 'http[[:alnum:][:punct:]]*',
replacement = ' ',x = x)))
#(c) remove punctuation,
corpus2 = tm_map(corpus2,FUN = removePunctuation)
#(d) remove English stopwords using the following dictionary tm::stopwords('english')
corpus2 = tm_map(corpus2,FUN = removeWords,c(stopwords('english')))
#(e) remove whitespace
corpus2 = tm_map(corpus2,FUN = stripWhitespace)
# 3 -- Create a dictionary of all raw Title terms for later stem completion
dict2 = findFreqTerms(DocumentTermMatrix(Corpus(VectorSource(data$Title))),lowfreq = 0)
dict_corpus2 = Corpus(VectorSource(dict2))
# 4 -- Use tm_map to stem words
corpus2 = tm_map(corpus2,FUN = stemDocument)
# 5 -- Create a DocumentTermMatrix
dtm2 = DocumentTermMatrix(corpus2)
inspect(dtm2)
dim(dtm2)
## - 19662 documents with a total of 3204 terms
####################
# Remove Sparse Terms - We will remove those words which appear in less than 3% of the reviews
# for Review.Text
xdtm = removeSparseTerms(dtm,sparse = 0.97)
xdtm
xdtm_cluster = xdtm # to be used later for clustering
# for Title
xdtm2 = removeSparseTerms(dtm2,sparse = 0.97)
xdtm2; xdtm2_cluster = xdtm2
####################
# Complete Stems and Sort Tokens
# for Review.Text
xdtm = as.data.frame(as.matrix(xdtm))
# Map each stem back to its most frequent ('prevalent') full word from the
# raw Review.Text dictionary, then sanitize names for use as data.frame columns.
colnames(xdtm) = stemCompletion(x = colnames(xdtm),
dictionary = dict_corpus,
type='prevalent')
colnames(xdtm) = make.names(colnames(xdtm))
sort(colSums(xdtm),decreasing = T)
## - sort to see most common terms
# for Title
xdtm2 = as.data.frame(as.matrix(xdtm2))
# BUG FIX: stems from the 'Title' matrix must be completed against the Title
# dictionary (dict_corpus2, built from data$Title), not the Review.Text
# dictionary (dict_corpus) the original passed here. The tf-idf branch below
# already uses dict_corpus2 for this same matrix.
colnames(xdtm2) = stemCompletion(x = colnames(xdtm2),
dictionary = dict_corpus2,
type='prevalent')
colnames(xdtm2) = make.names(colnames(xdtm2))
sort(colSums(xdtm2),decreasing = TRUE)
## - sort to see most common terms
######################################
# Part 2: Document Term Matrix using Inverse Document Frequency - tfidf
# for Review.Text
# Same pipeline as the TF branch, but weighted with (unnormalized) tf-idf.
dtm_tfidf = DocumentTermMatrix(x=corpus,
control = list(weighting=function(x) weightTfIdf(x,normalize=F)))
xdtm_tfidf = removeSparseTerms(dtm_tfidf,sparse = 0.97)
xdtm_tfidf = as.data.frame(as.matrix(xdtm_tfidf))
colnames(xdtm_tfidf) = stemCompletion(x = colnames(xdtm_tfidf),
dictionary = dict_corpus,
type='prevalent')
colnames(xdtm_tfidf) = make.names(colnames(xdtm_tfidf))
sort(colSums(xdtm_tfidf),decreasing = T)
## - sort to see most common terms
# for Title
dtm_tfidf2 = DocumentTermMatrix(x=corpus2,
control = list(weighting=function(x) weightTfIdf(x,normalize=F)))
xdtm_tfidf2 = removeSparseTerms(dtm_tfidf2,sparse = 0.97)
xdtm_tfidf2 = as.data.frame(as.matrix(xdtm_tfidf2))
colnames(xdtm_tfidf2) = stemCompletion(x = colnames(xdtm_tfidf2),
dictionary = dict_corpus2,
type='prevalent')
colnames(xdtm_tfidf2) = make.names(colnames(xdtm_tfidf2))
sort(colSums(xdtm_tfidf2),decreasing = T)
## - sort to see most common terms
######################################
# Part 3: Compare both DTM methods' results using graph
# for Review.Text: mean TF weight vs mean TF-IDF weight for the top terms.
data.frame(term = colnames(xdtm),tf = colMeans(xdtm), tfidf = colMeans(xdtm_tfidf))%>%
arrange(desc(tf))%>%
top_n(9)%>%
gather(key=weighting_method,value=weight,2:3)%>%
ggplot(aes(x=term,y=weight,fill=weighting_method))+
geom_col(position='dodge')+
coord_flip()+
theme_economist()
## - the term dress was assigned a much higher weight in the tf method, because it occurred in most of the reviews
## - but was assigned a lower weight in the tf-idf method, because it has little diagnostic value, since it occurs in most reviews.
## - See visualization graph
# for Title
data.frame(term = colnames(xdtm2),tf = colMeans(xdtm2), tfidf = colMeans(xdtm_tfidf2))%>%
arrange(desc(tf))%>%
top_n(10)%>%
gather(key=weighting_method,value=weight,2:3)%>%
ggplot(aes(x=term,y=weight,fill=weighting_method))+
geom_col(position='dodge')+
coord_flip()+
theme_economist()
## - the terms love and great were assigned a much higher weight in the tf method, because they occurred in most of the titles
## - but were assigned a lower weight in the tf-idf method, because they have little diagnostic value, since they occur in most titles
## - See visualization graph
######################################
# Part 4: Add Rating back to dataframe of features (one modelling frame per weighting)
# for Review.Text
clothes_data = cbind(Rating = data$Rating, xdtm)
clothes_data_tfidf = cbind(Rating = data$Rating, xdtm_tfidf)
# for Title
clothes_data2 = cbind(Rating = data$Rating,xdtm2)
clothes_data_tfidf2 = cbind(Rating = data$Rating,xdtm_tfidf2)
######################################
# Part 5: WordCloud from the prepared corpus set (removing all stop words, punctuations, etc)
# for Review.Text
set.seed(123)
wordcloud(corpus, scale=c(6,0.5), max.words=170, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, 'Dark2'))
## - See visualization wordcloud
###############################################################################################################
###############################################################################################################
## SECTION 5: Predictive Modelling (CART and Regression) using only text columns 'Review.Text' and 'Title'
# Part 1: Predictive Models (using TF)
# for Review.Text — 75/25 random split, seeded for reproducibility.
set.seed(617)
split = sample(1:nrow(clothes_data), size = 0.75*nrow(clothes_data))
train = clothes_data[split,]
test = clothes_data[-split,]
# CART Method
library(rpart); library(rpart.plot)
tree = rpart(Rating~.,train)
rpart.plot(tree)
pred_tree = predict(tree,newdata=test)
rmse_tree = round(sqrt(mean((pred_tree - test$Rating)^2)),5); rmse_tree
## - See visualization Tree
## - RMSE = 1.009915
# Regression Method
reg = lm(Rating~.,train)
pred_reg = predict(reg, newdata=test)
rmse_reg = round(sqrt(mean((pred_reg-test$Rating)^2)),5); rmse_reg
## - RMSE = 0.9013822
# for Title — same procedure on the Title TF features.
set.seed(617)
split = sample(1:nrow(clothes_data2), size = 0.75*nrow(clothes_data2))
train2 = clothes_data2[split,]
test2 = clothes_data2[-split,]
# CART Method
tree2 = rpart(Rating~.,train2)
rpart.plot(tree2)
pred_tree2 = predict(tree2,newdata=test2)
rmse_tree2 = round(sqrt(mean((pred_tree2 - test2$Rating)^2)),5); rmse_tree2
## - See visualization Tree
## - RMSE = 1.075686
# Regression Method
reg2 = lm(Rating~.,train2)
pred_reg2 = predict(reg2, newdata=test2)
rmse_reg2 = round(sqrt(mean((pred_reg2-test2$Rating)^2)),5); rmse_reg2
## - RMSE = 1.06697
## - Title is also not a bad predictor as well, the rmse lies within close range of Review.Text. But Review.Text gives the lowest rmse.
######################################
# Part 2: Predictive Models (using TF-IDF) — mirrors Part 1 with tf-idf features.
# for Review.Text
set.seed(617)
split = sample(1:nrow(clothes_data_tfidf), size = 0.75*nrow(clothes_data_tfidf))
train = clothes_data_tfidf[split,]
test = clothes_data_tfidf[-split,]
# CART Method
tree = rpart(Rating~.,train)
rpart.plot(tree)
pred_tree = predict(tree,newdata=test)
rmse_tree_idf = round(sqrt(mean((pred_tree - test$Rating)^2)),5); rmse_tree_idf
## - RMSE = 1.009915
## - See visualization Tree
# Regression Method
reg = lm(Rating~.,train)
pred_reg = predict(reg, newdata=test)
rmse_reg_idf = round(sqrt(mean((pred_reg-test$Rating)^2)),5); rmse_reg_idf
## - RMSE = 0.9013822
# for Title
set.seed(617)
split = sample(1:nrow(clothes_data_tfidf2), size = 0.75*nrow(clothes_data_tfidf2))
train2 = clothes_data_tfidf2[split,]
test2 = clothes_data_tfidf2[-split,]
# CART Method
tree2 = rpart(Rating~.,train2)
rpart.plot(tree2)
pred_tree2 = predict(tree2,newdata=test2)
rmse_tree2_idf = round(sqrt(mean((pred_tree2 - test2$Rating)^2)),5); rmse_tree2_idf
## - RMSE = 1.075686
## - See visualization Tree
# Regression Method
reg2 = lm(Rating~.,train2)
pred_reg2 = predict(reg2, newdata=test2)
rmse_reg2_idf = round(sqrt(mean((pred_reg2-test2$Rating)^2)),5); rmse_reg2_idf
## - RMSE = 1.06697
# Summary tables of all four RMSE results (first row left blank as a header spacer).
rmse_review_text_df = data.frame(for_Review.Text = c("Method", "TF", "TF-IDF"),CART_RMSE = c(" ", rmse_tree, rmse_tree_idf),
Regression_RMSE = c(" ", rmse_reg, rmse_reg_idf))
rmse_title_df = data.frame(for_Title = c("Method", "TF", "TF-IDF"),CART_RMSE = c(" ", rmse_tree2, rmse_tree2_idf),
Regression_RMSE = c(" ", rmse_reg2, rmse_reg2_idf))
rmse_review_text_df
rmse_title_df
## - Both methods, i.e., TF and TF-IDF give the exact same RMSE for both 'Review.Text' and 'Title'.
## - 'Review.Text' always gives lower rmse than any method used for 'Title'. So we should use 'Review.Text' going forward.
## - For best rmse, we need to use the regression method of predictive modelling, but we might need to compare results from TF and TF-IDF methods.
###############################################################################################################
###############################################################################################################
# SECTION 6: Clustering and Predictive Modelling using clustering techniques, except all text columns, dendrogram for text columns clustering
# Part 1: Prepare Data for Cluster Analysis
library(caret)
set.seed(617)
# Stratified 75/25 split on Rating (groups = 100 controls the quantile binning).
split = createDataPartition(y=data$Rating,p = 0.75,list = F,groups = 100)
train = data[split,]
test = data[-split,]
# Drop identifiers and all text/categorical columns; keep numeric predictors + Rating.
train = subset(train, select = -c(id, Clothing.ID, Title, Review.Text, Division.Name, Department.Name, Class.Name, bins))
test = subset(test, select = -c(id, Clothing.ID, Title, Review.Text, Division.Name, Department.Name, Class.Name, bins))
# Simple Regression
linear = lm(Rating~.,train)
summary(linear)
sseLinear = sum(linear$residuals^2); sseLinear # in-sample (training) SSE, printed then overwritten below
predLinear = predict(linear,newdata=test)
sseLinear = sum((predLinear-test$Rating)^2); sseLinear # test SSE — this is the value compared against clusters later
# Cluster and Regression: strip the outcome before clustering on predictors.
trainMinusDV = subset(train,select=-c(Rating))
testMinusDV = subset(test,select=-c(Rating))
######################################
# Part 2: Hierarchical and k-means Cluster Analysis
# Hierarchical clustering with Ward linkage on Euclidean distances.
distances = dist(trainNorm,method = 'euclidean')
clusters = hclust(d = distances,method = 'ward.D2')
library(dendextend)
plot(color_branches(cut(as.dendrogram(clusters), h = 20)$upper), k = 3, groupLabels = F) # displaying clusters with tree above 20
rect.hclust(tree=clusters,k = 3,border='red')
## - Based on the plot, a 3 cluster solution looks good.
# NOTE(review): the plot and rect.hclust above use k = 3, but cutree is run
# with k = 2 (matching the 2-center kmeans comparison below). Confirm whether
# k = 2 here is intentional or should be 3.
clusterGroups = cutree(clusters,k=2)
# install.packages('psych')
# Visualize the cluster assignments on the first two varimax-rotated factors.
library(psych)
# Run the factor analysis once and reuse its score matrix: the original called
# fa() twice with identical arguments, doing the same expensive fit twice.
factorScores = fa(trainNorm,nfactors = 2,rotate = 'varimax')$scores
temp = data.frame(cluster = factor(clusterGroups),
                  factor1 = factorScores[,1],
                  factor2 = factorScores[,2])
ggplot(temp,aes(x=factor1,y=factor2,col=cluster))+
  geom_point()
## - See visualization graph
# k-means clustering
# Seed set so the random starts of kmeans are reproducible.
set.seed(617)
km = kmeans(x = trainNorm,centers = 2,iter.max=10000,nstart=100)
km$centers
mean(km$cluster==clusterGroups) # %match between results of hclust and kmeans
# Total within sum of squares Plot (elbow plot over k = 1..10)
within_ss = sapply(1:10,FUN = function(x) kmeans(x = trainNorm,centers = x,iter.max = 1000,nstart = 25)$tot.withinss)
ggplot(data=data.frame(cluster = 1:10,within_ss),aes(x=cluster,y=within_ss))+ geom_line(col='steelblue',size=1.2)+
geom_point()+ scale_x_continuous(breaks=seq(1,10,1))
# Ratio Plot (between-cluster SS / total SS for k = 1..10)
ratio_ss = sapply(1:10,FUN = function(x) {km = kmeans(x = trainNorm,centers = x,iter.max = 1000,nstart = 25)
km$betweenss/km$totss} )
ggplot(data=data.frame(cluster = 1:10,ratio_ss),aes(x=cluster,y=ratio_ss))+ geom_line(col='steelblue',size=1.2)+
geom_point()+ scale_x_continuous(breaks=seq(1,10,1))
# Silhouette Plot (variable name keeps the original "silhoette" spelling so the
# commented-out plot below still matches)
library(cluster)
silhoette_width = sapply(2:10,FUN = function(x) pam(x = trainNorm,k = x)$silinfo$avg.width)
#ggplot(data=data.frame(cluster = 2:10,silhoette_width),aes(x=cluster,y=silhoette_width))+ # takes too much time
# geom_line(col='steelblue',size=1.2)+ geom_point()+ scale_x_continuous(breaks=seq(2,10,1))
######################################
# Part 3: Apply to test, and Compare Results
# Set the centers as 3
set.seed(617)
km = kmeans(x = trainNorm,centers = 3,iter.max=10000,nstart=100)
# install.packages('flexclust')
library(flexclust)
km_kcca = as.kcca(km,trainNorm) # flexclust uses objects of the classes kcca
clusterTrain = predict(km_kcca)
clusterTest = predict(km_kcca,newdata=testNorm)
table(clusterTrain)
table(clusterTest)
# Split train and test based on cluster membership.
# BUG FIX: k-means was run with 3 centers, but only clusters 1 and 2 were
# subset originally, silently dropping every cluster-3 row from the models
# AND from the combined SSE, so the cluster-level SSE was computed over a
# smaller test set than sseLinear (an invalid comparison). All three
# clusters are now included; the SSE values previously quoted in comments
# will therefore change.
train1 = subset(train,clusterTrain==1)
train2 = subset(train,clusterTrain==2)
train3 = subset(train,clusterTrain==3)
test1 = subset(test,clusterTest==1)
test2 = subset(test,clusterTest==2)
test3 = subset(test,clusterTest==3)
# Predict for each Cluster then Combine
lm1 = lm(Rating~.,train1)
lm2 = lm(Rating~.,train2)
lm3 = lm(Rating~.,train3)
pred1 = predict(lm1,newdata=test1)
pred2 = predict(lm2,newdata=test2)
pred3 = predict(lm3,newdata=test3)
sse1 = sum((test1$Rating-pred1)^2); sse1
sse2 = sum((test2$Rating-pred2)^2); sse2
sse3 = sum((test3$Rating-pred3)^2); sse3
predOverall = c(pred1,pred2,pred3)
RatingOverall = c(test1$Rating,test2$Rating,test3$Rating)
sseOverall = sum((predOverall - RatingOverall)^2); sseOverall
# Compare Results ('sseLinear' comes from the full-data model fit earlier)
paste('SSE for model on entire data',sseLinear)
paste('SSE for model on clusters',sseOverall)
## - Prediction using clusters is more accurate when its SSE is lower.
######################################
# Part 4: Predict Using Tree, and Compare Results
# Simple Tree on the full training data
library(rpart); library(rpart.plot)
tree = rpart(Rating~.,train,minbucket=10)
predTree = predict(tree,newdata=test)
sseTree = sum((predTree - test$Rating)^2); sseTree
# Cluster Then Predict Using Tree (same three-cluster split fixed above)
tree1 = rpart(Rating~.,train1,minbucket=10)
tree2 = rpart(Rating~.,train2,minbucket=10)
tree3 = rpart(Rating~.,train3,minbucket=10)
pred1 = predict(tree1,newdata=test1)
pred2 = predict(tree2,newdata=test2)
pred3 = predict(tree3,newdata=test3)
sse1 = sum((test1$Rating-pred1)^2); sse1
sse2 = sum((test2$Rating-pred2)^2); sse2
sse3 = sum((test3$Rating-pred3)^2); sse3
predTreeCombine = c(pred1,pred2,pred3)
RatingOverall = c(test1$Rating,test2$Rating,test3$Rating)
sseTreeCombine = sum((predTreeCombine - RatingOverall)^2); sseTreeCombine
# Compare Results
paste('SSE for model on entire data',sseTree)
paste('SSE for model on clusters',sseTreeCombine)
## - Cluster-then-predict is more accurate when its SSE is lower than the
##   single-model SSE computed over the same (full) test set.
######################################
# Part 5: Clustering, and Dendrogram from cleaned corpus, of 'Review.Text' and 'Title'
# We had defined 'xdtm_cluster' as the cleaned corpus earlier in Line 478
# 'Review.Text'
#hc = hclust(d = dist(xdtm_cluster, method = "euclidean"), method = "complete") # this takes massive time to run
#plot(hc)
# 'Title'
# Complete-linkage hierarchical clustering of the 'Title' document-term matrix
# ('xdtm2_cluster' is assumed defined earlier, alongside 'xdtm_cluster').
hc = hclust(d = dist(xdtm2_cluster, method = "euclidean"), method = "complete")
plot(hc)
## - See visualization graph
###############################################################################################################
###############################################################################################################
# SECTION 7: Looking at Future, what else we could have done.
# 1: In-depth Cluster Analysis of text columns, using detailed scatterplots
# For clustering and prediction Modelling using the text column 'Review.Text', the following code can be used.
# Source 1 - https://gist.github.com/luccitan/b74c53adfe3b6dad1764af1cdc1f08b7
# Source 2 - https://medium.com/@SAPCAI/text-clustering-with-r-an-introduction-for-data-scientists-c406e7454e76
# We had defined 'xdtm_cluster' as the cleaned corpus earlier in Line 478, which will be used here for converting to matrix, etc... as per the code given.
######################################
# 2: Further detailed exploratory analysis
# Source - https://www.kaggle.com/dubravkodolic/reviews-of-clothings-analyzed-by-sentiments
# Source - https://www.kaggle.com/cosinektheta/mining-the-women-s-clothing-reviews
######################################
# 3: More prediction models, to evaluate better rmse measures
# Source - https://www.kaggle.com/ankitppn/logistic-regression-and-random-forest-models/output
#################################### T H E E N D ####################################
|
###########################################################################/**
# @RdocGeneric callNaiveGenotypes
# @alias callNaiveGenotypes.numeric
#
# @title "Calls genotypes in a normal sample"
#
# \description{
# @get "title".
# }
#
# \usage{
# @usage callNaiveGenotypes,numeric
# }
#
# \arguments{
# \item{y}{A @numeric @vector of length J containing allele B fractions
# for a normal sample.}
# \item{cn}{An optional @numeric @vector of length J specifying the true
# total copy number in \eqn{\{0,1,2,NA\}} at each locus. This can be
# used to specify which loci are diploid and which are not, e.g.
# autosomal and sex chromosome copy numbers.}
# \item{...}{Additional arguments passed to @see "fitNaiveGenotypes".}
# \item{modelFit}{A optional model fit as returned
# by @see "fitNaiveGenotypes".}
# \item{verbose}{A @logical or a @see "R.utils::Verbose" object.}
# }
#
# \value{
# Returns a @numeric @vector of length J containing the genotype calls
# in allele B fraction space, that is, in [0,1] where 1/2 corresponds
# to a heterozygous call, and 0 and 1 corresponds to homozygous A
# and B, respectively.
# Non called genotypes have value @NA.
# }
#
# @examples "../incl/callNaiveGenotypes.Rex"
#
# \section{Missing and non-finite values}{
# A missing value always gives a missing (@NA) genotype call.
# Negative infinity (-@Inf) always gives genotype call 0.
# Positive infinity (+@Inf) always gives genotype call 1.
# }
#
# @author
#
# \seealso{
# Internally @see "fitNaiveGenotypes" is used to identify the thresholds.
# }
#*/###########################################################################
setMethodS3("callNaiveGenotypes", "numeric", function(y, cn=rep(2L, times=length(y)), ..., modelFit=NULL, verbose=FALSE) {
  # Calls naive genotypes (in allele B fraction space) from BAFs 'y'.
  #
  # Arguments:
  #   y        - numeric vector of length J with allele B fractions.
  #   cn       - integer vector (scalar or length J) of true total copy
  #              numbers, restricted to {0, 1, 2, NA}.
  #   ...      - additional arguments passed to fitNaiveGenotypes().
  #   modelFit - optional NaiveGenotypeModelFit; fitted from 'y' when NULL.
  #   verbose  - logical or R.utils::Verbose object.
  #
  # Returns:
  #   Numeric vector of length J with calls in {0, 1/2, 1, NA}, with the
  #   model fit attached as attribute "modelFit".

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'y':
  J <- length(y);
  y <- as.double(y);

  # Argument 'cn': recycle a scalar, otherwise require length J.
  cn <- as.integer(cn);
  if (length(cn) == 1L) {
    cn <- rep(cn, times=J);
  } else if (length(cn) != J) {
    stop("The length of argument 'cn' does not match 'y': ",
         length(cn), " != ", J);
  }
  # Only CN levels 0, 1, 2 and NA are supported.
  uniqueCNs <- sort(unique(cn));
  unknown <- which(!is.element(uniqueCNs, c(0,1,2,NA)));
  if (length(unknown) > 0L) {
    unknown <- paste(uniqueCNs[unknown], collapse=", ");
    stop("Argument 'cn' contains unknown CN levels: ", unknown);
  }

  # Argument 'modelFit':
  if (!is.null(modelFit)) {
    if (!inherits(modelFit, "NaiveGenotypeModelFit")) {
      throw("Argument 'modelFit' is not of class NaiveGenotypeModelFit: ", class(modelFit)[1]);
    }
  }

  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose);
  if (verbose) {
    pushState(verbose);
    on.exit(popState(verbose));
  }

  verbose && enter(verbose, "Calling genotypes from allele B fractions (BAFs)");

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Fit naive genotype model?
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if (is.null(modelFit)) {
    verbose && enter(verbose, "Fitting naive genotype model");
    modelFit <- fitNaiveGenotypes(y=y, cn=cn, ..., verbose=verbose);
    verbose && print(verbose, modelFit);
    verbose && exit(verbose);
  }

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Call genotypes, one true-CN level at a time
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  mu <- rep(NA_real_, times=J);
  # To please R CMD check
  type <- NULL; rm(list="type");
  # CN levels for which the model was actually fitted.
  cns <- sapply(modelFit, FUN=function(fit) fit$cn);
  # MODERNIZATION: seq_along() replaces the older seq(along=...) idiom.
  for (kk in seq_along(uniqueCNs)) {
    cnKK <- uniqueCNs[kk];
    verbose && enter(verbose, sprintf("Copy number level #%d (C=%g) of %d", kk, cnKK, length(uniqueCNs)));

    # Special case: BAF is undefined when the total copy number is zero.
    if (cnKK == 0) {
      verbose && cat(verbose, "TCN=0 => BAF not defined. Skipping.");
      verbose && exit(verbose);
      next;
    }

    keep <- which(cn == cnKK);
    yKK <- y[keep];

    # Locate the (single) model fit for this CN level; skip if absent.
    idx <- which(cnKK == cns);
    if (length(idx) != 1L) {
      msg <- sprintf("Cannot call genotypes for %d loci with true total copy number %d, because the naive genotype model was not fit for such copy numbers. Skipping.", length(yKK), cnKK);
      verbose && cat(verbose, msg);
      verbose && exit(verbose);
      next;
    }
    fitKK <- modelFit[[idx]];
    verbose && cat(verbose, "Model fit:");
    verbose && print(verbose, fitKK);

    tau <- fitKK$tau;
    if (is.null(tau)) {
      # Backward compatibility: older fits stored thresholds as 'fitValleys$x'.
      fitValleys <- fitKK$fitValleys;
      # TYPO FIX: "Local minimas" -> "Local minima" in the verbose message.
      verbose && cat(verbose, "Local minima (\"valleys\") in BAF:");
      verbose && print(verbose, fitValleys);
      tau <- fitValleys$x;
      # Not needed anymore
      fitValleys <- NULL;
    }
    # TYPO FIX: "threshholds" -> "thresholds" in the verbose message.
    verbose && printf(verbose, "Genotype thresholds [%d]: %s\n", length(tau), hpaste(tau));

    # Call genotypes by thresholding yKK at the fitted valleys 'tau'.
    muKK <- rep(NA_real_, times=length(yKK));
    if (cnKK == 1) {
      # One threshold: A vs B.
      verbose && cat(verbose, "TCN=1 => BAF in {0,1}.");
      a <- tau[1];
      verbose && printf(verbose, "Call regions: A = (-Inf,%.3f], B = (%.3f,+Inf)\n", a, a);
      muKK[yKK <= a] <- 0;
      muKK[a < yKK] <- 1;
    } else if (cnKK == 2) {
      # Two thresholds: AA vs AB vs BB.
      verbose && cat(verbose, "TCN=2 => BAF in {0,1/2,1}.");
      a <- tau[1];
      b <- tau[2];
      verbose && printf(verbose, "Call regions: AA = (-Inf,%.3f], AB = (%.3f,%.3f], BB = (%.3f,+Inf)\n", a, a, b, b);
      muKK[yKK <= a] <- 0;
      muKK[a < yKK & yKK <= b] <- 1/2;
      muKK[b < yKK] <- 1;
    } else {
      verbose && printf(verbose, "TCN=%d => Skipping.\n", cnKK);
    }
    mu[keep] <- muKK;

    verbose && exit(verbose);
  } # for (kk ...)

  # Sanity check
  stopifnot(length(mu) == J);

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Return genotype calls (and parameter estimates)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  attr(mu, "modelFit") <- modelFit;

  verbose && exit(verbose);

  mu;
}) # callNaiveGenotypes()
###########################################################################
# HISTORY:
# 2012-04-16
# o CLEANUP: Dropped argument 'flavor' of callNaiveGenotypes(); it is
# now passed to fitNaiveGenotypes() via '...'.
# o GENERALIZATION: Now callNaiveGenotypes() no longer relies on 'modelFit'
# to hold a 'fitValleys' element, but rather a 'tau' element.
# 2010-10-14
# o TYPO FIX: Used name 'fitPeaks' instead of 'fitValleys'.
# 2010-10-07
# o Now callNaiveGenotypes() utilizes fitNaiveGenotypes().
# o Added more detailed verbose to callNaiveGenotypes().
# 2010-07-23
# o Now callNaiveGenotypes() returns the model estimates as attribute
# 'modelFit'.
# 2010-04-04
# o Updated code such that R.utils::Verbose is optional.
# o Corrected an Rdoc tag typo.
# 2009-11-03
# o Added an example() to the Rd help of callNaiveGenotypes().
# 2009-07-08
# o BUG FIX: Was never tested. Now tested via example(normalizeTumorBoost).
# 2009-07-06
# o Created from aroma.cn test script.
###########################################################################
|
/R/callNaiveGenotypes.R
|
no_license
|
HenrikBengtsson/aroma.light-BioC_release
|
R
| false
| false
| 7,173
|
r
|
###########################################################################/**
# @RdocGeneric callNaiveGenotypes
# @alias callNaiveGenotypes.numeric
#
# @title "Calls genotypes in a normal sample"
#
# \description{
# @get "title".
# }
#
# \usage{
# @usage callNaiveGenotypes,numeric
# }
#
# \arguments{
# \item{y}{A @numeric @vector of length J containing allele B fractions
# for a normal sample.}
# \item{cn}{An optional @numeric @vector of length J specifying the true
# total copy number in \eqn{\{0,1,2,NA\}} at each locus. This can be
# used to specify which loci are diploid and which are not, e.g.
# autosomal and sex chromosome copy numbers.}
# \item{...}{Additional arguments passed to @see "fitNaiveGenotypes".}
# \item{modelFit}{A optional model fit as returned
# by @see "fitNaiveGenotypes".}
# \item{verbose}{A @logical or a @see "R.utils::Verbose" object.}
# }
#
# \value{
# Returns a @numeric @vector of length J containing the genotype calls
# in allele B fraction space, that is, in [0,1] where 1/2 corresponds
# to a heterozygous call, and 0 and 1 corresponds to homozygous A
# and B, respectively.
# Non called genotypes have value @NA.
# }
#
# @examples "../incl/callNaiveGenotypes.Rex"
#
# \section{Missing and non-finite values}{
# A missing value always gives a missing (@NA) genotype call.
# Negative infinity (-@Inf) always gives genotype call 0.
# Positive infinity (+@Inf) always gives genotype call 1.
# }
#
# @author
#
# \seealso{
# Internally @see "fitNaiveGenotypes" is used to identify the thresholds.
# }
#*/###########################################################################
# callNaiveGenotypes() for numeric vectors: calls naive genotypes from allele
# B fractions 'y' given true total copy numbers 'cn' in {0,1,2,NA}. Returns a
# numeric vector of length(y) with calls in {0, 1/2, 1, NA} and the model fit
# attached as attribute "modelFit". Relies on R.methodsS3/R.utils machinery
# (setMethodS3, Arguments, enter/exit verbose helpers).
setMethodS3("callNaiveGenotypes", "numeric", function(y, cn=rep(2L, times=length(y)), ..., modelFit=NULL, verbose=FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'y':
J <- length(y);
y <- as.double(y);
# Argument 'cn': a scalar is recycled to length J; otherwise length must match.
cn <- as.integer(cn);
if (length(cn) == 1L) {
cn <- rep(cn, times=J);
} else if (length(cn) != J) {
stop("The length of argument 'cn' does not match 'y': ",
length(cn), " != ", J);
}
# Only CN levels 0, 1, 2 and NA are supported.
uniqueCNs <- sort(unique(cn));
unknown <- which(!is.element(uniqueCNs, c(0,1,2,NA)));
if (length(unknown) > 0L) {
unknown <- paste(uniqueCNs[unknown], collapse=", ");
stop("Argument 'cn' contains unknown CN levels: ", unknown);
}
# Argument 'modelFit':
if (!is.null(modelFit)) {
if (!inherits(modelFit, "NaiveGenotypeModelFit")) {
throw("Argument 'modelFit' is not of class NaiveGenotypeModelFit: ", class(modelFit)[1]);
}
}
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
verbose && enter(verbose, "Calling genotypes from allele B fractions (BAFs)");
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Fit naive genotype model?
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (is.null(modelFit)) {
verbose && enter(verbose, "Fitting naive genotype model");
modelFit <- fitNaiveGenotypes(y=y, cn=cn, ..., verbose=verbose);
verbose && print(verbose, modelFit);
verbose && exit(verbose);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Call genotypes, one true-CN level at a time
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
mu <- rep(NA_real_, times=J);
# To please R CMD check
type <- NULL; rm(list="type");
# Fitted CNs
cns <- sapply(modelFit, FUN=function(fit) fit$cn);
for (kk in seq(along=uniqueCNs)) {
cnKK <- uniqueCNs[kk];
verbose && enter(verbose, sprintf("Copy number level #%d (C=%g) of %d", kk, cnKK, length(uniqueCNs)));
# Special case: BAF is undefined when the total copy number is zero.
if (cnKK == 0) {
verbose && cat(verbose, "TCN=0 => BAF not defined. Skipping.");
verbose && exit(verbose);
next;
}
keep <- which(cn == cnKK);
yKK <- y[keep];
# Locate the (single) model fit for this CN level; skip loci if absent.
idx <- which(cnKK == cns);
if (length(idx) != 1L) {
msg <- sprintf("Cannot call genotypes for %d loci with true total copy number %d, because the naive genotype model was not fit for such copy numbers. Skipping.", length(yKK), cnKK);
verbose && cat(verbose, msg);
verbose && exit(verbose);
next;
}
fitKK <- modelFit[[idx]];
verbose && cat(verbose, "Model fit:");
verbose && print(verbose, fitKK);
tau <- fitKK$tau;
if (is.null(tau)) {
# Backward compatibility: older fits stored thresholds as 'fitValleys$x'.
fitValleys <- fitKK$fitValleys;
verbose && cat(verbose, "Local minimas (\"valleys\") in BAF:");
verbose && print(verbose, fitValleys);
tau <- fitValleys$x;
# Not needed anymore
fitValleys <- NULL;
}
# NOTE(review): "threshholds" below is a typo in the log message (kept as-is).
verbose && printf(verbose, "Genotype threshholds [%d]: %s\n", length(tau), hpaste(tau));
# Call genotypes by thresholding yKK at the fitted valleys 'tau'.
muKK <- rep(NA_real_, times=length(yKK));
if (cnKK == 1) {
# One threshold: A vs B.
verbose && cat(verbose, "TCN=1 => BAF in {0,1}.");
a <- tau[1];
verbose && printf(verbose, "Call regions: A = (-Inf,%.3f], B = (%.3f,+Inf)\n", a, a);
muKK[yKK <= a] <- 0;
muKK[a < yKK] <- 1;
} else if (cnKK == 2) {
# Two thresholds: AA vs AB vs BB.
verbose && cat(verbose, "TCN=2 => BAF in {0,1/2,1}.");
a <- tau[1];
b <- tau[2];
verbose && printf(verbose, "Call regions: AA = (-Inf,%.3f], AB = (%.3f,%.3f], BB = (%.3f,+Inf)\n", a, a, b, b);
muKK[yKK <= a] <- 0;
muKK[a < yKK & yKK <= b] <- 1/2;
muKK[b < yKK] <- 1;
} else {
verbose && printf(verbose, "TCN=%d => Skipping.\n", cnKK);
}
mu[keep] <- muKK;
verbose && exit(verbose);
} # for (kk ...)
# Sanity check
stopifnot(length(mu) == J);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Return genotype calls (and parameter estimates)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
attr(mu, "modelFit") <- modelFit;
verbose && exit(verbose);
mu;
}) # callNaiveGenotypes()
###########################################################################
# HISTORY:
# 2012-04-16
# o CLEANUP: Dropped argument 'flavor' of callNaiveGenotypes(); it is
# now passed to fitNaiveGenotypes() via '...'.
# o GENERALIZATION: Now callNaiveGenotypes() no longer relies on 'modelFit'
# to hold a 'fitValleys' element, but rather a 'tau' element.
# 2010-10-14
# o TYPO FIX: Used name 'fitPeaks' instead of 'fitValleys'.
# 2010-10-07
# o Now callNaiveGenotypes() utilizes fitNaiveGenotypes().
# o Added more detailed verbose to callNaiveGenotypes().
# 2010-07-23
# o Now callNaiveGenotypes() returns the model estimates as attribute
# 'modelFit'.
# 2010-04-04
# o Updated code such that R.utils::Verbose is optional.
# o Corrected an Rdoc tag typo.
# 2009-11-03
# o Added an example() to the Rd help of callNaiveGenotypes().
# 2009-07-08
# o BUG FIX: Was never tested. Now tested via example(normalizeTumorBoost).
# 2009-07-06
# o Created from aroma.cn test script.
###########################################################################
|
# Capstone Project
# File: toBenchDir.R
# Set working directory to the benchmark data directory
# NOTE(review): hard-codes $HOME/git/NLPCapstone and mutates the session's
# working directory (setwd) as a deliberate side effect; it will error if
# the directory does not exist on this machine.
prj.dir <- file.path(Sys.getenv("HOME"),"git","NLPCapstone")
download.dir <- "nlpData.dir"; bench.dir <- "bench"
bench.dir <- file.path(prj.dir,download.dir,bench.dir)  # 'bench.dir' reused: now the full path
setwd(bench.dir)
print(paste("Current directory: ",getwd()))
|
/toBenchDir.R
|
no_license
|
gamercier/NLPCapstone
|
R
| false
| false
| 326
|
r
|
# Capstone Project
# File: toBenchDir.R
# Move this R session into the benchmark data directory
# (<HOME>/git/NLPCapstone/nlpData.dir/bench) and report where we ended up.
# Changing the global working directory is the purpose of this script.
home_dir <- Sys.getenv("HOME")
project_dir <- file.path(home_dir, "git", "NLPCapstone")
benchmark_dir <- file.path(project_dir, "nlpData.dir", "bench")
setwd(benchmark_dir)
print(paste("Current directory: ", getwd()))
|
# Auto-extracted Rd example for bedr::is.sorted.region.
library(bedr)
### Name: is.sorted.region
### Title: checks if region file is sorted
### Aliases: is.sorted.region
### Keywords: ~kwd1
### ** Examples
# Guard: run the example only when the 'bedtools' binary is available.
if (check.binary("bedtools")) {
index <- get.example.regions();
a <- index[[1]];
# Per the Title above, 'b' indicates whether region set 'a' is sorted.
b <- is.sorted.region(a);
}
|
/data/genthat_extracted_code/bedr/examples/is.sorted.region.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 268
|
r
|
# Auto-extracted Rd example for bedr::is.sorted.region (duplicate copy).
library(bedr)
### Name: is.sorted.region
### Title: checks if region file is sorted
### Aliases: is.sorted.region
### Keywords: ~kwd1
### ** Examples
# Guard: run the example only when the 'bedtools' binary is available.
if (check.binary("bedtools")) {
index <- get.example.regions();
a <- index[[1]];
b <- is.sorted.region(a);
}
|
# Test thtPower.R
#
# NOTE(review): the absolute /home/kwabena/... paths make this suite
# machine-specific; relative paths (or here::here()) would be more portable.
library("testthat")
library(stringr)
path_to_source <- "/home/kwabena/Documents/trafin/lovy/power/src/main/R/"
source(str_glue(path_to_source, "thtPower.R"))
path_to_data <- "/home/kwabena/Documents/trafin/lovy/power/src/main/Data"
describe("thtPower.R", {
describe("getThtBased(colName)", {
# Fixture: sounding data for station 'pr' on 2015-12-01; the stopifnot
# guards that exactly 162 rows were loaded before testing.
dfSounding <- getData(dataref="sounding", station_name="pr") %>% filter(dateofsounding == as.Date("2015-12-01"))
stopifnot(nrow(dfSounding) == 162)
paramsList = paramsList(station_name="pr")
dfThta <- getThtBased(paramsList, dfSounding, "thta")
# Contract under test: result gains a 'powerThta' column.
it("expects to return a dataframe with the correct data", {
expect_true( 'powerThta' %in% colnames(dfThta) )
})
})
})
|
/power/src/main/R/thtPower_tests.R
|
no_license
|
fbrute/lovy
|
R
| false
| false
| 778
|
r
|
# Test thtPower.R (duplicate copy)
#
# NOTE(review): absolute paths make this suite machine-specific.
library("testthat")
library(stringr)
path_to_source <- "/home/kwabena/Documents/trafin/lovy/power/src/main/R/"
source(str_glue(path_to_source, "thtPower.R"))
path_to_data <- "/home/kwabena/Documents/trafin/lovy/power/src/main/Data"
describe("thtPower.R", {
describe("getThtBased(colName)", {
# Fixture: sounding data for station 'pr' on 2015-12-01 (expects 162 rows).
dfSounding <- getData(dataref="sounding", station_name="pr") %>% filter(dateofsounding == as.Date("2015-12-01"))
stopifnot(nrow(dfSounding) == 162)
paramsList = paramsList(station_name="pr")
dfThta <- getThtBased(paramsList, dfSounding, "thta")
it("expects to return a dataframe with the correct data", {
expect_true( 'powerThta' %in% colnames(dfThta) )
})
})
})
|
# Auto-extracted Rd example for checkarg::isZeroOrNanVector.
library(checkarg)
### Name: isZeroOrNanVector
### Title: Wrapper for the checkarg function, using specific parameter
### settings.
### Aliases: isZeroOrNanVector
### ** Examples
isZeroOrNanVector(0)
# returns TRUE (argument is valid)
isZeroOrNanVector("X")
# returns FALSE (argument is invalid)
#isZeroOrNanVector("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isZeroOrNanVector(0, default = NaN)
# returns 0 (the argument, rather than the default, since it is not NULL)
#isZeroOrNanVector("X", default = NaN)
# throws exception with message defined by message and argumentName parameters
isZeroOrNanVector(NULL, default = NaN)
# returns NaN (the default, rather than the argument, since it is NULL)
|
/data/genthat_extracted_code/checkarg/examples/isZeroOrNanVector.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 783
|
r
|
# Auto-extracted Rd example for checkarg::isZeroOrNanVector (duplicate copy).
library(checkarg)
### Name: isZeroOrNanVector
### Title: Wrapper for the checkarg function, using specific parameter
### settings.
### Aliases: isZeroOrNanVector
### ** Examples
isZeroOrNanVector(0)
# returns TRUE (argument is valid)
isZeroOrNanVector("X")
# returns FALSE (argument is invalid)
#isZeroOrNanVector("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isZeroOrNanVector(0, default = NaN)
# returns 0 (the argument, rather than the default, since it is not NULL)
#isZeroOrNanVector("X", default = NaN)
# throws exception with message defined by message and argumentName parameters
isZeroOrNanVector(NULL, default = NaN)
# returns NaN (the default, rather than the argument, since it is NULL)
|
# Package test runner: executes the testthat suite bundled with 'ssid'.
library(testthat)
library(ssid)
test_check("ssid")
|
/tests/testthat.R
|
permissive
|
JingjieSong/ssid
|
R
| false
| false
| 52
|
r
|
# Standard testthat entry point for R CMD check of the 'ssid' package.
library(testthat)
library(ssid)
test_check("ssid")
|
# Unit tests
library(GenomicDistributions)
context("Testthat context...")
#############################################################################
# Test data should be with toy examples you can work out by hand
# that way you can calculate by hand and compare to the output of the function
# toy data for testing functions
# if altered, tests relying on these objects will be disrupted
# Helpers such as dtToGr(), data.table() and copy() come from the package
# namespace / its imports (data.table, GenomicRanges).
start1 = c(seq(from=1, to = 2001, by = 1000), 800)
start2 = c(seq(from=126, to = 2126, by = 1000), 100, 2500)
chrString1 = c(rep("chr1", 3), "chr2")
chrString2 = c(chrString1, "chr3")
origCoordDT1 = data.table(chr=chrString1,
start = start1,
end = start1 + 250)
origCoordDT2 = data.table(chr=chrString2,
start=start2,
end=start2+150)
# Working copies; 'orig*' stay pristine so tests can reset state.
coordDT1 = copy(origCoordDT1)
coordDT2 = copy(origCoordDT2)
testGR1 = dtToGr(coordDT1)
testGR2 = dtToGr(coordDT2)
# Shifted variants used as exon/3'UTR/5'UTR stand-ins in later tests.
testGR3 = GenomicRanges::shift(testGR2, 1000)
testGR4 = GenomicRanges::shift(testGR2, 2500)
testGR5 = GenomicRanges::shift(testGR2, 4000)
###############################################################################
# test for calcOLCount
# reset test data in case it was changed by another unit test section
coordDT1 = copy(origCoordDT1)
coordDT2 = copy(origCoordDT2)
testGR1 = dtToGr(coordDT1)
testGR2 = dtToGr(coordDT2)
test_that("calcOLCount", {
# uses midpoint coordinate of queryRegionDT
# One 2-bin GRanges per chromosome; expected counts worked out by hand.
testGRList = GRangesList(dtToGr(data.table(chr=c("chr1", "chr1"),
start = c(1, 2001),
end = c(2000, 4000))),
dtToGr(data.table(chr=c("chr2", "chr2"),
start = c(1, 2001),
end = c(2000, 4000))),
dtToGr(data.table(chr=c("chr3", "chr3"),
start = c(1, 2001),
end = c(2000, 4000))))
olCount1 = calcOLCount(queryRegionDT = coordDT2, regionsGRL = testGRList)
expect_equal(olCount1$N, c(2, 1, 1, 1))
expect_equal(olCount1$regionGroupID, c(1, 1, 2, 3))
# only expect one overlap: chr2
olCount2 = calcOLCount(coordDT2, dtToGr(data.table(chr=c("chr1", "chr1", "chr2"),
start = c(1, 250, 170),
end = c(150, 300, 180))))
olCount2=as.data.frame(olCount2)
expectedOut = data.frame(regionID=3, chr="chr2", start=170, end=180, withinGroupID=3, regionGroupID=1, N=1, stringsAsFactors = FALSE)
expect_equal(olCount2, expectedOut)
})
# "featureDistanceDistribution" function is now named "calcFeatureDist"
# reset test data in case it was changed by another unit test section
# and select just one chromosome - since DTNearest is help function calculating
# distances within one chromosome
coordDT1 = copy(origCoordDT1)
coordDT2 = copy(origCoordDT2)
testGR1 = dtToGr(coordDT1)
testGR2 = dtToGr(coordDT2)
test_that("featureDistribution", {
############# old
# queryFile = system.file("extdata", "setB_100.bed.gz", package="GenomicDistributions")
# query = rtracklayer::import(queryFile)
#
# featureExample = GenomicRanges::shift(query, round(rnorm(length(query), 0,1000)))
# fdd = featureDistanceDistribution(query, featureExample)
# featureFile = system.file("extdata", "vistaEnhancers.bed.gz", package="GenomicDistributions")
# feats = rtracklayer::import(featureFile)
#' featureDistance = featureDistanceDistribution(query, feats)
#' expect_equal(sum(is.na(featureDistance)), -3)
#' expect_equal(sum(featureDistance, na.rm=TRUE), 743969)
############# old
# Perturb the first two regions so nearest-distances are hand-computable.
coordDT1$end[1] = 100
coordDT1$start[2] = 200
coordDT1$end[2] = 400
testGR1 = dtToGr(coordDT1)
# DTNearest
# @param DT1 data.table Has start and end column
# @param DT2
# @return numeric vector. Distance from region set to closest other region set.
# Distance from the midpointof each region to the midpoint.
nearestVec = DTNearest(coordDT1, coordDT2)
nearestVec
expect_equal(nearestVec, c(124, -99, 276, 75))
# DTNearest ignores chromosome completely. By design.
# DTNearest shouldn't be used with data from different chromosomes.
# Suggested to split by chromosome when such case presents (e.g chrom1).
DT1chrom1 = coordDT1[coordDT1$chr == "chr1"]
DT2chrom1 = coordDT2[coordDT2$chr == "chr1"]
nearestVec2C1 = DTNearest(DT2chrom1, DT1chrom1)
expect_equal(nearestVec2C1, c(99, -901, -75))
# calcFeatureDist handles per-chromosome splitting itself.
featureDistance = calcFeatureDist(testGR1, testGR2)
featureDistance
expect_equal(featureDistance, c(150, -99, 75, -750))
# Asymmetric: swapping query/feature gives signed distances the other way;
# chr3 has no feature in testGR1, hence the trailing NA.
featureDistance2 = calcFeatureDist(testGR2, testGR1)
featureDistance2
expect_equal(featureDistance2, c( 99, -901, -75, 750, NA))
# coordDT1$chr = "chr2"
# testGR1 = dtToGr(coordDT1)
# featureDistance = calcFeatureDist(testGR1, testGR2)
# featureDistance
# featureDistance2 = calcFeatureDist(testGR2, testGR1)
# featureDistance2
})
#' queryDT = GenomicDistributions:::grToDt(query)
#' featureDT = GenomicDistributions:::grToDt(features)
#' queryDTs = GenomicDistributions:::splitDataTable(queryDT, "chr")
#' featureDTs = GenomicDistributions:::splitDataTable(featureDT, "chr")
#' as.vector(unlist(mapply(queryDTs, featureDTs[names(queryDTs)], FUN=DTNearest)))
# Smoke test only: runs aggregateOverGenomeBins on bundled data without
# asserting on the result.
test_that("Genome aggregate", {
queryFile = system.file("extdata", "vistaEnhancers.bed.gz", package="GenomicDistributions")
query = rtracklayer::import(queryFile)
# First, calculate the distribution:
x = aggregateOverGenomeBins(query, "hg19")
# Then, plot the result:
# plotGenomeAggregate(x)
})
# "genomicPartitions" function changed to "calcPartitionsRef"
test_that("Partitions", {
################### old
#queryFile = system.file("extdata", "vistaEnhancers.bed.gz", package="GenomicDistributions")
#query = rtracklayer::import(queryFile)
#gp = genomicPartitions(query, "hg38")
#gp = genomicPartitions(query, "hg19")
#gp = genomicPartitions(query, "mm10")
#gp = genomicPartitions(query, "mm9")
#plotPartitions(gp)
################### old
# test calcPartitions()
# GenomePartitionList
# Build the expected partition list by hand (same steps the package takes):
# core/proximal promoters from testGR2; exon/intron derived by set
# differences so partition classes do not overlap.
promCore = trim(promoters(testGR2, upstream=100, downstream=0))
promProx = trim(promoters(testGR2, upstream=2000, downstream=0))
promoterProx = GenomicRanges::setdiff(promProx, promCore)
# remove any possible overlaps between classes
testGR5 = GenomicRanges::setdiff(testGR5, testGR4)
testGR3 = GenomicRanges::setdiff(testGR3, testGR4)
testGR3 = GenomicRanges::setdiff(testGR3, testGR5)
nonThree = GenomicRanges::setdiff(testGR2, testGR4)
nonThreeFive = GenomicRanges::setdiff(nonThree, testGR5)
intronGR = GenomicRanges::setdiff(nonThreeFive, testGR3)
partList = list(promoterCore=trim(promoters(testGR2, upstream=100, downstream=0)),
promoterProx=promoterProx,
threeUTR=testGR4,
fiveUTR=testGR5,
exon=testGR3,
intron=intronGR)
gp = genomePartitionList(testGR2, testGR3, testGR4, testGR5)
expect_equal(gp, partList)
# calcPartitions
# Re-implement the first-match partition assignment by hand, then compare
# against calcPartitions(); classes absent from the toy data get Freq = 0.
partition = rep(0, length(testGR1))
for (i in seq_along(partList)) {
ols = countOverlaps(testGR1[partition==0], partList[[i]])
partition[partition==0][ols > 0] = names(partList)[[i]]
}
partition[partition=="0"] = "intergenic"
testPartitions = data.frame(table(partition))
testPartitionNames = c("promoterCore", "promoterProx", "threeUTR", "fiveUTR",
"exon", "intron", "intergenic")
if (!all(testPartitionNames %in% testPartitions$partition)){
notIncluded = testPartitionNames[!(testPartitionNames %in%
testPartitions$partition)]
addRows = data.frame(partition = notIncluded,
Freq = rep(0, length(notIncluded)))
testPartitions = rbind(testPartitions, addRows)
}
Partitions = calcPartitions(testGR1, partList)
expect_equal(Partitions, testPartitions)
})
test_that("Neighbor distances", {
# Split the sorted toy regions by chromosome and check pairwise neighbor
# distances on the first chromosome.
testGRdt = grToDt(sort(testGR1))
splitdt = splitDataTable(testGRdt, "chr")
chromTest = splitdt[[1]]
# Compare bp distance generated by neighbordt
distancesExp = neighbordt(chromTest)
# Calculated by hand c(750, 750)
expect_equal(distancesExp, c(750, 750))
# Compare log transformed distances from calcNeighborDist
# (calcNeighborDist is expected to return log10 of the bp distances).
logdistancesExp = calcNeighborDist(testGR1)
expect_equal(logdistancesExp, log10(c(750, 750)))
})
|
/tests/testthat/test_all.R
|
no_license
|
GenomicsNX/GenomicDistributions
|
R
| false
| false
| 8,854
|
r
|
# Unit tests (duplicate copy of the GenomicDistributions test file)
library(GenomicDistributions)
context("Testthat context...")
#############################################################################
# Test data should be with toy examples you can work out by hand
# that way you can calculate by hand and compare to the output of the function
# toy data for testing functions
# if altered, tests relying on these objects will be disrupted
start1 = c(seq(from=1, to = 2001, by = 1000), 800)
start2 = c(seq(from=126, to = 2126, by = 1000), 100, 2500)
chrString1 = c(rep("chr1", 3), "chr2")
chrString2 = c(chrString1, "chr3")
origCoordDT1 = data.table(chr=chrString1,
start = start1,
end = start1 + 250)
origCoordDT2 = data.table(chr=chrString2,
start=start2,
end=start2+150)
# Working copies; 'orig*' stay pristine so tests can reset state.
coordDT1 = copy(origCoordDT1)
coordDT2 = copy(origCoordDT2)
testGR1 = dtToGr(coordDT1)
testGR2 = dtToGr(coordDT2)
testGR3 = GenomicRanges::shift(testGR2, 1000)
testGR4 = GenomicRanges::shift(testGR2, 2500)
testGR5 = GenomicRanges::shift(testGR2, 4000)
###############################################################################
# test for calcOLCount
# reset test data in case it was changed by another unit test section
coordDT1 = copy(origCoordDT1)
coordDT2 = copy(origCoordDT2)
testGR1 = dtToGr(coordDT1)
testGR2 = dtToGr(coordDT2)
test_that("calcOLCount", {
# uses midpoint coordinate of queryRegionDT
# One 2-bin GRanges per chromosome; expected counts worked out by hand.
testGRList = GRangesList(dtToGr(data.table(chr=c("chr1", "chr1"),
start = c(1, 2001),
end = c(2000, 4000))),
dtToGr(data.table(chr=c("chr2", "chr2"),
start = c(1, 2001),
end = c(2000, 4000))),
dtToGr(data.table(chr=c("chr3", "chr3"),
start = c(1, 2001),
end = c(2000, 4000))))
olCount1 = calcOLCount(queryRegionDT = coordDT2, regionsGRL = testGRList)
expect_equal(olCount1$N, c(2, 1, 1, 1))
expect_equal(olCount1$regionGroupID, c(1, 1, 2, 3))
# only expect one overlap: chr2
olCount2 = calcOLCount(coordDT2, dtToGr(data.table(chr=c("chr1", "chr1", "chr2"),
start = c(1, 250, 170),
end = c(150, 300, 180))))
olCount2=as.data.frame(olCount2)
expectedOut = data.frame(regionID=3, chr="chr2", start=170, end=180, withinGroupID=3, regionGroupID=1, N=1, stringsAsFactors = FALSE)
expect_equal(olCount2, expectedOut)
})
# "featureDistanceDistribution" function is now named "calcFeatureDist"
# reset test data in case it was changed by another unit test section
# and select just one chromosome - since DTNearest is help function calculating
# distances within one chromosome
coordDT1 = copy(origCoordDT1)
coordDT2 = copy(origCoordDT2)
testGR1 = dtToGr(coordDT1)
testGR2 = dtToGr(coordDT2)
test_that("featureDistribution", {
############# old
# queryFile = system.file("extdata", "setB_100.bed.gz", package="GenomicDistributions")
# query = rtracklayer::import(queryFile)
#
# featureExample = GenomicRanges::shift(query, round(rnorm(length(query), 0,1000)))
# fdd = featureDistanceDistribution(query, featureExample)
# featureFile = system.file("extdata", "vistaEnhancers.bed.gz", package="GenomicDistributions")
# feats = rtracklayer::import(featureFile)
#' featureDistance = featureDistanceDistribution(query, feats)
#' expect_equal(sum(is.na(featureDistance)), -3)
#' expect_equal(sum(featureDistance, na.rm=TRUE), 743969)
############# old
# Perturb the first two regions so nearest-distances are hand-computable.
coordDT1$end[1] = 100
coordDT1$start[2] = 200
coordDT1$end[2] = 400
testGR1 = dtToGr(coordDT1)
# DTNearest
# @param DT1 data.table Has start and end column
# @param DT2
# @return numeric vector. Distance from region set to closest other region set.
# Distance from the midpointof each region to the midpoint.
nearestVec = DTNearest(coordDT1, coordDT2)
nearestVec
expect_equal(nearestVec, c(124, -99, 276, 75))
# DTNearest ignores chromosome completely. By design.
# DTNearest shouldn't be used with data from different chromosomes.
# Suggested to split by chromosome when such case presents (e.g chrom1).
DT1chrom1 = coordDT1[coordDT1$chr == "chr1"]
DT2chrom1 = coordDT2[coordDT2$chr == "chr1"]
nearestVec2C1 = DTNearest(DT2chrom1, DT1chrom1)
expect_equal(nearestVec2C1, c(99, -901, -75))
featureDistance = calcFeatureDist(testGR1, testGR2)
featureDistance
expect_equal(featureDistance, c(150, -99, 75, -750))
# chr3 has no feature in testGR1, hence the trailing NA below.
featureDistance2 = calcFeatureDist(testGR2, testGR1)
featureDistance2
expect_equal(featureDistance2, c( 99, -901, -75, 750, NA))
# coordDT1$chr = "chr2"
# testGR1 = dtToGr(coordDT1)
# featureDistance = calcFeatureDist(testGR1, testGR2)
# featureDistance
# featureDistance2 = calcFeatureDist(testGR2, testGR1)
# featureDistance2
})
#' queryDT = GenomicDistributions:::grToDt(query)
#' featureDT = GenomicDistributions:::grToDt(features)
#' queryDTs = GenomicDistributions:::splitDataTable(queryDT, "chr")
#' featureDTs = GenomicDistributions:::splitDataTable(featureDT, "chr")
#' as.vector(unlist(mapply(queryDTs, featureDTs[names(queryDTs)], FUN=DTNearest)))
# Smoke test only: runs aggregateOverGenomeBins without asserting on the result.
test_that("Genome aggregate", {
queryFile = system.file("extdata", "vistaEnhancers.bed.gz", package="GenomicDistributions")
query = rtracklayer::import(queryFile)
# First, calculate the distribution:
x = aggregateOverGenomeBins(query, "hg19")
# Then, plot the result:
# plotGenomeAggregate(x)
})
# "genomicPartitions" function changed to "calcPartitionsRef"
test_that("Partitions", {
################### old
#queryFile = system.file("extdata", "vistaEnhancers.bed.gz", package="GenomicDistributions")
#query = rtracklayer::import(queryFile)
#gp = genomicPartitions(query, "hg38")
#gp = genomicPartitions(query, "hg19")
#gp = genomicPartitions(query, "mm10")
#gp = genomicPartitions(query, "mm9")
#plotPartitions(gp)
################### old
# test calcPartitions()
# GenomePartitionList
# Build the expected partition list by hand; set differences keep the
# partition classes non-overlapping.
promCore = trim(promoters(testGR2, upstream=100, downstream=0))
promProx = trim(promoters(testGR2, upstream=2000, downstream=0))
promoterProx = GenomicRanges::setdiff(promProx, promCore)
# remove any possible overlaps between classes
testGR5 = GenomicRanges::setdiff(testGR5, testGR4)
testGR3 = GenomicRanges::setdiff(testGR3, testGR4)
testGR3 = GenomicRanges::setdiff(testGR3, testGR5)
nonThree = GenomicRanges::setdiff(testGR2, testGR4)
nonThreeFive = GenomicRanges::setdiff(nonThree, testGR5)
intronGR = GenomicRanges::setdiff(nonThreeFive, testGR3)
partList = list(promoterCore=trim(promoters(testGR2, upstream=100, downstream=0)),
promoterProx=promoterProx,
threeUTR=testGR4,
fiveUTR=testGR5,
exon=testGR3,
intron=intronGR)
gp = genomePartitionList(testGR2, testGR3, testGR4, testGR5)
expect_equal(gp, partList)
# calcPartitions
# Re-implement first-match partition assignment by hand and compare;
# classes absent from the toy data get Freq = 0 rows appended.
partition = rep(0, length(testGR1))
for (i in seq_along(partList)) {
ols = countOverlaps(testGR1[partition==0], partList[[i]])
partition[partition==0][ols > 0] = names(partList)[[i]]
}
partition[partition=="0"] = "intergenic"
testPartitions = data.frame(table(partition))
testPartitionNames = c("promoterCore", "promoterProx", "threeUTR", "fiveUTR",
"exon", "intron", "intergenic")
if (!all(testPartitionNames %in% testPartitions$partition)){
notIncluded = testPartitionNames[!(testPartitionNames %in%
testPartitions$partition)]
addRows = data.frame(partition = notIncluded,
Freq = rep(0, length(notIncluded)))
testPartitions = rbind(testPartitions, addRows)
}
Partitions = calcPartitions(testGR1, partList)
expect_equal(Partitions, testPartitions)
})
# Verifies pairwise neighbor distances on the sorted test regions, both as
# raw base-pair gaps (neighbordt) and as log10-transformed distances
# (calcNeighborDist).
test_that("Neighbor distances", {
  sortedDT <- grToDt(sort(testGR1))
  perChrom <- splitDataTable(sortedDT, "chr")
  firstChrom <- perChrom[[1]]
  # Raw bp distance between successive regions; calculated by hand: 750, 750.
  rawDists <- neighbordt(firstChrom)
  expect_equal(rawDists, c(750, 750))
  # calcNeighborDist returns the same distances on a log10 scale.
  logDists <- calcNeighborDist(testGR1)
  expect_equal(logDists, log10(c(750, 750)))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enpy.R
\name{prinsens}
\alias{prinsens}
\title{Principal Sensitivity Components}
\usage{
prinsens(
x,
y,
alpha,
lambda,
intercept = TRUE,
penalty_loadings,
en_algorithm_opts,
eps = 1e-06,
sparse = FALSE,
ncores = 1L,
method = deprecated()
)
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
\item{y}{vector of response values of length \code{n}.}
\item{alpha}{elastic net penalty mixing parameter with \eqn{0 \le \alpha \le 1}.
\code{alpha = 1} is the LASSO penalty, and \code{alpha = 0} the Ridge penalty.
Can be a vector of several values, but \code{alpha = 0} cannot be mixed with other values.}
\item{lambda}{optional user-supplied sequence of penalization levels. If given and not \code{NULL},
\code{nlambda} and \code{lambda_min_ratio} are ignored.}
\item{intercept}{include an intercept in the model.}
\item{penalty_loadings}{a vector of positive penalty loadings (a.k.a. weights) for different
penalization of each coefficient. Only allowed for \code{alpha} > 0.}
\item{en_algorithm_opts}{options for the LS-EN algorithm. See \link{en_algorithm_options} for details.}
\item{eps}{numerical tolerance.}
\item{sparse}{use sparse coefficient vectors.}
\item{ncores}{number of CPU cores to use in parallel. By default, only one CPU core is used.
Not supported on all platforms, in which case a warning is given.}
\item{method}{defunct. PSCs are always computed for EN estimates. For the PY procedure for unpenalized estimation
use package \href{https://cran.r-project.org/package=pyinit}{pyinit}.}
}
\value{
a list of principal sensitivity components, one per element in \code{lambda}. Each PSC is itself a list
with items \code{lambda}, \code{alpha}, and \code{pscs}.
}
\description{
Compute Principal Sensitivity Components for Elastic Net Regression
}
\references{
Cohen Freue, G.V.; Kepplinger, D.; Salibián-Barrera, M.; Smucler, E.
Robust elastic net estimators for variable selection and identification of proteomic biomarkers.
\emph{Ann. Appl. Stat.} \strong{13} (2019), no. 4, 2065--2090 \doi{10.1214/19-AOAS1269}
Pena, D., and Yohai, V.J.
A Fast Procedure for Outlier Diagnostics in Large Regression Problems.
\emph{J. Amer. Statist. Assoc.} \strong{94} (1999). no. 446, 434--445. \doi{10.2307/2670164}
}
\seealso{
Other functions for initial estimates:
\code{\link{enpy_initial_estimates}()},
\code{\link{starting_point}()}
}
\concept{functions for initial estimates}
|
/man/prinsens.Rd
|
no_license
|
cran/pense
|
R
| false
| true
| 2,519
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enpy.R
\name{prinsens}
\alias{prinsens}
\title{Principal Sensitivity Components}
\usage{
prinsens(
x,
y,
alpha,
lambda,
intercept = TRUE,
penalty_loadings,
en_algorithm_opts,
eps = 1e-06,
sparse = FALSE,
ncores = 1L,
method = deprecated()
)
}
\arguments{
\item{x}{\code{n} by \code{p} matrix of numeric predictors.}
\item{y}{vector of response values of length \code{n}.}
\item{alpha}{elastic net penalty mixing parameter with \eqn{0 \le \alpha \le 1}.
\code{alpha = 1} is the LASSO penalty, and \code{alpha = 0} the Ridge penalty.
Can be a vector of several values, but \code{alpha = 0} cannot be mixed with other values.}
\item{lambda}{optional user-supplied sequence of penalization levels. If given and not \code{NULL},
\code{nlambda} and \code{lambda_min_ratio} are ignored.}
\item{intercept}{include an intercept in the model.}
\item{penalty_loadings}{a vector of positive penalty loadings (a.k.a. weights) for different
penalization of each coefficient. Only allowed for \code{alpha} > 0.}
\item{en_algorithm_opts}{options for the LS-EN algorithm. See \link{en_algorithm_options} for details.}
\item{eps}{numerical tolerance.}
\item{sparse}{use sparse coefficient vectors.}
\item{ncores}{number of CPU cores to use in parallel. By default, only one CPU core is used.
Not supported on all platforms, in which case a warning is given.}
\item{method}{defunct. PSCs are always computed for EN estimates. For the PY procedure for unpenalized estimation
use package \href{https://cran.r-project.org/package=pyinit}{pyinit}.}
}
\value{
a list of principal sensitivity components, one per element in \code{lambda}. Each PSC is itself a list
with items \code{lambda}, \code{alpha}, and \code{pscs}.
}
\description{
Compute Principal Sensitivity Components for Elastic Net Regression
}
\references{
Cohen Freue, G.V.; Kepplinger, D.; Salibián-Barrera, M.; Smucler, E.
Robust elastic net estimators for variable selection and identification of proteomic biomarkers.
\emph{Ann. Appl. Stat.} \strong{13} (2019), no. 4, 2065--2090 \doi{10.1214/19-AOAS1269}
Pena, D., and Yohai, V.J.
A Fast Procedure for Outlier Diagnostics in Large Regression Problems.
\emph{J. Amer. Statist. Assoc.} \strong{94} (1999). no. 446, 434--445. \doi{10.2307/2670164}
}
\seealso{
Other functions for initial estimates:
\code{\link{enpy_initial_estimates}()},
\code{\link{starting_point}()}
}
\concept{functions for initial estimates}
|
#' test_all
#'
#' A function which stress tests an R function with user-defined inputs:
#' every combination of the supplied argument values is tried, errors are
#' collected, and argument values that fail disproportionately often are
#' suggested as likely culprits.
#'
#' @param fun an R function to test
#' @param input a grid created with \code{\link{lazy_tester}}
#' @param output not implemented yet
#' @param cores an integer specifying the number of cores to use
#'   (currently unused)
#'
#' @return Returns a table with error information, i.e.
#' \itemize{
#' \item The function call (arguments)
#' \item In how many occurrences it threw an error
#' }
#' @import dplyr purrr reshape2 rlist
#' @export
#'
#' @examples
#' # NOT RUN:
#' test_all(mean, list(x = c(1,2,3)))
test_all <- function(fun, input, output = NULL, cores = 1) {
  # -- checking inputs -------------------------------------------------
  if (!is.function(fun)) {
    stop(paste0(sQuote('fun'), " has to be of class function."))
  }
  if (!is.list(input)) {
    stop(paste0(sQuote('input'), " has to be of class list"))
  }
  # Extract the function name as the caller supplied it (for reporting).
  fun_name <- as.character(as.list(match.call())$fun)
  # Run a single call of `fun` with `args`; return "success" or the
  # cleaned-up error message (classed 'error').
  test_single <- function(fun, args) {
    tested <- tryCatch({do.call("fun", args=args)},
      error = function(e) {
        e <- as.character(e)
        # Strip the "Error in <call>:" prefix, keeping only the message.
        if (grepl("[:]", e))
          e <- gsub(".*[[:blank:]]?[:][[:blank:]]?(.*)", "\\1", e)
        e <- gsub("\n", "", e)
        class(e) <- c(class(e), 'error')
        return(e)
      })
    if (inherits(tested, 'error')) {
      return(tested)
    } else {
      return("success")
    }
  }
  # Expand the input grid: one argument list per combination.
  tests <- do.call(what = list.expand, args = input)
  # Preallocate as character: every outcome is either "success" or an
  # error message. (The previous logical preallocation was silently
  # coerced on first assignment.)
  errors <- character(length(tests))
  for (i in seq_along(tests)) {
    errors[i] <- test_single(fun = fun, args = tests[[i]])
  }
  # Indices of failing combinations (direct vector comparison replaces
  # the former !sapply(..., function(x) x == "success")).
  stats <- which(errors != "success")
  # Attach each outcome to its argument combination.
  tests <- lapply(seq_along(tests), function(i) {
    tests[[i]]$test_all_errors <- errors[[i]]
    return(tests[[i]])})
  # Flatten to a character data frame: one row per combination.
  tests_char <- as.data.frame(do.call(rbind, lapply(tests, function(x) as.character(x))))
  names(tests_char) <- names(tests[[1]])
  # Long format: one row per (combination, argument) pair.
  tests_long <- suppressWarnings(reshape2::melt(tests_char, 'test_all_errors'))
  tests_long$call <- paste0(tests_long$variable, " = ", tests_long$value)
  # Number of distinct tested values per argument; single-valued
  # arguments carry no information about failures.
  level_list <- tests_long %>%
    group_by(variable) %>%
    summarize(n = length(unique(value)))
  test_errors <- tests_long %>%
    filter(variable %in% as.character(level_list$variable[level_list$n > 1]))
  # Contingency table: argument value x outcome.
  error_table <- as.data.frame.matrix(table(test_errors$call, test_errors$test_all_errors))
  error_table$argument <- gsub("(.*) = .*", "\\1", rownames(error_table))
  error_table$call <- rownames(error_table)
  # Error metric: how much more often a value fails than expected under a
  # uniform split across the argument's values (./sum(.) - 1/n()).
  # funs() is deprecated since dplyr 0.8; use a formula lambda instead.
  error_rel <- suppressMessages(error_table %>%
    group_by(argument) %>%
    mutate_if(is.numeric, ~ . / sum(.) - 1 / n()) %>%
    ungroup())
  # For each error column, suggest the argument value with the highest
  # relative failure rate.
  this_cols <- error_rel %>% select_if(is.numeric) %>%
    names %>% setdiff(., "success")
  sug_list <- error_rel %>% select_if(is.numeric) %>%
    dplyr::select(one_of(this_cols)) %>%
    map(function(x, df) {
      which_max <- function(x) {
        which(x == max(x, na.rm = TRUE))
      }
      ind <- which_max(x)
      arguments <- df[ind, "call"]
      data.frame(arguments)
    }, df = error_rel)
  # Assemble the summary object.
  out <- list(tests = tests[stats],
              suggestion = sug_list,
              fun = fun_name)
  class(out) <- 'testall_summary'
  return(out)
}
|
/R/test_all.R
|
no_license
|
andremonaco/testall
|
R
| false
| false
| 3,964
|
r
|
#' test_all
#' A function which stress tests an R function with user-defined inputs.
#' @param fun an R function to test
#' @param input a grid created with \code{\link{lazy_tester}}
#' @param output not implemented yet
#' @param cores an integer specifying the number of cores to use
#'
#' @return Returns a table with error information, i.e.
#' \itemize{
#' \item The function call (arguments)
#' \item In how many occurrences it threw an error
#' }
#' @import dplyr purrr reshape2 rlist
#' @export
#'
#' @examples
#' # NOT RUN:
#' test_all(mean, list(x = c(1,2,3)))
test_all <- function(fun, input, output = NULL, cores = 1) {
# checking inputs
if (!is.function(fun)) {
stop(paste0(sQuote('fun'), " has to be of class function."))
}
if (!is.list(input)) {
stop(paste0(sQuote('input'), " has to be of class list"))
}
# extract function name (as supplied by the caller, for reporting)
fun_name <- as.character(as.list(match.call())$fun)
# Test a single function call: returns "success" or the cleaned-up
# error message (classed 'error').
test_single <- function(fun, args) {
tested <- tryCatch({do.call("fun", args=args)},
error = function(e) {
e <- as.character(e)
# strip the "Error in <call>:" prefix, keep only the message
if (grepl("[:]", e))
e <- gsub(".*[[:blank:]]?[:][[:blank:]]?(.*)", "\\1", e)
e <- gsub("\n", "", e)
class(e) <- c(class(e), 'error')
return(e)
})
if (inherits(tested, 'error')) {
return(tested)
} else {
return("success")
}
}
# Expand the input grid: one argument list per combination.
tests <- do.call(what = list.expand, args = input)
# NOTE(review): preallocated as a logical vector but then assigned
# character outcomes -- R silently coerces; character(length(tests))
# would be clearer.
errors <- vector(length = length(tests))
for (i in seq_len(length(tests))) {
errors[i] <- test_single(fun = fun,
#args = lapply(tests[[i]], function(x) x[1])
args = tests[[i]]
)
}
# catch errors (indices of failing combinations)
stats <- which(!sapply(errors, function(x) x == "success"))
# add errors: attach each outcome to its argument combination
#tests$test_all_errors <- unlist(errors)
tests <- lapply(seq_along(tests), function(i) {
tests[[i]]$test_all_errors <- errors[[i]]
return(tests[[i]])})
# characterize everything: one character row per combination
#tests_char <- data.frame(lapply(tests, function(x) as.character(x)))
tests_char <- as.data.frame(do.call(rbind, lapply(tests, function(x) as.character(x))))
names(tests_char) <- names(tests[[1]])
# long format: one row per (combination, argument) pair
tests_long <- suppressWarnings(reshape2::melt(tests_char, 'test_all_errors'))
tests_long$call <- paste0(tests_long$variable, " = ", tests_long$value)
# extract levels (distinct tested values per argument)
level_list <- tests_long %>%
group_by(variable) %>%
summarize(n = length(unique(value)))
# extract error elements; single-valued arguments carry no information
test_errors <- tests_long %>%
filter(variable %in% as.character(level_list$variable[level_list$n > 1]))
# contingency table: argument value x outcome
error_table <- as.data.frame.matrix(table(test_errors$call, test_errors$test_all_errors))
error_table$argument <- gsub("(.*) = .*", "\\1", rownames(error_table))
error_table$call <- rownames(error_table)
# this is the error metric at this time, when the argument fails compared to
# the relative frequency of the argument
# NOTE(review): funs() is deprecated since dplyr 0.8; the formula lambda
# ~ ./sum(.) - 1/n() is the modern equivalent.
error_rel <- suppressMessages(error_table %>%
group_by(argument) %>%
mutate_if(is.numeric, funs(./sum(.)-1/n())) %>%
ungroup())
# extract the suggestion (error_rel > 0) from the error table:
# per error column, the argument value with the highest relative failure rate
this_cols <- error_rel %>% select_if(is.numeric) %>%
names %>% setdiff(., "success")
sug_list <- error_rel %>% select_if(is.numeric) %>%
dplyr::select(one_of(this_cols)) %>%
map(function(x, df) {
which_max <- function(x) {
which(x == max(x, na.rm = TRUE))
}
ind <- which_max(x)
arguments <- df[ind, "call"]
data.frame(arguments)
}, df = error_rel)
# return elements as a classed summary object
out <- list(tests = tests[stats],
suggestion = sug_list,
fun = fun_name)
class(out) <- 'testall_summary'
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMCMCResults.forArray3D.R
\name{plotMCMCResults.forArray3D}
\alias{plotMCMCResults.forArray3D}
\title{Calculate and plot a posterior density based on a 3d MCMC data array.}
\usage{
plotMCMCResults.forArray3D(data, scaleBy = 1, doPlot = TRUE,
plotEst = FALSE, add = TRUE, colorscale = c("coldhot", "hot", "cold",
"jet", NULL), alpha = 0.25, xlim = NULL, ylim = NULL, xlabel = "",
ylabel = "", label = "")
}
\arguments{
\item{data}{- the MCMC data array from which to estimate the posterior densities.}
\item{scaleBy}{- factor to scale data by}
\item{plotEst}{- flag (T/F) to plot the MLE estimate (assumed to be 1st value)}
\item{add}{- flag (T/F) to add to existing plot (creates new plot if FALSE)}
\item{colorscale}{- color scale to use for the density plot}
\item{alpha}{- transparency value to apply to the colorscale}
\item{xlim}{- x axis limits (if add=FALSE)}
\item{ylim}{- y axis limits (if add=FALSE)}
\item{xlabel}{- label for x axis (if add=FALSE)}
\item{label}{- label for plot (if add=FALSE)}
}
\description{
Function to calculate and plot posterior densities based on a 3d MCMC data array.
}
\details{
Uses functions
\itemize{
\item wtsUtilities::createColorScale(...)
}
}
|
/man/plotMCMCResults.forArray3D.Rd
|
permissive
|
wStockhausen/rTCSAM2015
|
R
| false
| true
| 1,284
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMCMCResults.forArray3D.R
\name{plotMCMCResults.forArray3D}
\alias{plotMCMCResults.forArray3D}
\title{Calculate and plot a posterior density based on a 3d MCMC data array.}
\usage{
plotMCMCResults.forArray3D(data, scaleBy = 1, doPlot = TRUE,
plotEst = FALSE, add = TRUE, colorscale = c("coldhot", "hot", "cold",
"jet", NULL), alpha = 0.25, xlim = NULL, ylim = NULL, xlabel = "",
ylabel = "", label = "")
}
\arguments{
\item{data}{- the MCMC data array from which to estimate the posterior densities.}
\item{scaleBy}{- factor to scale data by}
\item{plotEst}{- flag (T/F) to plot the MLE estimate (assumed to be 1st value)}
\item{add}{- flag (T/F) to add to existing plot (creates new plot if FALSE)}
\item{colorscale}{- color scale to use for the density plot}
\item{alpha}{- transparency value to apply to the colorscale}
\item{xlim}{- x axis limits (if add=FALSE)}
\item{ylim}{- y axis limits (if add=FALSE)}
\item{xlabel}{- label for x axis (if add=FALSE)}
\item{label}{- label for plot (if add=FALSE)}
}
\description{
Function to calculate and plot posterior densities based on a 3d MCMC data array.
}
\details{
Uses functions
\itemize{
\item wtsUtilities::createColorScale(...)
}
}
|
# Printing hi as many times as I want
for (i in seq(1, 10)) {
  print("hi")
}

# How much money Tim has in his piggy bank
# It works
# How much Tim has in his piggy bank at the first week
InitialAmmount <- 10
# His weekly allowance
WeeklyAllowence <- 5
# The price of his gum
GumPrice <- 1.34
# The loop: each week he gains his allowance and buys two packs of gum
NewAmmount <- InitialAmmount
for (n in seq(1, 8)) {
  FirstAmmount <- NewAmmount + (WeeklyAllowence - (2 * GumPrice))
  NewAmmount <- FirstAmmount
  print(NewAmmount)
}

# Population decay over time via an exponential function
# It works
# The starting population
InitPop <- 2000
# The size of the population relative to the previous year
Decay <- 0.95
FirstPop <- InitPop
# The loop
for (y in seq(1, 7)) {
  NewPop <- FirstPop * Decay
  FirstPop <- NewPop
}

# Logistic growth
# The starting population
Starting <- 2500
# The growth rate
r <- 0.8
# The carrying capacity
k <- 10000
# Preallocate all 100 years up front. (The original rep(Starting, 98)
# relied on R silently extending the vector when writing n[99] and n[100];
# results are identical because the loop overwrites every slot past n[1].)
n <- rep(Starting, 100)
for (t in seq(2, 100)) {
  n[t] <- n[t - 1] + (r * n[t - 1] * (k - n[t - 1]) / k)
}
print(n[12])
# The population at year 12 is 9999.85 and rounded up it hit 10000

# Part II
# Producing a sequence of 18 zeros
zeros <- rep(0, 18)
print(zeros)
# Producing a sequence where the value of an element is equal to its
# position times 3 (the original comment said "times 5" but the code
# multiplies by 3)
for (i in seq(1, 18)) {
  zeros[i] <- i * 3
}
# BUG FIX: the original printed n (the logistic-growth vector) here, not
# the sequence that was just built in `zeros`.
print(zeros)
# Producing a sequence of numbers where each element is equal to the value
# of the previous element multiplied by two plus one
V <- rep(0, 18)
V[1] <- 1
for (i in 2:length(V)) {
  V[i] <- 1 + (V[i - 1] * 2)
}
print(V)
# Producing a sequence of Fibonacci numbers
Fibonacci <- rep(1, 18)
Fibonacci[1] <- 0
for (f in seq(3, 18)) {
  Fibonacci[f] <- Fibonacci[f - 2] + Fibonacci[f - 1]
}
print(Fibonacci)
# Repeat of the population decay experiment but putting the population of
# each year in an element of a vector
InitPopx <- 2000
Decayx <- 0.95
popvector <- rep(1, 7)
popvector[1] <- InitPopx
for (p in seq(2, 7)) {
  popvector[p] <- popvector[p - 1] * Decayx
}
print(popvector)

# Reading a CSV file
# This command reads the CO2 data into a data frame
CarbonDioxide <- read.csv("CO2_data_cut_paste.csv")
# These commands put the year-to-year change of each series in a new frame
CarbonDioxide[1, 8]
Len <- length(CarbonDioxide$Year)
DeltaYear <- CarbonDioxide$Year[2:Len]
DeltaTotal <- rep(0, (Len - 1))
DeltaGas <- rep(0, (Len - 1))
DeltaLiquids <- rep(0, (Len - 1))
DeltaSolids <- rep(0, (Len - 1))
DeltaCement <- rep(0, (Len - 1))
DeltaFlare <- rep(0, (Len - 1))
DeltaCO2 <- data.frame(DeltaYear, DeltaTotal, DeltaGas, DeltaLiquids, DeltaSolids, DeltaCement, DeltaFlare)
for (c in seq(2, 7)) {
  for (o in seq(2, Len)) {
    DeltaCO2[o - 1, c] <- (CarbonDioxide[o, c] - CarbonDioxide[o - 1, c])
  }
}
# Because dividing by 0 gives NaNs, I used the total change instead of percent
# These commands give names to the columns.
# BUG FIX: the original `names(DeltaCO2[1]) -> "Year"` assigned the names
# of a one-column *copy* to a stray variable literally called `Year`; the
# data frame columns were never renamed. Assign into names(DeltaCO2) instead.
names(DeltaCO2)[1] <- "Year"
names(DeltaCO2)[2] <- "Change in Total"
names(DeltaCO2)[3] <- "Change in Gas"
names(DeltaCO2)[4] <- "Change in Liquids"
names(DeltaCO2)[5] <- "Change in Solids"
names(DeltaCO2)[6] <- "Change in Cement"
names(DeltaCO2)[7] <- "Change in Flare"
write.csv(DeltaCO2, "DeltaCO2.csv")
|
/Lab_04/Lab_04.R
|
no_license
|
PatrickMorrison0850/CompBioHomeworkAndLabs
|
R
| false
| false
| 3,137
|
r
|
#Printing hi as many times as I want
for (i in seq(1,10) ) {
print("hi")
}
#How much money Tim has in his piggy bank
#It works
#How much tim has in his piggy bank at the first week
10 -> InitialAmmount
#His weekly allowence
5 -> WeeklyAllowence
#The price of his gum
1.34 -> GumPrice
#The loop: each week he gains his allowance and buys two packs of gum
InitialAmmount -> NewAmmount
for (n in seq(1,8)) {
(NewAmmount + (WeeklyAllowence - (2 * GumPrice))) -> FirstAmmount
FirstAmmount -> NewAmmount
print(NewAmmount)
}
#Population Decay over time via an exponential function
#It works
#The starting population
2000 -> InitPop
#The size of the population relative to the previous year
.95 -> Decay
InitPop -> FirstPop
#The loop
for (y in seq(1,7)){
(FirstPop * Decay) -> NewPop
NewPop -> FirstPop
}
#Logistic Growth
#The starting population
2500 -> Starting
#The growth rate
.8 -> r
#The carrying capacity
10000 -> k
# NOTE(review): only 98 slots are preallocated but the loop writes n[99]
# and n[100]; R silently extends the vector, so the results are unaffected.
rep(Starting, 98) -> n
for (t in seq(2,100)){
n[t] <- n[t - 1] + (r * n[t-1] * (k - n[t-1])/k)
}
print(n[12])
#The population at year 12 is 9999.85 and rounded up it hit 10000
#Part II
#producing a sequence of 18 zeros
rep(0, 18) -> zeros
print(zeros)
#priducing a sequence where the value of an element is equal to its position times 5
# NOTE(review): the comment above says "times 5" but the code multiplies by 3.
for (i in seq(1,18)){
i*3 -> zeros[i]
}
# NOTE(review): this prints n (the logistic-growth vector), not the
# sequence just built in `zeros`; print(zeros) was probably intended.
print(n)
#Producing a sequence of numbers where each element is equal to the value of the previous element mutliplied by two plus one
rep(0, 18) -> V
V[1] <- 1
for (i in 2:length(V)){
(1 + (V[i-1] * 2)) -> V[i]
}
print(V)
#Producing a sequence of Fibanacci numbers
rep(1, 18) -> Fibonacci
0 -> Fibonacci[1]
for(f in seq(3,18)){
Fibonacci[f-2] + Fibonacci[f-1] -> Fibonacci[f]
}
print(Fibonacci)
#Repeat of the population decay experiment but putting the populations in each year in elements of a vector
2000 -> InitPopx
.95 -> Decayx
rep(1,7) -> popvector
InitPopx -> popvector[1]
for (p in seq(2,7)){
popvector[p] <- popvector[p-1]*Decayx
}
print(popvector)
#reading a CSV file
#This command puts the total percent change in a new vector
read.csv("CO2_data_cut_paste.csv") -> CarbonDioxide
#These commands puts the percent change from year to year of the Total
CarbonDioxide[1,8]
Len <- length(CarbonDioxide$Year)
DeltaYear <- CarbonDioxide$Year[2:Len]
DeltaTotal <- rep(0,(Len-1))
DeltaGas <- rep(0,(Len-1))
DeltaLiquids <- rep(0,(Len-1))
DeltaSolids <- rep(0,(Len-1))
DeltaCement <- rep(0,(Len-1))
DeltaFlare <- rep(0,(Len-1))
DeltaCO2 <- data.frame(DeltaYear,DeltaTotal,DeltaGas,DeltaLiquids,DeltaSolids,DeltaCement,DeltaFlare)
for(c in seq(2,7)){
for(o in seq(2,Len)){
DeltaCO2[o-1,c] <- (CarbonDioxide[o,c] - CarbonDioxide[o-1,c])
}
}
#Because deividing by 0 gives NaNs, I used the total change instead of percent
#These commands gives names to the rows
# NOTE(review): `names(DeltaCO2[1]) -> "Year"` assigns the names of a
# one-column *copy* of the frame to a stray variable literally called
# `Year`; the columns of DeltaCO2 are never renamed, so DeltaCO2.csv keeps
# the original column names. Likely intended: names(DeltaCO2)[1] <- "Year".
names(DeltaCO2[1]) -> "Year"
names(DeltaCO2[2]) -> "Change in Total"
names(DeltaCO2[3]) -> "Change in Gas"
names(DeltaCO2[4]) -> "Change in Liquids"
names(DeltaCO2[5]) -> "Change in Solids"
names(DeltaCO2[6]) -> "Change in Cement"
names(DeltaCO2[7]) -> "Change in Flare"
write.csv(DeltaCO2,"DeltaCO2.csv")
|
library(data.table)
library(sparkline)
# ====準備部分====
source(file = "01_Settings/Path.R", local = T, encoding = "UTF-8")
# 感染者ソーステーブルを取得
byDate <- fread(paste0(DATA_PATH, "byDate.csv"), header = T)
byDate[is.na(byDate)] <- 0
byDate$date <- lapply(byDate[, 1], function(x) {
as.Date(as.character(x), format = "%Y%m%d")
})
# 死亡データ
death <- fread(paste0(DATA_PATH, "death.csv"))
death[is.na(death)] <- 0
# 文言データを取得
lang <- fread(paste0(DATA_PATH, "lang.csv"))
langCode <- "ja"
# 都道府県
provinceCode <- fread(paste0(DATA_PATH, "prefectures.csv"))
provinceSelector <- provinceCode$id
names(provinceSelector) <- provinceCode$`name-ja`
provinceAttr <- fread(paste0(DATA_PATH, "Signate/prefMaster.csv"))
provinceAttr[, 都道府県略称 := 都道府県]
provinceAttr[, 都道府県略称 := gsub("県", "", 都道府県略称)]
provinceAttr[, 都道府県略称 := gsub("府", "", 都道府県略称)]
provinceAttr[, 都道府県略称 := gsub("東京都", "東京", 都道府県略称)]
# 色設定
lightRed <- "#F56954"
middleRed <- "#DD4B39"
darkRed <- "#B03C2D"
lightYellow <- "#F8BF76"
middleYellow <- "#F39C11"
darkYellow <- "#DB8B0A"
lightGreen <- "#00A65A"
middleGreen <- "#01A65A"
darkGreen <- "#088448"
superDarkGreen <- "#046938"
lightNavy <- "#5A6E82"
middelNavy <- "#001F3F"
darkNavy <- "#001934"
lightGrey <- "#F5F5F5"
lightBlue <- "#7BD6F5"
middleBlue <- "#00C0EF"
darkBlue <- "#00A7D0"
# ====各都道府県のサマリーテーブル====
# ランキングカラムを作成
# cumDt <- cumsum(byDate[, c(2:48, 50)])
# rankDt <- data.table(t(apply(-cumDt, 1, function(x){rank(x, ties.method = 'min')})))
# rankDt[, colnames(rankDt) := shift(.SD, fill = 0) - .SD, .SDcols = colnames(rankDt)]
#
# rankDt[rankDt == 0] <- '-'
# rankDt[, colnames(rankDt) := ifelse(.SD > 0, paste0('+', .SD), .SD), .SDcols = colnames(rankDt)]
print("新規なし継続日数カラム作成")
zeroContinuousDay <- stack(lapply(byDate[, 2:ncol(byDate)], function(region) {
continuousDay <- 0
for (x in region) {
if (x == 0) {
continuousDay <- continuousDay + 1
} else {
continuousDay <- 0
}
}
return(continuousDay - 1)
}))
print("感染確認カラム作成")
total <- colSums(byDate[, 2:ncol(byDate)])
print("新規カラム作成")
today <- colSums(byDate[nrow(byDate), 2:ncol(byDate)])
print("昨日までカラム作成")
untilToday <- colSums(byDate[1:nrow(byDate) - 1, 2:ncol(byDate)])
print("感染者推移カラム作成")
dateSpan <- 21
diffSparkline <- sapply(2:ncol(byDate), function(i) {
# 新規値
value <- byDate[(nrow(byDate) - dateSpan):nrow(byDate), i, with = F][[1]]
# 累計値
cumsumValue <- c(cumsum(byDate[, i, with = F])[(nrow(byDate) - dateSpan):nrow(byDate)])[[1]]
# 日付
date <- byDate[(nrow(byDate) - dateSpan):nrow(byDate), 1, with = F][[1]]
colorMapSetting <- rep("#E7ADA6", length(value))
colorMapSetting[length(value)] <- darkRed
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(value) - 1)
# 新規
diff <- sparkline(
values = value,
type = "bar",
chartRangeMin = 0,
width = 80,
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 新規{{value}}名",
tooltipValueLookups = list(
names = namesSetting
),
colorMap = colorMapSetting
)
# 累計
cumsumSpk <- sparkline(
values = cumsumValue,
type = "line",
width = 80,
fillColor = F,
lineColor = darkRed,
tooltipFormat = "<span style='color: {{color}}'>●</span> 累計{{y}}名"
)
return(as.character(htmltools::as.tags(spk_composite(diff, cumsumSpk))))
})
print("新規回復者カラム作成")
mhlwSummary <- fread(file = "50_Data/MHLW/summary.csv")
mhlwSummary$日付 <- as.Date(as.character(mhlwSummary$日付), "%Y%m%d")
mhlwSummary[order(日付), dischargedDiff := 退院者 - shift(退院者), by = "都道府県名"]
print("回復推移")
dischargedDiffSparkline <- sapply(colnames(byDate)[2:48], function(region) {
data <- mhlwSummary[`都道府県名` == region]
# 新規
span <- nrow(data) - dateSpan
value <- data$dischargedDiff[ifelse(span < 0, 0, span):nrow(data)]
# 日付
date <- data$日付[ifelse(span < 0, 0, span):nrow(data)]
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(date) - 1)
if (length(value) > 0) {
diff <- spk_chr(
values = value,
type = "bar",
width = 80,
barColor = middleGreen,
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 新規回復{{value}}名",
tooltipValueLookups = list(
names = namesSetting
)
)
} else {
diff <- NA
}
return(diff)
})
print("死亡カラム作成")
deathByRegion <- stack(colSums(death[, 2:ncol(byDate)]))
print("感染者内訳")
detailSparkLineDt <- mhlwSummary[日付 == max(日付)]
detailSparkLine <- sapply(detailSparkLineDt$都道府県名, function(region) {
# 速報値との差分処理
regionNew <- ifelse(region == "空港検疫", "検疫職員", region)
confirmed <- ifelse(total[names(total) == regionNew][[1]] > detailSparkLineDt[都道府県名 == region, 陽性者],
total[names(total) == regionNew][[1]],
detailSparkLineDt[都道府県名 == region, 陽性者]
)
spk_chr(
type = "pie",
values = c(
confirmed - sum(detailSparkLineDt[都道府県名 == region, .(入院中, 退院者, 死亡者)], na.rm = T) -
ifelse(region == "クルーズ船", 40, 0),
detailSparkLineDt[都道府県名 == region, 入院中],
detailSparkLineDt[都道府県名 == region, 退院者],
detailSparkLineDt[都道府県名 == region, 死亡者]
),
sliceColors = c(middleRed, middleYellow, middleGreen, darkNavy),
tooltipFormat = '<span style="color: {{color}}">●</span> {{offset:names}}<br>{{value}} 名 ({{percent.1}}%)',
tooltipValueLookups = list(
names = list(
"0" = "情報待ち陽性者",
"1" = "入院者",
"2" = "回復者",
"3" = "死亡者"
)
)
)
})
print("二倍時間集計")
dt <- byDate[, 2:ncol(byDate)]
halfCount <- colSums(dt) / 2
dt <- cumsum(dt)
doubleTimeDay <- lapply(seq(halfCount), function(index) {
prefDt <- dt[, index, with = F]
nrow(prefDt[c(prefDt > halfCount[index])])
})
names(doubleTimeDay) <- names(dt)
# 回復者総数
totalDischarged <- mhlwSummary[日付 == max(日付), .(都道府県名, 退院者)]
colnames(totalDischarged) <- c("region", "totalDischarged")
print("都道府県別PCRデータ作成")
mhlwSummary[, 前日比 := 検査人数 - shift(検査人数), by = c("都道府県名")]
mhlwSummary[, 週間平均移動 := round(frollmean(前日比, 7), 0), by = c("都道府県名")]
mhlwSummary[, 陽性率 := round(陽性者 / 検査人数 * 100, 1)]
pcrByRegionToday <- mhlwSummary[日付 == max(日付)]
pcrDiffSparkline <- sapply(pcrByRegionToday$都道府県名, function(region) {
data <- mhlwSummary[都道府県名 == region]
# 新規
span <- nrow(data) - dateSpan
value <- data$前日比[ifelse(span < 0, 0, span):nrow(data)]
# 日付
date <- data$日付[ifelse(span < 0, 0, span):nrow(data)]
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(date) - 1)
if (length(value) > 0) {
diff <- spk_chr(
values = value,
type = "bar",
width = 80,
barColor = middleYellow,
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 新規{{value}}",
tooltipValueLookups = list(
names = namesSetting
)
)
} else {
diff <- NA
}
return(diff)
})
positiveRatioSparkline <- sapply(pcrByRegionToday$都道府県名, function(region) {
data <- mhlwSummary[都道府県名 == region]
# 新規
span <- nrow(data) - dateSpan
value <- data$陽性率[ifelse(span < 0, 0, span):nrow(data)]
# 日付
date <- data$日付[ifelse(span < 0, 0, span):nrow(data)]
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(date) - 1)
if (length(value) > 0) {
diff <- spk_chr(
values = value,
type = "line",
width = 80,
lineColor = darkRed,
fillColor = "#f2b3aa",
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 陽性率:{{y}}%",
tooltipValueLookups = list(
names = namesSetting
)
)
} else {
diff <- NA
}
return(diff)
})
pcrByRegionToday$検査数推移 <- pcrDiffSparkline
pcrByRegionToday$陽性率推移 <- positiveRatioSparkline
print("テーブル作成")
totalToday <- paste(sprintf("%06d", total), total, today, sep = "|")
mergeDt <- data.table(
region = names(total),
count = total,
today = today,
totalToday = totalToday,
untilToday = untilToday,
diff = diffSparkline,
dischargeDiff = "",
detailBullet = "",
death = deathByRegion$values,
zeroContinuousDay = zeroContinuousDay$values,
doubleTimeDay = doubleTimeDay
)
mergeDt <- merge(mergeDt, totalDischarged, all.x = T, sort = F)
signateSub <- provinceAttr[, .(都道府県略称, 人口)]
colnames(signateSub) <- c("region", "population")
mergeDt <- merge(mergeDt, signateSub, all.x = T, sort = F)
mergeDt[, perMillion := round(count / (population / 1000000), 0)]
mergeDt[, perMillionDeath := round(death / (population / 1000000), 0)]
for (i in mergeDt$region) {
mergeDt[region == i]$dischargeDiff <- dischargedDiffSparkline[i][[1]]
mergeDt[region == i]$detailBullet <- detailSparkLine[i][[1]]
}
# グルーピング
groupList <- list(
"北海道・東北" = provinceAttr[都道府県コード %in% 1:7]$都道府県略称,
"関東" = provinceAttr[都道府県コード %in% 8:14]$都道府県略称,
"中部" = provinceAttr[都道府県コード %in% 15:23]$都道府県略称,
"近畿" = provinceAttr[都道府県コード %in% 24:30]$都道府県略称,
"中国" = provinceAttr[都道府県コード %in% 31:35]$都道府県略称,
"四国" = provinceAttr[都道府県コード %in% 36:39]$都道府県略称,
"九州・沖縄" = provinceAttr[都道府県コード %in% 40:47]$都道府県略称,
"他" = colnames(byDate)[(ncol(byDate) - 3):ncol(byDate)]
)
mergeDt$group = ""
for (i in seq(nrow(mergeDt))) {
mergeDt[i]$group <- names(which(lapply(groupList, function(x) { mergeDt$region[i] %in% x }) == T))
}
# 面積あたりの感染者数
area <- fread(paste0(DATA_PATH, "Collection/area.csv"))
area[, 都道府県略称 := 都道府県]
area[, 都道府県略称 := gsub("県", "", 都道府県略称)]
area[, 都道府県略称 := gsub("府", "", 都道府県略称)]
area[, 都道府県略称 := gsub("東京都", "東京", 都道府県略称)]
mergeDt <- merge(mergeDt, area, by.x = "region", by.y = "都道府県略称", all.x = T, no.dups = T, sort = F)
mergeDt[, perArea := round(sqrt(可住地面積 / count), 2)]
mergeDt[, `:=` (コード = NULL, 都道府県 = NULL, 可住地面積 = NULL, 可住地面積割合 = NULL, 宅地面積 = NULL, 宅地面積割合 = NULL)]
pcrByRegionToday[, `:=` (dischargedDiff = NULL)]
mergeDt <- merge(mergeDt, pcrByRegionToday, by.x = "region", by.y = "都道府県名", all.x = T, no.dups = T, sort = F)
active <- mergeDt$陽性者 - mergeDt$退院者 - ifelse(is.na(mergeDt$死亡者), 0, mergeDt$死亡者)
mergeDt[, `:=` (日付 = NULL, 陽性者 = NULL, 入院中 = NULL, 退院者 = NULL, 死亡者 = NULL, 確認中 = NULL, 分類 = NULL)]
mergeDt[, 百万人あたり := round(検査人数 / (population / 1000000), 0)]
mergeDt[, population := NULL]
# 現在患者数
mergeDt$active <- active
mergeDt[active < 0, active := 0] # チャーター便の単独対応
mergeDt[region == "クルーズ船", active := active - 40] # クルーズ船の単独対応
# 13個特定警戒都道府県
alertPref <-
c(
"東京",
"大阪",
# "北海道",
# "茨城",
# "埼玉",
# "千葉",
"神奈川",
# "石川",
"岐阜",
"愛知",
"京都",
"三重",
# "兵庫",
"福岡",
"沖縄"
)
for(i in seq(nrow(mergeDt))) {
if (mergeDt[i]$region %in% alertPref) {
mergeDt[i]$region <- paste0("<i style='color:#DD4B39;' class=\"fa fa-exclamation-triangle\"></i>", "<span style='float:right;'>", mergeDt[i]$region, "</span>")
} else if (mergeDt[i]$active == 0 && !is.na(mergeDt[i]$active)) {
mergeDt[i]$region <- paste0("<i style='color:#01A65A;' class=\"fa fa-check-circle\"></i>", "<span style='float:right;'>", mergeDt[i]$region, "</span>")
} else {
mergeDt[i]$region <- paste0("<span style='float:right;'>", mergeDt[i]$region, "</span>")
}
}
# 自治体名前ソート用
prefNameId <- sprintf('%02d', seq(2:ncol(byDate)))
mergeDt[, region := paste0(prefNameId, "|", region)]
# オーダー
# setorder(mergeDt, - count)
# 読み取り時のエラーを回避するため
mergeDt[, diff := gsub("\\n", "", diff)]
mergeDt[, dischargeDiff := gsub("\\n", "", dischargeDiff)]
mergeDt[, detailBullet := gsub("\\n", "", detailBullet)]
mergeDt[, 検査数推移 := gsub("\\n", "", 検査数推移)]
mergeDt[, 陽性率推移 := gsub("\\n", "", 陽性率推移)]
# クルーズ船とチャーター便データ除外
# mergeDt <- mergeDt[!grepl(pattern = paste0(lang[[langCode]][35:36], collapse = "|"), x = mergeDt$region)]
print("テーブル出力")
fwrite(x = mergeDt, file = paste0(DATA_PATH, "Generated/resultSummaryTable.ja.csv"), sep = "@", quote = F)
source(file = "00_System/CreateTable.Translate.R")
# ====マップ用のデータ作成====
dt <- data.frame(date = byDate$date)
for (i in 2:ncol(byDate)) {
dt[, i] <- cumsum(byDate[, i, with = F])
}
dt <- reshape2::melt(dt, id.vars = "date")
dt <- data.table(dt)
mapDt <- dt[!(variable %in% c("クルーズ船", "伊客船", "チャーター便", "検疫職員"))]
# マップデータ用意
mapDt <- merge(x = mapDt, y = provinceCode, by.x = "variable", by.y = "name-ja", all = T)
mapDt <- merge(x = mapDt, y = provinceAttr, by.x = "variable", by.y = "都道府県略称", all = T)
# 必要なカラムを保存
mapDt <- mapDt[, .(date, variable, 都道府県, `name-en`, value, regions, lat, lng)]
# カラム名変更
colnames(mapDt) <- c("date", "ja", "full_ja", "en", "count", "regions", "lat", "lng")
fwrite(x = mapDt, file = paste0(DATA_PATH, "result.map.csv"))
# ====COVID DATA HUB====
source(file = "00_System/Generate.covid19datahub.R")
|
/00_System/CreateTable.R
|
permissive
|
yuster0/2019-ncov-japan
|
R
| false
| false
| 14,387
|
r
|
library(data.table)
library(sparkline)
# ====準備部分====
source(file = "01_Settings/Path.R", local = T, encoding = "UTF-8")
# Load the infection source table (one column per prefecture, rows = days).
byDate <- fread(paste0(DATA_PATH, "byDate.csv"), header = T)
byDate[is.na(byDate)] <- 0
# Parse the YYYYMMDD integer key into an atomic Date column.
# BUG FIX: the original used lapply() over a one-column data.table, which
# yields a list rather than a Date vector and breaks downstream date
# arithmetic; as.Date() is vectorized and produces the intended column.
byDate$date <- as.Date(as.character(byDate[[1]]), format = "%Y%m%d")
# 死亡データ
death <- fread(paste0(DATA_PATH, "death.csv"))
death[is.na(death)] <- 0
# 文言データを取得
lang <- fread(paste0(DATA_PATH, "lang.csv"))
langCode <- "ja"
# 都道府県
provinceCode <- fread(paste0(DATA_PATH, "prefectures.csv"))
provinceSelector <- provinceCode$id
names(provinceSelector) <- provinceCode$`name-ja`
provinceAttr <- fread(paste0(DATA_PATH, "Signate/prefMaster.csv"))
provinceAttr[, 都道府県略称 := 都道府県]
provinceAttr[, 都道府県略称 := gsub("県", "", 都道府県略称)]
provinceAttr[, 都道府県略称 := gsub("府", "", 都道府県略称)]
provinceAttr[, 都道府県略称 := gsub("東京都", "東京", 都道府県略称)]
# 色設定
lightRed <- "#F56954"
middleRed <- "#DD4B39"
darkRed <- "#B03C2D"
lightYellow <- "#F8BF76"
middleYellow <- "#F39C11"
darkYellow <- "#DB8B0A"
lightGreen <- "#00A65A"
middleGreen <- "#01A65A"
darkGreen <- "#088448"
superDarkGreen <- "#046938"
lightNavy <- "#5A6E82"
middelNavy <- "#001F3F"
darkNavy <- "#001934"
lightGrey <- "#F5F5F5"
lightBlue <- "#7BD6F5"
middleBlue <- "#00C0EF"
darkBlue <- "#00A7D0"
# ====各都道府県のサマリーテーブル====
# ランキングカラムを作成
# cumDt <- cumsum(byDate[, c(2:48, 50)])
# rankDt <- data.table(t(apply(-cumDt, 1, function(x){rank(x, ties.method = 'min')})))
# rankDt[, colnames(rankDt) := shift(.SD, fill = 0) - .SD, .SDcols = colnames(rankDt)]
#
# rankDt[rankDt == 0] <- '-'
# rankDt[, colnames(rankDt) := ifelse(.SD > 0, paste0('+', .SD), .SD), .SDcols = colnames(rankDt)]
print("新規なし継続日数カラム作成")
# For each prefecture, the number of consecutive most-recent days with zero
# new cases, minus one (matching the original scan's return convention).
zeroContinuousDay <- stack(lapply(byDate[, 2:ncol(byDate)], function(region) {
  # Run-length encode the zero/non-zero pattern; the trailing-zero streak is
  # the last run iff that run consists of zeros. Identical result to the
  # original element-by-element counter loop.
  runs <- rle(region == 0)
  last_run <- length(runs$lengths)
  trailing_zeros <- if (last_run > 0 && runs$values[last_run]) {
    runs$lengths[last_run]
  } else {
    0
  }
  trailing_zeros - 1
}))
print("感染確認カラム作成")
# Cumulative confirmed cases per prefecture.
total <- colSums(byDate[, 2:ncol(byDate)])
print("新規カラム作成")
# New cases on the latest day.
today <- colSums(byDate[nrow(byDate), 2:ncol(byDate)])
print("昨日までカラム作成")
# Cumulative cases up to (but excluding) the latest day.
# BUG FIX: `1:nrow(byDate) - 1` parses as `(1:n) - 1`, i.e. 0:(n-1), and only
# worked because data.table silently drops a 0 row index; seq_len() expresses
# the intended "all rows but the last" explicitly.
untilToday <- colSums(byDate[seq_len(nrow(byDate) - 1), 2:ncol(byDate)])
print("感染者推移カラム作成")
# Number of trailing days covered by each sparkline window.
dateSpan <- 21
# One composite sparkline per prefecture column: daily new-case bars overlaid
# with a cumulative-count line, returned as a rendered HTML string.
diffSparkline <- sapply(2:ncol(byDate), function(i) {
# daily new-case counts for the last dateSpan+1 days
value <- byDate[(nrow(byDate) - dateSpan):nrow(byDate), i, with = F][[1]]
# cumulative counts over the same window
cumsumValue <- c(cumsum(byDate[, i, with = F])[(nrow(byDate) - dateSpan):nrow(byDate)])[[1]]
# dates used as tooltip labels
date <- byDate[(nrow(byDate) - dateSpan):nrow(byDate), 1, with = F][[1]]
# light red for earlier days, dark red for the most recent bar
colorMapSetting <- rep("#E7ADA6", length(value))
colorMapSetting[length(value)] <- darkRed
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(value) - 1)
# daily new-case bar chart
diff <- sparkline(
values = value,
type = "bar",
chartRangeMin = 0,
width = 80,
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 新規{{value}}名",
tooltipValueLookups = list(
names = namesSetting
),
colorMap = colorMapSetting
)
# cumulative-count line chart
cumsumSpk <- sparkline(
values = cumsumValue,
type = "line",
width = 80,
fillColor = F,
lineColor = darkRed,
tooltipFormat = "<span style='color: {{color}}'>●</span> 累計{{y}}名"
)
# overlay the two charts and return the rendered HTML
return(as.character(htmltools::as.tags(spk_composite(diff, cumsumSpk))))
})
print("新規回復者カラム作成")
mhlwSummary <- fread(file = "50_Data/MHLW/summary.csv")
mhlwSummary$日付 <- as.Date(as.character(mhlwSummary$日付), "%Y%m%d")
mhlwSummary[order(日付), dischargedDiff := 退院者 - shift(退院者), by = "都道府県名"]
print("回復推移")
dischargedDiffSparkline <- sapply(colnames(byDate)[2:48], function(region) {
data <- mhlwSummary[`都道府県名` == region]
# 新規
span <- nrow(data) - dateSpan
value <- data$dischargedDiff[ifelse(span < 0, 0, span):nrow(data)]
# 日付
date <- data$日付[ifelse(span < 0, 0, span):nrow(data)]
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(date) - 1)
if (length(value) > 0) {
diff <- spk_chr(
values = value,
type = "bar",
width = 80,
barColor = middleGreen,
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 新規回復{{value}}名",
tooltipValueLookups = list(
names = namesSetting
)
)
} else {
diff <- NA
}
return(diff)
})
print("死亡カラム作成")
# Total deaths per prefecture (stacked into a values/ind data frame).
# NOTE(review): the column range is computed from ncol(byDate), not
# ncol(death) — this only works if `death` has at least as many columns as
# `byDate` in the same order; confirm whether ncol(death) was intended.
deathByRegion <- stack(colSums(death[, 2:ncol(byDate)]))
print("感染者内訳")
detailSparkLineDt <- mhlwSummary[日付 == max(日付)]
detailSparkLine <- sapply(detailSparkLineDt$都道府県名, function(region) {
# 速報値との差分処理
regionNew <- ifelse(region == "空港検疫", "検疫職員", region)
confirmed <- ifelse(total[names(total) == regionNew][[1]] > detailSparkLineDt[都道府県名 == region, 陽性者],
total[names(total) == regionNew][[1]],
detailSparkLineDt[都道府県名 == region, 陽性者]
)
spk_chr(
type = "pie",
values = c(
confirmed - sum(detailSparkLineDt[都道府県名 == region, .(入院中, 退院者, 死亡者)], na.rm = T) -
ifelse(region == "クルーズ船", 40, 0),
detailSparkLineDt[都道府県名 == region, 入院中],
detailSparkLineDt[都道府県名 == region, 退院者],
detailSparkLineDt[都道府県名 == region, 死亡者]
),
sliceColors = c(middleRed, middleYellow, middleGreen, darkNavy),
tooltipFormat = '<span style="color: {{color}}">●</span> {{offset:names}}<br>{{value}} 名 ({{percent.1}}%)',
tooltipValueLookups = list(
names = list(
"0" = "情報待ち陽性者",
"1" = "入院者",
"2" = "回復者",
"3" = "死亡者"
)
)
)
})
print("二倍時間集計")
dt <- byDate[, 2:ncol(byDate)]
halfCount <- colSums(dt) / 2
dt <- cumsum(dt)
doubleTimeDay <- lapply(seq(halfCount), function(index) {
prefDt <- dt[, index, with = F]
nrow(prefDt[c(prefDt > halfCount[index])])
})
names(doubleTimeDay) <- names(dt)
# 回復者総数
totalDischarged <- mhlwSummary[日付 == max(日付), .(都道府県名, 退院者)]
colnames(totalDischarged) <- c("region", "totalDischarged")
print("都道府県別PCRデータ作成")
mhlwSummary[, 前日比 := 検査人数 - shift(検査人数), by = c("都道府県名")]
mhlwSummary[, 週間平均移動 := round(frollmean(前日比, 7), 0), by = c("都道府県名")]
mhlwSummary[, 陽性率 := round(陽性者 / 検査人数 * 100, 1)]
pcrByRegionToday <- mhlwSummary[日付 == max(日付)]
pcrDiffSparkline <- sapply(pcrByRegionToday$都道府県名, function(region) {
data <- mhlwSummary[都道府県名 == region]
# 新規
span <- nrow(data) - dateSpan
value <- data$前日比[ifelse(span < 0, 0, span):nrow(data)]
# 日付
date <- data$日付[ifelse(span < 0, 0, span):nrow(data)]
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(date) - 1)
if (length(value) > 0) {
diff <- spk_chr(
values = value,
type = "bar",
width = 80,
barColor = middleYellow,
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 新規{{value}}",
tooltipValueLookups = list(
names = namesSetting
)
)
} else {
diff <- NA
}
return(diff)
})
positiveRatioSparkline <- sapply(pcrByRegionToday$都道府県名, function(region) {
data <- mhlwSummary[都道府県名 == region]
# 新規
span <- nrow(data) - dateSpan
value <- data$陽性率[ifelse(span < 0, 0, span):nrow(data)]
# 日付
date <- data$日付[ifelse(span < 0, 0, span):nrow(data)]
namesSetting <- as.list(date)
names(namesSetting) <- 0:(length(date) - 1)
if (length(value) > 0) {
diff <- spk_chr(
values = value,
type = "line",
width = 80,
lineColor = darkRed,
fillColor = "#f2b3aa",
tooltipFormat = "{{offset:names}}<br><span style='color: {{color}}'>●</span> 陽性率:{{y}}%",
tooltipValueLookups = list(
names = namesSetting
)
)
} else {
diff <- NA
}
return(diff)
})
pcrByRegionToday$検査数推移 <- pcrDiffSparkline
pcrByRegionToday$陽性率推移 <- positiveRatioSparkline
print("テーブル作成")
totalToday <- paste(sprintf("%06d", total), total, today, sep = "|")
mergeDt <- data.table(
region = names(total),
count = total,
today = today,
totalToday = totalToday,
untilToday = untilToday,
diff = diffSparkline,
dischargeDiff = "",
detailBullet = "",
death = deathByRegion$values,
zeroContinuousDay = zeroContinuousDay$values,
doubleTimeDay = doubleTimeDay
)
mergeDt <- merge(mergeDt, totalDischarged, all.x = T, sort = F)
signateSub <- provinceAttr[, .(都道府県略称, 人口)]
colnames(signateSub) <- c("region", "population")
mergeDt <- merge(mergeDt, signateSub, all.x = T, sort = F)
mergeDt[, perMillion := round(count / (population / 1000000), 0)]
mergeDt[, perMillionDeath := round(death / (population / 1000000), 0)]
for (i in mergeDt$region) {
mergeDt[region == i]$dischargeDiff <- dischargedDiffSparkline[i][[1]]
mergeDt[region == i]$detailBullet <- detailSparkLine[i][[1]]
}
# グルーピング
groupList <- list(
"北海道・東北" = provinceAttr[都道府県コード %in% 1:7]$都道府県略称,
"関東" = provinceAttr[都道府県コード %in% 8:14]$都道府県略称,
"中部" = provinceAttr[都道府県コード %in% 15:23]$都道府県略称,
"近畿" = provinceAttr[都道府県コード %in% 24:30]$都道府県略称,
"中国" = provinceAttr[都道府県コード %in% 31:35]$都道府県略称,
"四国" = provinceAttr[都道府県コード %in% 36:39]$都道府県略称,
"九州・沖縄" = provinceAttr[都道府県コード %in% 40:47]$都道府県略称,
"他" = colnames(byDate)[(ncol(byDate) - 3):ncol(byDate)]
)
mergeDt$group = ""
for (i in seq(nrow(mergeDt))) {
mergeDt[i]$group <- names(which(lapply(groupList, function(x) { mergeDt$region[i] %in% x }) == T))
}
# 面積あたりの感染者数
area <- fread(paste0(DATA_PATH, "Collection/area.csv"))
area[, 都道府県略称 := 都道府県]
area[, 都道府県略称 := gsub("県", "", 都道府県略称)]
area[, 都道府県略称 := gsub("府", "", 都道府県略称)]
area[, 都道府県略称 := gsub("東京都", "東京", 都道府県略称)]
mergeDt <- merge(mergeDt, area, by.x = "region", by.y = "都道府県略称", all.x = T, no.dups = T, sort = F)
mergeDt[, perArea := round(sqrt(可住地面積 / count), 2)]
mergeDt[, `:=` (コード = NULL, 都道府県 = NULL, 可住地面積 = NULL, 可住地面積割合 = NULL, 宅地面積 = NULL, 宅地面積割合 = NULL)]
pcrByRegionToday[, `:=` (dischargedDiff = NULL)]
mergeDt <- merge(mergeDt, pcrByRegionToday, by.x = "region", by.y = "都道府県名", all.x = T, no.dups = T, sort = F)
active <- mergeDt$陽性者 - mergeDt$退院者 - ifelse(is.na(mergeDt$死亡者), 0, mergeDt$死亡者)
mergeDt[, `:=` (日付 = NULL, 陽性者 = NULL, 入院中 = NULL, 退院者 = NULL, 死亡者 = NULL, 確認中 = NULL, 分類 = NULL)]
mergeDt[, 百万人あたり := round(検査人数 / (population / 1000000), 0)]
mergeDt[, population := NULL]
# 現在患者数
mergeDt$active <- active
mergeDt[active < 0, active := 0] # チャーター便の単独対応
mergeDt[region == "クルーズ船", active := active - 40] # クルーズ船の単独対応
# 13個特定警戒都道府県
alertPref <-
c(
"東京",
"大阪",
# "北海道",
# "茨城",
# "埼玉",
# "千葉",
"神奈川",
# "石川",
"岐阜",
"愛知",
"京都",
"三重",
# "兵庫",
"福岡",
"沖縄"
)
for(i in seq(nrow(mergeDt))) {
if (mergeDt[i]$region %in% alertPref) {
mergeDt[i]$region <- paste0("<i style='color:#DD4B39;' class=\"fa fa-exclamation-triangle\"></i>", "<span style='float:right;'>", mergeDt[i]$region, "</span>")
} else if (mergeDt[i]$active == 0 && !is.na(mergeDt[i]$active)) {
mergeDt[i]$region <- paste0("<i style='color:#01A65A;' class=\"fa fa-check-circle\"></i>", "<span style='float:right;'>", mergeDt[i]$region, "</span>")
} else {
mergeDt[i]$region <- paste0("<span style='float:right;'>", mergeDt[i]$region, "</span>")
}
}
# 自治体名前ソート用
prefNameId <- sprintf('%02d', seq(2:ncol(byDate)))
mergeDt[, region := paste0(prefNameId, "|", region)]
# オーダー
# setorder(mergeDt, - count)
# 読み取り時のエラーを回避するため
mergeDt[, diff := gsub("\\n", "", diff)]
mergeDt[, dischargeDiff := gsub("\\n", "", dischargeDiff)]
mergeDt[, detailBullet := gsub("\\n", "", detailBullet)]
mergeDt[, 検査数推移 := gsub("\\n", "", 検査数推移)]
mergeDt[, 陽性率推移 := gsub("\\n", "", 陽性率推移)]
# クルーズ船とチャーター便データ除外
# mergeDt <- mergeDt[!grepl(pattern = paste0(lang[[langCode]][35:36], collapse = "|"), x = mergeDt$region)]
print("テーブル出力")
fwrite(x = mergeDt, file = paste0(DATA_PATH, "Generated/resultSummaryTable.ja.csv"), sep = "@", quote = F)
source(file = "00_System/CreateTable.Translate.R")
# ====マップ用のデータ作成====
dt <- data.frame(date = byDate$date)
for (i in 2:ncol(byDate)) {
dt[, i] <- cumsum(byDate[, i, with = F])
}
dt <- reshape2::melt(dt, id.vars = "date")
dt <- data.table(dt)
mapDt <- dt[!(variable %in% c("クルーズ船", "伊客船", "チャーター便", "検疫職員"))]
# マップデータ用意
mapDt <- merge(x = mapDt, y = provinceCode, by.x = "variable", by.y = "name-ja", all = T)
mapDt <- merge(x = mapDt, y = provinceAttr, by.x = "variable", by.y = "都道府県略称", all = T)
# 必要なカラムを保存
mapDt <- mapDt[, .(date, variable, 都道府県, `name-en`, value, regions, lat, lng)]
# カラム名変更
colnames(mapDt) <- c("date", "ja", "full_ja", "en", "count", "regions", "lat", "lng")
fwrite(x = mapDt, file = paste0(DATA_PATH, "result.map.csv"))
# ====COVID DATA HUB====
source(file = "00_System/Generate.covid19datahub.R")
|
source("borehole_func.R")
# Input dimension of the borehole test function.
d <- 8
# Name of the function to evaluate (looked up via do.call()).
funcname <- 'bore.function'
# Read in the base design (n runs in d dimensions).
xf <- read.table('XD8N40.txt', header = F)
colnames(xf) <- paste('x', 1:d, sep = '')
# Compute test-set data and write it out.
xp <- data.matrix(read.table('test8.csv', header = F, sep = ',')[, 1:d])
colnames(xp) <- colnames(xf)
yp <- data.frame(y = do.call(funcname, list(xp)))
borehole.test <- data.frame(xp, y = yp)
write.table(borehole.test, file = "borehole_test.csv", sep = ",", row.names = FALSE)
# Column permutations of the base design, one permutation per row.
ocn <- colnames(xf)
cp.m <- data.frame(read.csv("order_borehole.csv", h = F)[, 1:d])
# Permute the training design 25 times, evaluate each permutation, and stack
# the results into one preallocated data frame.
n_base <- nrow(xf)
train <- as.data.frame(matrix(NA, ncol = d + 1, nrow = n_base * 25))
for (i in 1:25) {
  cp <- as.numeric(cp.m[i, ])
  xf_train <- data.matrix(xf[, cp])
  yf_train <- data.frame(y = do.call(funcname, list(xf_train)))
  # BUG FIX: the original overwrote the preallocated `train` with each
  # iteration's data frame and then indexed an undefined `gpro_train`
  # (with hard-coded block size 40 and width 9); fill the matching rows of
  # `train` instead, sized from the design itself.
  train[((i - 1) * n_base + 1):(i * n_base), seq_len(d + 1)] <-
    data.frame(xf_train, y = yf_train)
}
colnames(train) <- c(ocn, "y")
write.table(train, file = "borehole.csv", sep = ",", row.names = F)
|
/Designs/Borehole/n40_mLHD/job_borehole.R
|
no_license
|
Dustin21/GaSP-Ensemble
|
R
| false
| false
| 1,079
|
r
|
source("borehole_func.R")
# Input dimension of the borehole test function.
d <- 8
# Name of the function to evaluate (looked up via do.call()).
funcname <- 'bore.function'
# Read in the base design (n runs in d dimensions).
xf <- read.table('XD8N40.txt', header = F)
colnames(xf) <- paste('x', 1:d, sep = '')
# Compute test-set data and write it out.
xp <- data.matrix(read.table('test8.csv', header = F, sep = ',')[, 1:d])
colnames(xp) <- colnames(xf)
yp <- data.frame(y = do.call(funcname, list(xp)))
borehole.test <- data.frame(xp, y = yp)
write.table(borehole.test, file = "borehole_test.csv", sep = ",", row.names = FALSE)
# Column permutations of the base design, one permutation per row.
ocn <- colnames(xf)
cp.m <- data.frame(read.csv("order_borehole.csv", h = F)[, 1:d])
# Permute the training design 25 times, evaluate each permutation, and stack
# the results into one preallocated data frame.
n_base <- nrow(xf)
train <- as.data.frame(matrix(NA, ncol = d + 1, nrow = n_base * 25))
for (i in 1:25) {
  cp <- as.numeric(cp.m[i, ])
  xf_train <- data.matrix(xf[, cp])
  yf_train <- data.frame(y = do.call(funcname, list(xf_train)))
  # BUG FIX: the original overwrote the preallocated `train` with each
  # iteration's data frame and then indexed an undefined `gpro_train`
  # (with hard-coded block size 40 and width 9); fill the matching rows of
  # `train` instead, sized from the design itself.
  train[((i - 1) * n_base + 1):(i * n_base), seq_len(d + 1)] <-
    data.frame(xf_train, y = yf_train)
}
colnames(train) <- c(ocn, "y")
write.table(train, file = "borehole.csv", sep = ",", row.names = F)
|
params.df <- cbind(data.frame(param=gsub(":\\(Intercept\\)","",
rownames(summary(logmorphine_SHPfit)$coefficient)),stringsAsFactors=F),
data.frame(summary(logmorphine_SHPfit)$coefficient))
rownames(params.df) <- NULL
ann.df <- data.frame(Parameter=gsub(" Limit","",params.df$param),
Value=signif(params.df[,2],3),stringsAsFactors=F)
rownames(ann.df) <- NULL
thm <- ttheme_minimal(
core=list(fg_params = list(hjust=rep(c(0, 1), each=4),
x=rep(c(0.15, 0.85), each=4)),
bg_params = list(fill = NA)),
colhead=list(bg_params=list(fill = NA)))
ggdraw(logmorphine_SHP_graph) + draw_grob(tableGrob(ann.df, rows=NULL, theme=thm),
x=0.26, y=0.41, width=0.25, height=0.5)
ED(logmorphine_SHPfit, c(25, 50, 75), interval = "delta")
#SHP morphine DR
# Reshape the raw standard-hot-plate (SHP) morphine dose-response data into
# long format, dropping missing latency values.
SHP_morphine_DR <- BPS_morphine_DR_data %>%
filter(Assay == "SHP") %>%
melt(id = c("Assay")) %>%
filter(!is.na(value))
# NOTE(review): the three calls below reference `logSHP_morphine_DR`, which is
# only created further down in this script — running the file top-to-bottom
# fails here unless logSHP_morphine_DR already exists in the workspace.
leveneTest(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
bartlett.test(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
SHP_morphine_anova <- aov(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
distBCMod <- caret::BoxCoxTrans(logSHP_morphine_DR$log_latency_correction)
print(distBCMod)
par(mfrow=c(2,2))
plot(SHP_morphine_anova)
SHP_morphine_anova <- aov(dist_new ~ factor(Dose), data = logSHP_morphine_DR)
par(mfrow=c(2,2))
plot(SHP_morphine_anova)
logSHP_morphine_DR <- cbind(logSHP_morphine_DR, dist_new=predict(distBCMod, logSHP_morphine_DR$log_latency_correction))
head(logSHP_morphine_DR)
het_corrected_SHP_anova <- Anova(SHP_morphine_anova, type ="II", white.adjust = T)
SHP_morphine_DR$variable <- as.numeric(as.character(SHP_morphine_DR$variable))
SHP_morphine_DR <- SHP_morphine_DR %>% dplyr::rename(Dose = variable)
SHP_morphine_DR <- SHP_morphine_DR %>% dplyr::rename(Latency = value)
morphine_SHPfit <- drm(Latency ~ Dose, data = SHP_morphine_DR,
fct = LL.4(fixed = c(NA, NA, 120, NA),
names = c("Slope","Lower Limit","Upper Limit","ED50")))
morphine_SHPline <- expand.grid(Dose = exp(seq(log(max(SHP_morphine_DR$Dose)),
log(min(SHP_morphine_DR$Dose)),length=100)))
morphine_SHP <- predict(morphine_SHPfit,newdata=morphine_SHPline,interval="confidence")
morphine_SHPline$p <- morphine_SHP[,1]
morphine_SHPline$pmin <- morphine_SHP[,2]
morphine_SHPline$pmax <- morphine_SHP[,3]
morphine_SHP_graph <- ggplot(SHP_morphine_DR, aes(x = Dose, y = Latency)) +
geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
geom_line(data = morphine_SHPline, aes(x = Dose,y = p)) +
theme_bw() +
labs(title = "Standard hot plate: Morphine",
subtitle = "Upper limit constraint = 120; n = 7 per group", x = "Dose (mg/kg)", y = "Latency (s)") +
stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
stat_summary(fun.data = mean_se, geom = "errorbar", colour = "cadetblue4", alpha = 0.5, width = 0.2) +
stat_summary(fun.y = mean, geom = "point", colour = "cadetblue4", alpha = 0.85, size = 3, pch = 15) +
geom_ribbon(data = morphine_SHPline, aes(x = Dose,y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
labels =c("Vehicle", "0.1", "1.0", "10")) +
theme(text = element_text(size = 14),
axis.text = element_text(size = 14)) +
geom_abline(slope = 0, intercept = 120, lty = 3, alpha = 0.8) +
scale_y_continuous(limits = c(-10, 155), breaks = seq(0, 150, 30))
#
#
#
#
#
#log transformed SHP morphine DR
logSHP_morphine_DR <- BPS_morphine_DR_data %>%
filter(Assay == "logSHP") %>%
melt(id = c("Assay")) %>%
filter(!is.na(value)) %>%
mutate(log_latency_correction = (value - 0.9542425) / 1.125)
leveneTest(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
bartlett.test(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
logSHP_morphine_DR$variable <- as.numeric(as.character(logSHP_morphine_DR$variable))
logSHP_morphine_DR <- logSHP_morphine_DR %>% dplyr::rename(Dose = variable)
logSHP_morphine_DR <- logSHP_morphine_DR %>% dplyr::rename(Latency = value)
logmorphine_SHPfit <- drm(log_latency_correction ~ Dose, data = logSHP_morphine_DR,
fct = LL.4(fixed = c(NA, NA, 1, NA),
names = c("Slope","Lower Limit","Upper Limit","ED50")))
logmorphine_SHPline <- expand.grid(Dose = exp(seq(log(max(logSHP_morphine_DR$Dose)),
log(min(logSHP_morphine_DR$Dose)),length=100)))
logmorphine_SHP <- predict(logmorphine_SHPfit,newdata=logmorphine_SHPline,interval="confidence")
logmorphine_SHPline$p <- logmorphine_SHP[,1]
logmorphine_SHPline$pmin <- logmorphine_SHP[,2]
logmorphine_SHPline$pmax <- logmorphine_SHP[,3]
logmorphine_SHP_graph <- ggplot(logSHP_morphine_DR, aes(x = Dose, y = log_latency_correction)) +
geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
geom_line(data = logmorphine_SHPline, aes(x = Dose,y = p)) +
theme_bw() +
labs(title = "Standard hot plate: Morphine",
subtitle = "n = 7 per group", x = "Dose (mg/kg)", y = "Proportion of effect") +
stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
stat_summary(fun.data = mean_se, geom = "errorbar", colour = "seagreen", alpha = 0.75, width = 0.2, size = 1) +
stat_summary(fun.y = mean, geom = "point", colour = "seagreen", alpha = 0.85, size = 3, pch = 15) +
geom_ribbon(data = logmorphine_SHPline, aes(x = Dose,y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
labels =c("Vehicle", "0.1", "1.0", "10")) +
theme(text = element_text(size = 14, family = "Century Gothic"),
axis.text = element_text(size = 14, family = "Century Gothic")) +
geom_abline(slope = 0, intercept = 1, lty = 3, alpha = 0.8) +
scale_y_continuous(limits = c(0, 1.24), breaks = seq(0, 1, 0.2))
ED(logmorphine_SHPfit, c(25, 50, 75), interval = "delta")
#
#
#
#
#
#
#RHP morphine Dose-response graph
RHP_morphine_DR <- BPS_morphine_DR_data %>%
filter(Assay == "logRHP") %>%
melt(id = c("Assay")) %>%
filter(!is.na(value)) %>%
mutate(log_latency_correction = (value - 2.117901) / 0.235)
leveneTest(log_latency_correction ~ factor(Dose), data = RHP_morphine_DR)
bartlett.test(log_latency_correction ~ factor(Dose), data = RHP_morphine_DR)
RHP_morphine_DR$variable <- as.numeric(as.character(RHP_morphine_DR$variable))
RHP_morphine_DR <- RHP_morphine_DR %>% dplyr::rename(Dose = variable)
RHP_morphine_DR <- RHP_morphine_DR %>% dplyr::rename(Latency = value)
morphine_RHPfit <- drm(log_latency_correction ~ Dose, data = RHP_morphine_DR,
fct = LL.4(fixed = c(NA, NA, 1, NA),
names = c("Slope","Lower Limit","Upper Limit","ED50")))
morphine_RHPline <- expand.grid(Dose = exp(seq(log(max(RHP_morphine_DR$Dose)),
log(min(RHP_morphine_DR$Dose)),length=100)))
morphine_RHP <- predict(morphine_RHPfit,newdata=morphine_RHPline,interval="confidence")
morphine_RHPline$p <- morphine_RHP[,1]
morphine_RHPline$pmin <- morphine_RHP[,2]
morphine_RHPline$pmax <- morphine_RHP[,3]
morphine_RHP_graph <- ggplot(RHP_morphine_DR, aes(x = Dose, y = log_latency_correction)) +
geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
geom_line(data = morphine_RHPline, aes(x = Dose,y = p)) +
theme_bw() +
labs(title = "Ramped hot plate: Morphine",
subtitle = "n = 8 per group", x = "Dose (mg/kg)", y = "Proportion of effect") +
stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
stat_summary(fun.data = mean_se, geom = "errorbar", colour = "tomato3", alpha = 0.75, width = 0.2, size = 1) +
stat_summary(fun.y = mean, geom = "point", colour = "tomato3", alpha = 0.85, size = 3, pch = 15) +
geom_ribbon(data = morphine_RHPline, aes(x = Dose,y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
labels =c("Vehicle", "0.1", "1.0", "10")) +
theme(text = element_text(size = 14, family = "Century Gothic"),
axis.text = element_text(size = 14, family = "Century Gothic")) +
geom_abline(slope = 0, intercept = 1, lty = 3, alpha = 0.8) +
scale_y_continuous(limits = c(0, 1.24), breaks = seq(0, 1, 0.2))
ED(morphine_RHPfit, c(25, 50, 75), interval = "delta")
#
#
plot_grid(logmorphine_SHP_graph, morphine_RHP_graph, align = "h")
summary(logmorphine_SHPfit)
summary(morphine_RHPfit)
#
#
#
#log RHP morphine DR
logRHP_morphine_DR <- BPS_morphine_DR_data %>%
filter(Assay == "logRHP") %>%
melt(id = c("Assay")) %>%
filter(!is.na(value))
logRHP_morphine_DR$variable <- as.numeric(as.character(logRHP_morphine_DR$variable))
logRHP_morphine_DR <- logRHP_morphine_DR %>% dplyr::rename(Dose = variable)
logRHP_morphine_DR <- logRHP_morphine_DR %>% dplyr::rename(Latency = value)
logmorphine_RHPfit <- drm(Latency ~ Dose, data = logRHP_morphine_DR,
fct = LL.4(fixed = c(NA, NA, 2.352, NA),
names = c("Slope","Lower Limit","Upper Limit","ED50")))
logmorphine_RHPline <- expand.grid(Dose = exp(seq(log(max(logRHP_morphine_DR$Dose)),
log(min(logRHP_morphine_DR$Dose)),length=100)))
logmorphine_RHP <- predict(logmorphine_RHPfit,newdata=logmorphine_RHPline,interval="confidence")
logmorphine_RHPline$p <- logmorphine_RHP[,1]
logmorphine_RHPline$pmin <- logmorphine_RHP[,2]
logmorphine_RHPline$pmax <- logmorphine_RHP[,3]
logmorphine_RHP_graph <- ggplot(logRHP_morphine_DR, aes(x = Dose, y = Latency)) +
geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
geom_line(data = logmorphine_RHPline, aes(x = Dose,y = p)) +
theme_bw() +
labs(title = "Ramped hot plate: Morphine",
subtitle = "Upper limit constraint = 225; n = 8 per group", x = "Dose (mg/kg)", y = "Log Latency (s)") +
stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
stat_summary(fun.data = mean_se, geom = "errorbar", colour = "orangered4", alpha = 0.5, width = 0.2) +
stat_summary(fun.y = mean, geom = "point", colour = "orangered4", alpha = 0.85, size = 3, pch = 15) +
geom_ribbon(data = logmorphine_RHPline, aes(x = Dose,y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
labels =c("Vehicle", "0.1", "1.0", "10")) +
theme(text = element_text(size = 14),
axis.text = element_text(size = 14)) +
geom_abline(slope = 0, intercept = 2.352, lty = 3, alpha = 0.8) +
scale_y_continuous(limits = c(2.1, 2.41), breaks = seq(2.1, 2.5, 0.1))
modelFit(logmorphine_SHPfit, method = "cum")
modelFit(morphine_SHPfit, method = "cum")
modelFit(morphine_RHPfit, method = "cum")
modelFit(SHPfit, method = "cum")
modelFit(logSHPfit, method = "cum")
modelFit(RHPfit, method = "cum")
modelFit(logmorphine_RHPfit, method = "cum")
RHPmorphine_aov <- aov(Latency ~ Dose, data = RHP_morphine_DR)
plot(RHPmorphine_aov, 3)
|
/Morphine DR BPS poster.R
|
no_license
|
bkiyota/Cannevert_Co-op2017-19
|
R
| false
| false
| 11,428
|
r
|
params.df <- cbind(data.frame(param=gsub(":\\(Intercept\\)","",
rownames(summary(logmorphine_SHPfit)$coefficient)),stringsAsFactors=F),
data.frame(summary(logmorphine_SHPfit)$coefficient))
rownames(params.df) <- NULL
ann.df <- data.frame(Parameter=gsub(" Limit","",params.df$param),
Value=signif(params.df[,2],3),stringsAsFactors=F)
rownames(ann.df) <- NULL
thm <- ttheme_minimal(
core=list(fg_params = list(hjust=rep(c(0, 1), each=4),
x=rep(c(0.15, 0.85), each=4)),
bg_params = list(fill = NA)),
colhead=list(bg_params=list(fill = NA)))
ggdraw(logmorphine_SHP_graph) + draw_grob(tableGrob(ann.df, rows=NULL, theme=thm),
x=0.26, y=0.41, width=0.25, height=0.5)
ED(logmorphine_SHPfit, c(25, 50, 75), interval = "delta")
#SHP morphine DR
SHP_morphine_DR <- BPS_morphine_DR_data %>%
filter(Assay == "SHP") %>%
melt(id = c("Assay")) %>%
filter(!is.na(value))
leveneTest(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
bartlett.test(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
SHP_morphine_anova <- aov(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
distBCMod <- caret::BoxCoxTrans(logSHP_morphine_DR$log_latency_correction)
print(distBCMod)
par(mfrow=c(2,2))
plot(SHP_morphine_anova)
SHP_morphine_anova <- aov(dist_new ~ factor(Dose), data = logSHP_morphine_DR)
par(mfrow=c(2,2))
plot(SHP_morphine_anova)
logSHP_morphine_DR <- cbind(logSHP_morphine_DR, dist_new=predict(distBCMod, logSHP_morphine_DR$log_latency_correction))
head(logSHP_morphine_DR)
het_corrected_SHP_anova <- Anova(SHP_morphine_anova, type ="II", white.adjust = T)
SHP_morphine_DR$variable <- as.numeric(as.character(SHP_morphine_DR$variable))
SHP_morphine_DR <- SHP_morphine_DR %>% dplyr::rename(Dose = variable)
SHP_morphine_DR <- SHP_morphine_DR %>% dplyr::rename(Latency = value)
morphine_SHPfit <- drm(Latency ~ Dose, data = SHP_morphine_DR,
fct = LL.4(fixed = c(NA, NA, 120, NA),
names = c("Slope","Lower Limit","Upper Limit","ED50")))
morphine_SHPline <- expand.grid(Dose = exp(seq(log(max(SHP_morphine_DR$Dose)),
log(min(SHP_morphine_DR$Dose)),length=100)))
morphine_SHP <- predict(morphine_SHPfit,newdata=morphine_SHPline,interval="confidence")
morphine_SHPline$p <- morphine_SHP[,1]
morphine_SHPline$pmin <- morphine_SHP[,2]
morphine_SHPline$pmax <- morphine_SHP[,3]
morphine_SHP_graph <- ggplot(SHP_morphine_DR, aes(x = Dose, y = Latency)) +
geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
geom_line(data = morphine_SHPline, aes(x = Dose,y = p)) +
theme_bw() +
labs(title = "Standard hot plate: Morphine",
subtitle = "Upper limit constraint = 120; n = 7 per group", x = "Dose (mg/kg)", y = "Latency (s)") +
stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
stat_summary(fun.data = mean_se, geom = "errorbar", colour = "cadetblue4", alpha = 0.5, width = 0.2) +
stat_summary(fun.y = mean, geom = "point", colour = "cadetblue4", alpha = 0.85, size = 3, pch = 15) +
geom_ribbon(data = morphine_SHPline, aes(x = Dose,y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
labels =c("Vehicle", "0.1", "1.0", "10")) +
theme(text = element_text(size = 14),
axis.text = element_text(size = 14)) +
geom_abline(slope = 0, intercept = 120, lty = 3, alpha = 0.8) +
scale_y_continuous(limits = c(-10, 155), breaks = seq(0, 150, 30))
#
#
#
#
#
#log transformed SHP morphine DR
# Build the log-SHP dataset: wide -> long, drop missing latencies, rescale to
# a proportion-of-effect scale: (value - 0.9542425) / 1.125.
# NOTE(review): 0.9542425 = log10(9); presumably log10(baseline latency), and
# 1.125 the span up to the cutoff -- confirm against the assay parameters.
logSHP_morphine_DR <- BPS_morphine_DR_data %>%
  filter(Assay == "logSHP") %>%
  melt(id = c("Assay")) %>%
  filter(!is.na(value)) %>%
  mutate(log_latency_correction = (value - 0.9542425) / 1.125)
# The melted dose column is called "variable"; make it numeric and rename it
# BEFORE the homogeneity-of-variance tests, which reference `Dose`.
# (Bug fix: previously leveneTest()/bartlett.test() ran before the rename, so
# no `Dose` column existed yet and both calls errored.)
logSHP_morphine_DR$variable <- as.numeric(as.character(logSHP_morphine_DR$variable))
logSHP_morphine_DR <- logSHP_morphine_DR %>% dplyr::rename(Dose = variable)
logSHP_morphine_DR <- logSHP_morphine_DR %>% dplyr::rename(Latency = value)
# Homogeneity of variance across dose groups (Levene is robust to
# non-normality; Bartlett assumes normality).
leveneTest(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
bartlett.test(log_latency_correction ~ factor(Dose), data = logSHP_morphine_DR)
# 4-parameter log-logistic fit on the proportion scale; upper limit fixed at 1.
logmorphine_SHPfit <- drm(log_latency_correction ~ Dose, data = logSHP_morphine_DR,
                          fct = LL.4(fixed = c(NA, NA, 1, NA),
                                     names = c("Slope","Lower Limit","Upper Limit","ED50")))
# 100 log-spaced doses spanning the observed range, for the fitted curve.
logmorphine_SHPline <- expand.grid(Dose = exp(seq(log(max(logSHP_morphine_DR$Dose)),
                                                  log(min(logSHP_morphine_DR$Dose)), length=100)))
# Predicted proportion of effect with pointwise confidence band.
logmorphine_SHP <- predict(logmorphine_SHPfit, newdata=logmorphine_SHPline, interval="confidence")
logmorphine_SHPline$p <- logmorphine_SHP[,1]
logmorphine_SHPline$pmin <- logmorphine_SHP[,2]
logmorphine_SHPline$pmax <- logmorphine_SHP[,3]
# Raw points + fitted curve + CI ribbon + mean/SD/SE error bars on a log10
# dose axis (the 0.01 break is relabelled "Vehicle").
logmorphine_SHP_graph <- ggplot(logSHP_morphine_DR, aes(x = Dose, y = log_latency_correction)) +
  geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
  geom_line(data = logmorphine_SHPline, aes(x = Dose, y = p)) +
  theme_bw() +
  labs(title = "Standard hot plate: Morphine",
       subtitle = "n = 7 per group", x = "Dose (mg/kg)", y = "Proportion of effect") +
  stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
  stat_summary(fun.data = mean_se, geom = "errorbar", colour = "seagreen", alpha = 0.75, width = 0.2, size = 1) +
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun =`); kept
  # for compatibility with the version this script targets.
  stat_summary(fun.y = mean, geom = "point", colour = "seagreen", alpha = 0.85, size = 3, pch = 15) +
  geom_ribbon(data = logmorphine_SHPline, aes(x = Dose, y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
  scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
                     labels = c("Vehicle", "0.1", "1.0", "10")) +
  theme(text = element_text(size = 14, family = "Century Gothic"),
        axis.text = element_text(size = 14, family = "Century Gothic")) +
  geom_abline(slope = 0, intercept = 1, lty = 3, alpha = 0.8) +  # full-effect line
  scale_y_continuous(limits = c(0, 1.24), breaks = seq(0, 1, 0.2))
# ED25/ED50/ED75 with delta-method confidence intervals.
ED(logmorphine_SHPfit, c(25, 50, 75), interval = "delta")
#
#
#
#
#
#
#RHP morphine Dose-response graph
# Build the ramped-hot-plate dataset: wide -> long, drop missing latencies,
# rescale to proportion of effect: (value - 2.117901) / 0.235.
# NOTE(review): 2.117901 is presumably log10(baseline latency) and 0.235 the
# span up to the cutoff -- confirm against the assay parameters.
RHP_morphine_DR <- BPS_morphine_DR_data %>%
  filter(Assay == "logRHP") %>%
  melt(id = c("Assay")) %>%
  filter(!is.na(value)) %>%
  mutate(log_latency_correction = (value - 2.117901) / 0.235)
# Numeric doses and Dose/Latency names BEFORE the variance tests below.
# (Bug fix: previously leveneTest()/bartlett.test() ran before the rename, so
# no `Dose` column existed yet and both calls errored.)
RHP_morphine_DR$variable <- as.numeric(as.character(RHP_morphine_DR$variable))
RHP_morphine_DR <- RHP_morphine_DR %>% dplyr::rename(Dose = variable)
RHP_morphine_DR <- RHP_morphine_DR %>% dplyr::rename(Latency = value)
# Homogeneity of variance across dose groups.
leveneTest(log_latency_correction ~ factor(Dose), data = RHP_morphine_DR)
bartlett.test(log_latency_correction ~ factor(Dose), data = RHP_morphine_DR)
# 4-parameter log-logistic fit on the proportion scale; upper limit fixed at 1.
morphine_RHPfit <- drm(log_latency_correction ~ Dose, data = RHP_morphine_DR,
                       fct = LL.4(fixed = c(NA, NA, 1, NA),
                                  names = c("Slope","Lower Limit","Upper Limit","ED50")))
# 100 log-spaced doses spanning the observed range, for the fitted curve.
morphine_RHPline <- expand.grid(Dose = exp(seq(log(max(RHP_morphine_DR$Dose)),
                                               log(min(RHP_morphine_DR$Dose)), length=100)))
# Predicted proportion of effect with pointwise confidence band.
morphine_RHP <- predict(morphine_RHPfit, newdata=morphine_RHPline, interval="confidence")
morphine_RHPline$p <- morphine_RHP[,1]
morphine_RHPline$pmin <- morphine_RHP[,2]
morphine_RHPline$pmax <- morphine_RHP[,3]
# Raw points + fitted curve + CI ribbon + mean/SD/SE error bars on a log10
# dose axis (the 0.01 break is relabelled "Vehicle").
morphine_RHP_graph <- ggplot(RHP_morphine_DR, aes(x = Dose, y = log_latency_correction)) +
  geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
  geom_line(data = morphine_RHPline, aes(x = Dose, y = p)) +
  theme_bw() +
  labs(title = "Ramped hot plate: Morphine",
       subtitle = "n = 8 per group", x = "Dose (mg/kg)", y = "Proportion of effect") +
  stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
  stat_summary(fun.data = mean_se, geom = "errorbar", colour = "tomato3", alpha = 0.75, width = 0.2, size = 1) +
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun =`).
  stat_summary(fun.y = mean, geom = "point", colour = "tomato3", alpha = 0.85, size = 3, pch = 15) +
  geom_ribbon(data = morphine_RHPline, aes(x = Dose, y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
  scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
                     labels = c("Vehicle", "0.1", "1.0", "10")) +
  theme(text = element_text(size = 14, family = "Century Gothic"),
        axis.text = element_text(size = 14, family = "Century Gothic")) +
  geom_abline(slope = 0, intercept = 1, lty = 3, alpha = 0.8) +  # full-effect line
  scale_y_continuous(limits = c(0, 1.24), breaks = seq(0, 1, 0.2))
# ED25/ED50/ED75 with delta-method confidence intervals.
ED(morphine_RHPfit, c(25, 50, 75), interval = "delta")
#
#
# Side-by-side comparison of the proportion-of-effect fits for both assays.
plot_grid(logmorphine_SHP_graph, morphine_RHP_graph, align = "h")
# Parameter estimates (slope, limits, ED50) for each dose-response fit.
summary(logmorphine_SHPfit)
summary(morphine_RHPfit)
#
#
#
#log RHP morphine DR
# Same analysis on the log10-transformed RHP latencies directly (no
# proportion-of-effect rescaling): melt to long form and drop NAs.
logRHP_morphine_DR <- BPS_morphine_DR_data %>%
  filter(Assay == "logRHP") %>%
  melt(id = c("Assay")) %>%
  filter(!is.na(value))
# Numeric doses and Dose/Latency column names for the drm() fit.
logRHP_morphine_DR$variable <- as.numeric(as.character(logRHP_morphine_DR$variable))
logRHP_morphine_DR <- logRHP_morphine_DR %>% dplyr::rename(Dose = variable)
logRHP_morphine_DR <- logRHP_morphine_DR %>% dplyr::rename(Latency = value)
# LL.4 fit with the upper limit fixed at 2.352 = log10(225), i.e. the 225 s
# cutoff quoted in the plot subtitle below.
logmorphine_RHPfit <- drm(Latency ~ Dose, data = logRHP_morphine_DR,
                       fct = LL.4(fixed = c(NA, NA, 2.352, NA),
                                  names = c("Slope","Lower Limit","Upper Limit","ED50")))
# 100 log-spaced doses spanning the observed range, for the fitted curve.
logmorphine_RHPline <- expand.grid(Dose = exp(seq(log(max(logRHP_morphine_DR$Dose)),
                                                  log(min(logRHP_morphine_DR$Dose)),length=100)))
# Predicted log latency with pointwise confidence band.
logmorphine_RHP <- predict(logmorphine_RHPfit,newdata=logmorphine_RHPline,interval="confidence")
logmorphine_RHPline$p <- logmorphine_RHP[,1]
logmorphine_RHPline$pmin <- logmorphine_RHP[,2]
logmorphine_RHPline$pmax <- logmorphine_RHP[,3]
# Raw points + fitted curve + CI ribbon + mean/SD/SE error bars.
logmorphine_RHP_graph <- ggplot(logRHP_morphine_DR, aes(x = Dose, y = Latency)) +
  geom_point(colour = "black", fill = "black", alpha = 0.25, size = 3) +
  geom_line(data = logmorphine_RHPline, aes(x = Dose,y = p)) +
  theme_bw() +
  labs(title = "Ramped hot plate: Morphine",
       subtitle = "Upper limit constraint = 225; n = 8 per group", x = "Dose (mg/kg)", y = "Log Latency (s)") +
  stat_summary(fun.data = mean_sdl, geom = "errorbar", colour = "black", width = 0.25) +
  stat_summary(fun.data = mean_se, geom = "errorbar", colour = "orangered4", alpha = 0.5, width = 0.2) +
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun =`).
  stat_summary(fun.y = mean, geom = "point", colour = "orangered4", alpha = 0.85, size = 3, pch = 15) +
  geom_ribbon(data = logmorphine_RHPline, aes(x = Dose,y = p, ymin = pmin, ymax = pmax), alpha = 0.2) +
  scale_x_continuous(trans = "log10", breaks = c(0.01, 0.1, 1, 10),
                     labels =c("Vehicle", "0.1", "1.0", "10")) +
  theme(text = element_text(size = 14),
        axis.text = element_text(size = 14)) +
  geom_abline(slope = 0, intercept = 2.352, lty = 3, alpha = 0.8) +  # cutoff line
  scale_y_continuous(limits = c(2.1, 2.41), breaks = seq(2.1, 2.5, 0.1))
# Goodness-of-fit (cumulated-residuals test) for each model fitted so far.
# SHPfit/logSHPfit/RHPfit are presumably fitted elsewhere in this script.
modelFit(logmorphine_SHPfit, method = "cum")
modelFit(morphine_SHPfit, method = "cum")
modelFit(morphine_RHPfit, method = "cum")
modelFit(SHPfit, method = "cum")
modelFit(logSHPfit, method = "cum")
modelFit(RHPfit, method = "cum")
modelFit(logmorphine_RHPfit, method = "cum")
# ANOVA of latency on dose; plot 3 is the scale-location diagnostic.
# NOTE(review): Dose is numeric here, so aov() fits a linear trend rather
# than comparing dose groups -- wrap in factor() if groups were intended.
RHPmorphine_aov <- aov(Latency ~ Dose, data = RHP_morphine_DR)
plot(RHPmorphine_aov, 3)
|
## Anchored regular expressions used to pick each QC control-probe type out
## of the control annotation's Type column (see prepareData()).
qcProbes=list(
BSI="^BISULFITE CONVERSION I$",
BSII="^BISULFITE CONVERSION II$",
EC="^EXTENSION$",
SPI="^SPECIFICITY I$",
HYB= "^HYBRIDIZATION$",
NP="^NON-POLYMORPHIC$",
SPII="^SPECIFICITY II$",
TR="^TARGET REMOVAL$",
SC="^STAINING$",
NC="^NEGATIVE$") ## we don't use the normalization controls NORM_A, NORM_G, NORM_C or NORM_T
## Dispatch to the quality-control plot matching `plotName`.
## "MU", "OP", "BS", "HC" and "DP" have dedicated plot functions; any other
## name (the control-probe types in qcProbes) is drawn by one of the generic
## scatter/sample/box plots selected via `plotType`.  ggplot results are
## print()ed (required inside shiny/knitr); the plot is returned invisibly.
qcplot <- function(object, plotName, col,
                   plotType=c("boxplot", "sample", "scatter"),
                   threshold=NULL, showOutliers, background=FALSE)
{
    plotType <- match.arg(plotType)
    p <- if(plotName == "MU")
             plotMU(object, col, threshold, showOutliers, background)
         else if(plotName == "OP")
             plotOP(object, col, threshold, showOutliers, background)
         else if(plotName == "BS")
             plotBS(object, col, threshold, showOutliers, background)
         else if(plotName == "HC")
             plotHC(object, col, threshold, showOutliers, background)
         else if(plotName == "DP")
             plotDP(object, col, threshold, showOutliers, background)
         else ##if "BSI", "BSII", "HYB", "NP", "EC", "NC", "SC", "TR", "SPI", "SPII"
             switch(plotType,
                    scatter=qcscatterplot(object, plotName, showOutliers),
                    sample=qcsampleplot(object, plotName, showOutliers),
                    boxplot=qcboxplot(object, plotName, showOutliers))
    ## inherits() is the robust class test (p typically has class
    ## c("gg", "ggplot")); replaces any(class(p) %in% "ggplot").
    if(inherits(p, "ggplot"))
        return(invisible(print(p)))
    else
        return(invisible(p))
}
## Click-handler helper: given the plotted x/y vectors (named by sample id),
## find the point nearest the last click and remember its name in the global
## variable "highlight"; a click in empty space clears the highlight.
## NOTE(review): errors if the global "location" (the click coordinates,
## presumably set by the plot's click callback) was not set first -- verify.
setHighlight <- function(x, y)
{
    location <- get("location", envir=globalenv())
    rm(list="location", envir=globalenv())
    ##scale x and y range
    ## Standardize click and data with the ORIGINAL x/y mean and sd so the
    ## Euclidean distance below is comparable across axes (order matters:
    ## location is scaled before x and y are overwritten).
    location$x <- (location$x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE)
    location$y <- (location$y - mean(y, na.rm=TRUE))/sd(y, na.rm=TRUE)
    x <- (x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE)
    y <- (y - mean(y, na.rm=TRUE))/sd(y, na.rm=TRUE)
    ## Distance from the click to every plotted point.
    d <- sqrt((x - location$x)^2 + (y - location$y)^2)
    if(length(d) == 0)
        return(NULL)
    ##clicked in empty space remove highlighted
    ## "Empty space" = farther than 5% of the data-range diagonal from the
    ## nearest point.
    if(min(d, na.rm=TRUE) >
       0.05*sqrt(diff(range(x, na.rm=TRUE))^2 + diff(range(y, na.rm=TRUE))^2))
    {
        if(exists("highlight", envir=globalenv()))
            rm(list="highlight", envir=globalenv())
    }
    else
    {
        id <- which.min(d)
        highlight <- names(x)[id]
        assign("highlight", highlight, envir=globalenv())
    }
}
## Return the currently highlighted sample id, or NULL when nothing is
## highlighted.  get0() avoids the error get() raised after setHighlight()
## removed the "highlight" variable (e.g. a click in empty space).
getHighLightIndex <- function()
{
    get0("highlight", envir=globalenv())
}
## Record which samples are flagged as outliers for metric `type` in the
## global bookkeeping object "outliers" (rows = samples, columns = metrics).
## No-op when the bookkeeping object does not exist yet.
## NOTE: the `outliers` argument (sample ids) shadows the global of the same
## name, hence the explicit get()/assign() on globalenv.
setOutliers <- function(outliers, type)
{
    if(!exists("outliers", envir = globalenv()))
        return(NULL)
    out <- get("outliers", envir = globalenv())
    ## Overwrite the whole column: TRUE exactly for the given sample ids.
    ## (The former separate FALSE "reset" assignment was redundant.)
    out[, type] <- rownames(out) %in% outliers
    assign("outliers", out, envir = globalenv())
}
## For each id in `sampleIds`, is that sample flagged as an outlier on at
## least one metric in the global "outliers" bookkeeping object?
## Returns FALSE when no bookkeeping object exists.
getOutliers <- function(sampleIds)
{
    if(!exists("outliers", envir = globalenv()))
        return(FALSE)
    flags <- get("outliers", envir = globalenv())
    ## Sample ids with one or more TRUE entries across the metric columns.
    flagged <- rownames(flags)[rowSums(flags) > 0]
    sampleIds %in% flagged
}
## Assemble a long-format data.frame of log2 control-probe intensities (one
## row per control probe x sample) merged with the control annotation slot.
prepareData <- function(object)
{
    ##TODO add logarithm as plot option
    redInt <- log2(object@Rcontrols)
    grnInt <- log2(object@Gcontrols)
    ## Drop the normalization controls; they are not used (yet).
    keep <- !(object@controls$Type %in% c("NORM_A", "NORM_G", "NORM_C", "NORM_T"))
    controls <- object@controls[keep, ]
    long <- data.frame(Address=rep(rownames(redInt), ncol(redInt)),
                       Samples=rep(colnames(redInt), each=nrow(redInt)),
                       IntRed=as.vector(redInt),
                       IntGrn=as.vector(grnInt))
    merge(controls, long)
}
##Taken from minfi
##Added: argument na.rm
## as.matrix in case the RGset contains only one sample
## Detection p-value per locus per sample: probability the observed total
## intensity could arise from background, where background is modelled per
## channel from the NEGATIVE control probes (median/MAD -> normal tail).
## NOTE(review): `type` ("m+u") is currently unused -- kept for the
## minfi-compatible signature.
detectionP <- function (rgSet, type = "m+u", na.rm = FALSE) {
    locusNames <- getManifestInfo(rgSet, "locusNames")
    detP <- matrix(NA_real_, ncol=ncol(rgSet), nrow=length(locusNames),
                   dimnames=list(locusNames, sampleNames(rgSet)))
    controlIdx <- getControlAddress(rgSet, controlType="NEGATIVE")
    ## Red-channel background: per-sample median and MAD of negative controls.
    r <- getRed(rgSet)
    rBg <- r[controlIdx, ]
    rMu <- colMedians(as.matrix(rBg), na.rm = na.rm)
    rSd <- colMads(as.matrix(rBg), na.rm = na.rm)
    ## Green-channel background.
    g <- getGreen(rgSet)
    gBg <- g[controlIdx, ]
    gMu <- colMedians(as.matrix(gBg), na.rm = na.rm)
    gSd <- colMads(as.matrix(gBg), na.rm = na.rm)
    TypeII <- getProbeInfo(rgSet, type="II")
    TypeI.Red <- getProbeInfo(rgSet, type="I-Red")
    TypeI.Green <- getProbeInfo(rgSet, type="I-Green")
    for (i in seq_len(ncol(rgSet))) {  # seq_len() is safe for 0 columns (1:ncol was not)
        ## Type I red: both allele addresses read red, so background mean/sd
        ## are doubled for the summed intensity.
        intensity <- r[TypeI.Red$AddressA, i] + r[TypeI.Red$AddressB, i]
        detP[TypeI.Red$Name, i] <- 1 - pnorm(intensity,
                                             mean=rMu[i] * 2,
                                             sd=rSd[i] * 2)
        ## Type I green: both allele addresses read green.
        intensity <- g[TypeI.Green$AddressA, i] + g[TypeI.Green$AddressB, i]
        detP[TypeI.Green$Name, i] <- 1 - pnorm(intensity,
                                               mean=gMu[i] *2,
                                               sd=gSd[i] * 2)
        ## Type II: one address, red + green combined.
        intensity <- r[TypeII$AddressA, i] + g[TypeII$AddressA, i]
        detP[TypeII$Name, i] <- 1 - pnorm(intensity,
                                          mean=rMu[i] + gMu[i],
                                          sd=rSd[i] + gSd[i])
    }
    detP
}
|
/R/util.R
|
no_license
|
bbmri-nl/MethylAid
|
R
| false
| false
| 5,159
|
r
|
## Anchored regular expressions used to pick each QC control-probe type out
## of the control annotation's Type column (see prepareData()).
qcProbes=list(
BSI="^BISULFITE CONVERSION I$",
BSII="^BISULFITE CONVERSION II$",
EC="^EXTENSION$",
SPI="^SPECIFICITY I$",
HYB= "^HYBRIDIZATION$",
NP="^NON-POLYMORPHIC$",
SPII="^SPECIFICITY II$",
TR="^TARGET REMOVAL$",
SC="^STAINING$",
NC="^NEGATIVE$") ## we don't use the normalization controls NORM_A, NORM_G, NORM_C or NORM_T
## Dispatch to the quality-control plot matching `plotName`.
## "MU", "OP", "BS", "HC" and "DP" have dedicated plot functions; any other
## name (the control-probe types in qcProbes) is drawn by one of the generic
## scatter/sample/box plots selected via `plotType`.  ggplot results are
## print()ed (required inside shiny/knitr); the plot is returned invisibly.
qcplot <- function(object, plotName, col,
                   plotType=c("boxplot", "sample", "scatter"),
                   threshold=NULL, showOutliers, background=FALSE)
{
    plotType <- match.arg(plotType)
    p <- if(plotName == "MU")
             plotMU(object, col, threshold, showOutliers, background)
         else if(plotName == "OP")
             plotOP(object, col, threshold, showOutliers, background)
         else if(plotName == "BS")
             plotBS(object, col, threshold, showOutliers, background)
         else if(plotName == "HC")
             plotHC(object, col, threshold, showOutliers, background)
         else if(plotName == "DP")
             plotDP(object, col, threshold, showOutliers, background)
         else ##if "BSI", "BSII", "HYB", "NP", "EC", "NC", "SC", "TR", "SPI", "SPII"
             switch(plotType,
                    scatter=qcscatterplot(object, plotName, showOutliers),
                    sample=qcsampleplot(object, plotName, showOutliers),
                    boxplot=qcboxplot(object, plotName, showOutliers))
    ## inherits() is the robust class test (p typically has class
    ## c("gg", "ggplot")); replaces any(class(p) %in% "ggplot").
    if(inherits(p, "ggplot"))
        return(invisible(print(p)))
    else
        return(invisible(p))
}
## Click-handler helper: given the plotted x/y vectors (named by sample id),
## find the point nearest the last click and remember its name in the global
## variable "highlight"; a click in empty space clears the highlight.
## NOTE(review): errors if the global "location" (the click coordinates,
## presumably set by the plot's click callback) was not set first -- verify.
setHighlight <- function(x, y)
{
    location <- get("location", envir=globalenv())
    rm(list="location", envir=globalenv())
    ##scale x and y range
    ## Standardize click and data with the ORIGINAL x/y mean and sd so the
    ## Euclidean distance below is comparable across axes (order matters:
    ## location is scaled before x and y are overwritten).
    location$x <- (location$x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE)
    location$y <- (location$y - mean(y, na.rm=TRUE))/sd(y, na.rm=TRUE)
    x <- (x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE)
    y <- (y - mean(y, na.rm=TRUE))/sd(y, na.rm=TRUE)
    ## Distance from the click to every plotted point.
    d <- sqrt((x - location$x)^2 + (y - location$y)^2)
    if(length(d) == 0)
        return(NULL)
    ##clicked in empty space remove highlighted
    ## "Empty space" = farther than 5% of the data-range diagonal from the
    ## nearest point.
    if(min(d, na.rm=TRUE) >
       0.05*sqrt(diff(range(x, na.rm=TRUE))^2 + diff(range(y, na.rm=TRUE))^2))
    {
        if(exists("highlight", envir=globalenv()))
            rm(list="highlight", envir=globalenv())
    }
    else
    {
        id <- which.min(d)
        highlight <- names(x)[id]
        assign("highlight", highlight, envir=globalenv())
    }
}
## Return the currently highlighted sample id, or NULL when nothing is
## highlighted.  get0() avoids the error get() raised after setHighlight()
## removed the "highlight" variable (e.g. a click in empty space).
getHighLightIndex <- function()
{
    get0("highlight", envir=globalenv())
}
## Record which samples are flagged as outliers for metric `type` in the
## global bookkeeping object "outliers" (rows = samples, columns = metrics).
## No-op when the bookkeeping object does not exist yet.
## NOTE: the `outliers` argument (sample ids) shadows the global of the same
## name, hence the explicit get()/assign() on globalenv.
setOutliers <- function(outliers, type)
{
    if(!exists("outliers", envir = globalenv()))
        return(NULL)
    out <- get("outliers", envir = globalenv())
    ## Overwrite the whole column: TRUE exactly for the given sample ids.
    ## (The former separate FALSE "reset" assignment was redundant.)
    out[, type] <- rownames(out) %in% outliers
    assign("outliers", out, envir = globalenv())
}
## For each id in `sampleIds`, is that sample flagged as an outlier on at
## least one metric in the global "outliers" bookkeeping object?
## Returns FALSE when no bookkeeping object exists.
getOutliers <- function(sampleIds)
{
    if(!exists("outliers", envir = globalenv()))
        return(FALSE)
    flags <- get("outliers", envir = globalenv())
    ## Sample ids with one or more TRUE entries across the metric columns.
    flagged <- rownames(flags)[rowSums(flags) > 0]
    sampleIds %in% flagged
}
## Assemble a long-format data.frame of log2 control-probe intensities (one
## row per control probe x sample) merged with the control annotation slot.
prepareData <- function(object)
{
    ##TODO add logarithm as plot option
    redInt <- log2(object@Rcontrols)
    grnInt <- log2(object@Gcontrols)
    ## Drop the normalization controls; they are not used (yet).
    keep <- !(object@controls$Type %in% c("NORM_A", "NORM_G", "NORM_C", "NORM_T"))
    controls <- object@controls[keep, ]
    long <- data.frame(Address=rep(rownames(redInt), ncol(redInt)),
                       Samples=rep(colnames(redInt), each=nrow(redInt)),
                       IntRed=as.vector(redInt),
                       IntGrn=as.vector(grnInt))
    merge(controls, long)
}
##Taken from minfi
##Added: argument na.rm
## as.matrix in case the RGset contains only one sample
## Detection p-value per locus per sample: probability the observed total
## intensity could arise from background, where background is modelled per
## channel from the NEGATIVE control probes (median/MAD -> normal tail).
## NOTE(review): `type` ("m+u") is currently unused -- kept for the
## minfi-compatible signature.
detectionP <- function (rgSet, type = "m+u", na.rm = FALSE) {
    locusNames <- getManifestInfo(rgSet, "locusNames")
    detP <- matrix(NA_real_, ncol=ncol(rgSet), nrow=length(locusNames),
                   dimnames=list(locusNames, sampleNames(rgSet)))
    controlIdx <- getControlAddress(rgSet, controlType="NEGATIVE")
    ## Red-channel background: per-sample median and MAD of negative controls.
    r <- getRed(rgSet)
    rBg <- r[controlIdx, ]
    rMu <- colMedians(as.matrix(rBg), na.rm = na.rm)
    rSd <- colMads(as.matrix(rBg), na.rm = na.rm)
    ## Green-channel background.
    g <- getGreen(rgSet)
    gBg <- g[controlIdx, ]
    gMu <- colMedians(as.matrix(gBg), na.rm = na.rm)
    gSd <- colMads(as.matrix(gBg), na.rm = na.rm)
    TypeII <- getProbeInfo(rgSet, type="II")
    TypeI.Red <- getProbeInfo(rgSet, type="I-Red")
    TypeI.Green <- getProbeInfo(rgSet, type="I-Green")
    for (i in seq_len(ncol(rgSet))) {  # seq_len() is safe for 0 columns (1:ncol was not)
        ## Type I red: both allele addresses read red, so background mean/sd
        ## are doubled for the summed intensity.
        intensity <- r[TypeI.Red$AddressA, i] + r[TypeI.Red$AddressB, i]
        detP[TypeI.Red$Name, i] <- 1 - pnorm(intensity,
                                             mean=rMu[i] * 2,
                                             sd=rSd[i] * 2)
        ## Type I green: both allele addresses read green.
        intensity <- g[TypeI.Green$AddressA, i] + g[TypeI.Green$AddressB, i]
        detP[TypeI.Green$Name, i] <- 1 - pnorm(intensity,
                                               mean=gMu[i] *2,
                                               sd=gSd[i] * 2)
        ## Type II: one address, red + green combined.
        intensity <- r[TypeII$AddressA, i] + g[TypeII$AddressA, i]
        detP[TypeII$Name, i] <- 1 - pnorm(intensity,
                                          mean=rMu[i] + gMu[i],
                                          sd=rSd[i] + gSd[i])
    }
    detP
}
|
library(ggplot2)
## Drop the first two (identifier) columns of the global data_df for display
## in the app's data-table tab.
## NOTE(review): this masks base R's utils::data(); a rename (e.g.
## table_data) would be safer but requires updating the renderDataTable call.
data <- function(){
  data_df[,-1:-2]
}
## Scatter + smoother of Wins or Losses against the chosen predictor column.
## `predictorx` is a (character) column index into data_df after dropping
## the first two columns; `years` is the earliest season to include.
## NOTE(review): data_df is a global the app is expected to define; the
## assignment below only shadows it locally (the global is not modified).
regress_plot <- function(predictorx, responsey, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  if (responsey=="wins"){
    ## qplot() evaluates Wins inside data_df; qplot is superseded in newer
    ## ggplot2 -- kept as-is for compatibility.
    qplot(data=data_df,x=as.numeric(data_df[,as.numeric(predictorx)]),y=as.numeric(Wins),color=Wins,
          main=paste("Analysis of the San Francisco 49ers Wins \n During the Given Years"), xlab="Predictor Variable",
          ylab="Wins", geom=c("point","smooth"))
  }
  else if (responsey=="losses"){
    qplot(data=data_df,x=as.numeric(data_df[,as.numeric(predictorx)]),y=as.numeric(Losses),color=Losses,
          main=paste("Analysis of the San Francisco 49ers Losses \n During the Given Years"), xlab="Predictor Variable",
          ylab="Losses", geom=c("point","smooth"))
  }
}
## Print the lm() summary for Wins or Losses against the selected predictor
## column, restricted to seasons >= `years`.  The formula text is kept
## verbatim so the printed Call/coefficient labels are unchanged.
summa <- function(x, y, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  if (y=="wins"){
    fit <- lm(as.numeric(Wins) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    print(summary(fit))
  } else if (y=="losses"){
    fit <- lm(as.numeric(Losses) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    print(summary(fit))
  }
}
## Residual diagnostics for the selected simple regression: left panel is a
## residuals-vs-predictor scatterplot, right panel a residual histogram.
## NOTE(review): par() is changed without restoring the old settings (no
## on.exit(par(old))); fine inside renderPlot's fresh device, a side effect
## anywhere else.
residual <- function(x, y, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  par(mfrow=c(1,2), mar=c(5,5,1,1))
  if (y=="wins"){
    line <- lm(as.numeric(Wins) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    plot(as.numeric(data_df[,as.numeric(x)]), resid(line), main="Residuals Scatterplot", xlab="Predictor Variable", col="red",
         ylab="Residuals", pch=19)
    abline(h = 0, lty = 2)  # zero-residual reference line
    hist(resid(line), col="blue", xlab="Residual Value", main="Histogram of Residuals")
  }
  else if (y=="losses"){
    line <- lm(as.numeric(Losses) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    plot(as.numeric(data_df[,as.numeric(x)]), resid(line), main="Residuals Scatterplot", xlab="Predictor Variable", col="red",
         ylab="Residuals", pch=19)
    abline(h = 0, lty = 2)  # zero-residual reference line
    hist(resid(line), col="blue", xlab="Residual Value", main="Histogram of Residuals")
  }
}
## Normal Q-Q plot of the standardized residuals for the selected model,
## restricted to seasons >= `years`.
residual2 <- function(x, y, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  if (y=="wins"){
    fit <- lm(as.numeric(Wins) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    std_res <- rstandard(fit)
    qqnorm(std_res, col="red")
    qqline(std_res)
  } else if (y=="losses"){
    fit <- lm(as.numeric(Losses) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    std_res <- rstandard(fit)
    qqnorm(std_res, col="red", pch=19)
    qqline(std_res)
  }
}
## Shiny server: wires the UI inputs (Predictors, Response, Years) to the
## data table, regression plot, model summary and residual diagnostics.
shinyServer(
  function(input, output){
    output$data_table <- renderDataTable({data()})
    output$regression_plot <- renderPlot({regress_plot(input$Predictors, input$Response, input$Years)})
    output$Summary <- renderPrint({summa(input$Predictors, input$Response, input$Years)})
    output$residual_plot <- renderPlot({residual(input$Predictors, input$Response, input$Years)})
    output$residual_plot2 <- renderPlot({residual2(input$Predictors, input$Response, input$Years)})
  })
|
/R Programming project/server.R
|
no_license
|
SVG23/SoftwareDevelopment
|
R
| false
| false
| 3,297
|
r
|
library(ggplot2)
## Drop the first two (identifier) columns of the global data_df for display
## in the app's data-table tab.
## NOTE(review): this masks base R's utils::data(); a rename (e.g.
## table_data) would be safer but requires updating the renderDataTable call.
data <- function(){
  data_df[,-1:-2]
}
## Scatter + smoother of Wins or Losses against the chosen predictor column.
## `predictorx` is a (character) column index into data_df after dropping
## the first two columns; `years` is the earliest season to include.
## NOTE(review): data_df is a global the app is expected to define; the
## assignment below only shadows it locally (the global is not modified).
regress_plot <- function(predictorx, responsey, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  if (responsey=="wins"){
    ## qplot() evaluates Wins inside data_df; qplot is superseded in newer
    ## ggplot2 -- kept as-is for compatibility.
    qplot(data=data_df,x=as.numeric(data_df[,as.numeric(predictorx)]),y=as.numeric(Wins),color=Wins,
          main=paste("Analysis of the San Francisco 49ers Wins \n During the Given Years"), xlab="Predictor Variable",
          ylab="Wins", geom=c("point","smooth"))
  }
  else if (responsey=="losses"){
    qplot(data=data_df,x=as.numeric(data_df[,as.numeric(predictorx)]),y=as.numeric(Losses),color=Losses,
          main=paste("Analysis of the San Francisco 49ers Losses \n During the Given Years"), xlab="Predictor Variable",
          ylab="Losses", geom=c("point","smooth"))
  }
}
## Print the lm() summary for Wins or Losses against the selected predictor
## column, restricted to seasons >= `years`.  The formula text is kept
## verbatim so the printed Call/coefficient labels are unchanged.
summa <- function(x, y, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  if (y=="wins"){
    fit <- lm(as.numeric(Wins) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    print(summary(fit))
  } else if (y=="losses"){
    fit <- lm(as.numeric(Losses) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    print(summary(fit))
  }
}
## Residual diagnostics for the selected simple regression: left panel is a
## residuals-vs-predictor scatterplot, right panel a residual histogram.
## NOTE(review): par() is changed without restoring the old settings (no
## on.exit(par(old))); fine inside renderPlot's fresh device, a side effect
## anywhere else.
residual <- function(x, y, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  par(mfrow=c(1,2), mar=c(5,5,1,1))
  if (y=="wins"){
    line <- lm(as.numeric(Wins) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    plot(as.numeric(data_df[,as.numeric(x)]), resid(line), main="Residuals Scatterplot", xlab="Predictor Variable", col="red",
         ylab="Residuals", pch=19)
    abline(h = 0, lty = 2)  # zero-residual reference line
    hist(resid(line), col="blue", xlab="Residual Value", main="Histogram of Residuals")
  }
  else if (y=="losses"){
    line <- lm(as.numeric(Losses) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    plot(as.numeric(data_df[,as.numeric(x)]), resid(line), main="Residuals Scatterplot", xlab="Predictor Variable", col="red",
         ylab="Residuals", pch=19)
    abline(h = 0, lty = 2)  # zero-residual reference line
    hist(resid(line), col="blue", xlab="Residual Value", main="Histogram of Residuals")
  }
}
## Normal Q-Q plot of the standardized residuals for the selected model,
## restricted to seasons >= `years`.
residual2 <- function(x, y, years){
  data_df <- data_df[,-1:-2]
  data_df <- data_df[data_df$Year>=as.numeric(years),]
  if (y=="wins"){
    fit <- lm(as.numeric(Wins) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    std_res <- rstandard(fit)
    qqnorm(std_res, col="red")
    qqline(std_res)
  } else if (y=="losses"){
    fit <- lm(as.numeric(Losses) ~ as.numeric(data_df[,as.numeric(x)]), data=data_df)
    std_res <- rstandard(fit)
    qqnorm(std_res, col="red", pch=19)
    qqline(std_res)
  }
}
## Shiny server: wires the UI inputs (Predictors, Response, Years) to the
## data table, regression plot, model summary and residual diagnostics.
shinyServer(
  function(input, output){
    output$data_table <- renderDataTable({data()})
    output$regression_plot <- renderPlot({regress_plot(input$Predictors, input$Response, input$Years)})
    output$Summary <- renderPrint({summa(input$Predictors, input$Response, input$Years)})
    output$residual_plot <- renderPlot({residual(input$Predictors, input$Response, input$Years)})
    output$residual_plot2 <- renderPlot({residual2(input$Predictors, input$Response, input$Years)})
  })
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports.R
\name{md.survcox}
\alias{md.survcox}
\title{Fit a proportional hazards regression model over disease recurrence data
with missing information of possible deaths}
\usage{
md.survcox(data, f, maxtime, D, ratetable, iterations = 4, R = 50)
}
\arguments{
\item{data}{a data.frame in which to interpret the variables named in the
formula.}
\item{f}{a formula object, with the response on the left of a ~ operator,
and the terms on the right. The response must be a survival object as
returned by the \code{Surv} function.}
\item{maxtime}{maximum potential observation time (number of days).
where \code{status}=0 equals \code{time}.
where \code{status}=1 equals potential time of right censoring if no event
would be observed.}
\item{D}{demographic information compatible with \code{ratetable}, see
\code{\link{md.D}}.}
\item{ratetable}{a population mortality table, default is \code{slopop}}
\item{iterations}{the number of iteration steps to be performed, default is
4}
\item{R}{the number of multiple imputations performed to adjust the
estimated variance of estimates, default is 50.}
}
\value{
if \code{R} equals 1 then an object of class
\code{\link[survival]{coxph.object}} representing the fit.
if \code{R} > 1 then the result of the \code{\link[mitools]{MIcombine}} of
the \code{coxph} objects.
}
\description{
An iterative approach is used in this method to estimate the conditional
distribution required to correctly impute the times of deaths using
population mortality tables.\cr\cr
Note, that simply imputing expected survival times may seem intuitive,
but does not give unbiased estimates, since the right censored individuals
are not a random subsample of the patients.
}
\examples{
\dontrun{
library(missDeaths)
data(slopop)
data(observed)
observed$time = observed$time*365.2425
D = md.D(age=observed$age*365.2425, sex=observed$sex, year=(observed$year - 1970)*365.2425)
#fit a cox model (NOTE: estimated std error is slightly underestimated!)
md.survcox(observed, Surv(time, status) ~ age + sex + iq + elevation,
observed$maxtime*365.2425, D, slopop, iterations=4, R=1)
#multiple imputations to correct the estimated std error
md.survcox(observed, Surv(time, status) ~ age + sex + iq + elevation,
observed$maxtime*365.2425, D, slopop, iterations=4, R=50)
}
}
\references{
Stupnik T., Pohar Perme M. (2015) "Analysing disease recurrence
with missing at risk information." Statistics in Medicine 35. p1130-43.
\url{https://onlinelibrary.wiley.com/doi/abs/10.1002/sim.6766}
}
\seealso{
\code{\link{md.impute}}, \code{\link[mitools]{MIcombine}}
}
|
/man/md.survcox.Rd
|
no_license
|
cran/missDeaths
|
R
| false
| true
| 2,746
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports.R
\name{md.survcox}
\alias{md.survcox}
\title{Fit a proportional hazards regression model over disease recurrence data
with missing information of possible deaths}
\usage{
md.survcox(data, f, maxtime, D, ratetable, iterations = 4, R = 50)
}
\arguments{
\item{data}{a data.frame in which to interpret the variables named in the
formula.}
\item{f}{a formula object, with the response on the left of a ~ operator,
and the terms on the right. The response must be a survival object as
returned by the \code{Surv} function.}
\item{maxtime}{maximum potential observation time (number of days).
where \code{status}=0 equals \code{time}.
where \code{status}=1 equals potential time of right censoring if no event
would be observed.}
\item{D}{demographic information compatible with \code{ratetable}, see
\code{\link{md.D}}.}
\item{ratetable}{a population mortality table, default is \code{slopop}}
\item{iterations}{the number of iteration steps to be performed, default is
4}
\item{R}{the number of multiple imputations performed to adjust the
estimated variance of estimates, default is 50.}
}
\value{
if \code{R} equals 1 then an object of class
\code{\link[survival]{coxph.object}} representing the fit.
if \code{R} > 1 then the result of the \code{\link[mitools]{MIcombine}} of
the \code{coxph} objects.
}
\description{
An iterative approach is used in this method to estimate the conditional
distribution required to correctly impute the times of deaths using
population mortality tables.\cr\cr
Note, that simply imputing expected survival times may seem intuitive,
but does not give unbiased estimates, since the right censored individuals
are not a random subsample of the patients.
}
\examples{
\dontrun{
library(missDeaths)
data(slopop)
data(observed)
observed$time = observed$time*365.2425
D = md.D(age=observed$age*365.2425, sex=observed$sex, year=(observed$year - 1970)*365.2425)
#fit a cox model (NOTE: estimated std error is slightly underestimated!)
md.survcox(observed, Surv(time, status) ~ age + sex + iq + elevation,
observed$maxtime*365.2425, D, slopop, iterations=4, R=1)
#multiple imputations to correct the stimated std error
md.survcox(observed, Surv(time, status) ~ age + sex + iq + elevation,
observed$maxtime*365.2425, D, slopop, iterations=4, R=50)
}
}
\references{
Stupnik T., Pohar Perme M. (2015) "Analysing disease recurrence
with missing at risk information." Statistics in Medicine 35. p1130-43.
\url{https://onlinelibrary.wiley.com/doi/abs/10.1002/sim.6766}
}
\seealso{
\code{\link{md.impute}}, \code{\link[mitools]{MIcombine}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facet_nodes.R
\name{facet_nodes}
\alias{facet_nodes}
\title{Create small multiples based on node attributes}
\usage{
facet_nodes(facets, nrow = NULL, ncol = NULL, scales = "fixed",
shrink = TRUE, labeller = "label_value", as.table = TRUE,
switch = NULL, drop = TRUE, dir = "h", strip.position = "top")
}
\arguments{
\item{facets}{A set of variables or expressions quoted by \code{\link[=vars]{vars()}}
and defining faceting groups on the rows or columns dimension.
The variables can be named (the names are passed to \code{labeller}).
For compatibility with the classic interface, can also be a
formula or character vector. Use either a one sided formula, \code{~a + b},
or a character vector, \code{c("a", "b")}.}
\item{nrow}{Number of rows and columns.}
\item{ncol}{Number of rows and columns.}
\item{scales}{Should scales be fixed (\code{"fixed"}, the default),
free (\code{"free"}), or free in one dimension (\code{"free_x"},
\code{"free_y"})?}
\item{shrink}{If \code{TRUE}, will shrink scales to fit output of
statistics, not raw data. If \code{FALSE}, will be range of raw data
before statistical summary.}
\item{labeller}{A function that takes one data frame of labels and
returns a list or data frame of character vectors. Each input
column corresponds to one factor. Thus there will be more than
one with formulae of the type \code{~cyl + am}. Each output
column gets displayed as one separate line in the strip
label. This function should inherit from the "labeller" S3 class
for compatibility with \code{\link[=labeller]{labeller()}}. See
\code{\link[=label_value]{label_value()}} for more details and pointers to other
options.}
\item{as.table}{If \code{TRUE}, the default, the facets are laid out like
a table with highest values at the bottom-right. If \code{FALSE}, the
facets are laid out like a plot with the highest value at the top-right.}
\item{switch}{By default, the labels are displayed on the top and
right of the plot. If \code{"x"}, the top labels will be
displayed to the bottom. If \code{"y"}, the right-hand side
labels will be displayed to the left. Can also be set to
\code{"both"}.}
\item{drop}{If \code{TRUE}, the default, all factor levels not used in the
data will automatically be dropped. If \code{FALSE}, all factor levels
will be shown, regardless of whether or not they appear in the data.}
\item{dir}{Direction: either \code{"h"} for horizontal, the default, or \code{"v"},
for vertical.}
\item{strip.position}{By default, the labels are displayed on the top of
the plot. Using \code{strip.position} it is possible to place the labels on
either of the four sides by setting \code{strip.position = c("top",
"bottom", "left", "right")}}
}
\description{
This function is equivalent to \code{\link[ggplot2:facet_wrap]{ggplot2::facet_wrap()}} but only
facets nodes. Edges are drawn if their terminal nodes are both present in a
panel.
}
\examples{
library(tidygraph)
gr <- as_tbl_graph(highschool) \%>\%
mutate(popularity = as.character(cut(centrality_degree(mode = 'in'),
breaks = 3,
labels = c('low', 'medium', 'high')
)))
ggraph(gr) +
geom_edge_link() +
geom_node_point() +
facet_nodes(~popularity)
}
\seealso{
Other ggraph-facets: \code{\link{facet_edges}},
\code{\link{facet_graph}}
}
\concept{ggraph-facets}
|
/man/facet_nodes.Rd
|
permissive
|
schochastics/ggraph
|
R
| false
| true
| 3,365
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facet_nodes.R
\name{facet_nodes}
\alias{facet_nodes}
\title{Create small multiples based on node attributes}
\usage{
facet_nodes(facets, nrow = NULL, ncol = NULL, scales = "fixed",
shrink = TRUE, labeller = "label_value", as.table = TRUE,
switch = NULL, drop = TRUE, dir = "h", strip.position = "top")
}
\arguments{
\item{facets}{A set of variables or expressions quoted by \code{\link[=vars]{vars()}}
and defining faceting groups on the rows or columns dimension.
The variables can be named (the names are passed to \code{labeller}).
For compatibility with the classic interface, can also be a
formula or character vector. Use either a one sided formula, \code{~a + b},
or a character vector, \code{c("a", "b")}.}
\item{nrow}{Number of rows and columns.}
\item{ncol}{Number of rows and columns.}
\item{scales}{Should scales be fixed (\code{"fixed"}, the default),
free (\code{"free"}), or free in one dimension (\code{"free_x"},
\code{"free_y"})?}
\item{shrink}{If \code{TRUE}, will shrink scales to fit output of
statistics, not raw data. If \code{FALSE}, will be range of raw data
before statistical summary.}
\item{labeller}{A function that takes one data frame of labels and
returns a list or data frame of character vectors. Each input
column corresponds to one factor. Thus there will be more than
one with formulae of the type \code{~cyl + am}. Each output
column gets displayed as one separate line in the strip
label. This function should inherit from the "labeller" S3 class
for compatibility with \code{\link[=labeller]{labeller()}}. See
\code{\link[=label_value]{label_value()}} for more details and pointers to other
options.}
\item{as.table}{If \code{TRUE}, the default, the facets are laid out like
a table with highest values at the bottom-right. If \code{FALSE}, the
facets are laid out like a plot with the highest value at the top-right.}
\item{switch}{By default, the labels are displayed on the top and
right of the plot. If \code{"x"}, the top labels will be
displayed to the bottom. If \code{"y"}, the right-hand side
labels will be displayed to the left. Can also be set to
\code{"both"}.}
\item{drop}{If \code{TRUE}, the default, all factor levels not used in the
data will automatically be dropped. If \code{FALSE}, all factor levels
will be shown, regardless of whether or not they appear in the data.}
\item{dir}{Direction: either \code{"h"} for horizontal, the default, or \code{"v"},
for vertical.}
\item{strip.position}{By default, the labels are displayed on the top of
the plot. Using \code{strip.position} it is possible to place the labels on
either of the four sides by setting \code{strip.position = c("top",
"bottom", "left", "right")}}
}
\description{
This function is equivalent to \code{\link[ggplot2:facet_wrap]{ggplot2::facet_wrap()}} but only
facets nodes. Edges are drawn if their terminal nodes are both present in a
panel.
}
\examples{
library(tidygraph)
gr <- as_tbl_graph(highschool) \%>\%
mutate(popularity = as.character(cut(centrality_degree(mode = 'in'),
breaks = 3,
labels = c('low', 'medium', 'high')
)))
ggraph(gr) +
geom_edge_link() +
geom_node_point() +
facet_nodes(~popularity)
}
\seealso{
Other ggraph-facets: \code{\link{facet_edges}},
\code{\link{facet_graph}}
}
\concept{ggraph-facets}
|
library(qgtools)
### Name: adc.simudata
### Title: An R function to generate an ADC model simulated data set
### Aliases: adc.simudata
### Keywords: ADC model cotton simuated data cotf2
### ** Examples
library(qgtools)
data(cotf2)
Ped=cotf2[,c(1:5)]
Y=cotf2[,-c(1:5)]
YS=adc.simudata(Y,Ped,v=rep(20,9),b=c(100))
##End
|
/data/genthat_extracted_code/qgtools/examples/adc.simudata.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 335
|
r
|
library(qgtools)
### Name: adc.simudata
### Title: An R function to generate an ADC model simulated data set
### Aliases: adc.simudata
### Keywords: ADC model cotton simuated data cotf2
### ** Examples
library(qgtools)
data(cotf2)
Ped=cotf2[,c(1:5)]
Y=cotf2[,-c(1:5)]
YS=adc.simudata(Y,Ped,v=rep(20,9),b=c(100))
##End
|
### This script takes the 1000 EWAS permutations and for each site calculates its average ranking
# Setting up
setwd("../Results")
filenames<-paste("../../Data/EWASPermutations/EWASPermutations100Num", seq(1,10,1), ".rdata", sep="")
allres<-matrix(ncol=0, nrow=804826)
for (f in 1:10){
filename<-filenames[f]
load(filename)
allres<-cbind(allres, res)
}
dim(allres)
#[1] 804826 1000
# Looping through each EWAS and ranking the sites by their p-value
ranks<-matrix(NA, ncol=ncol(allres), nrow=nrow(allres))
rownames(ranks)<-rownames(allres)
for(i in 1:ncol(allres)){
perm<-allres[,i]
ranks[,i]<-order(perm)
}
# Finding average rank for each site
avrank<-rowMeans(ranks)
# Saving
write.csv(avrank, "AvPermuationsRank.csv")
|
/6.CalcAverageRank.r
|
no_license
|
ejh243/EPICStatsPaper
|
R
| false
| false
| 737
|
r
|
### This script takes the 1000 EWAS permutations and for each site calculates its average ranking
# Setting up
setwd("../Results")
filenames<-paste("../../Data/EWASPermutations/EWASPermutations100Num", seq(1,10,1), ".rdata", sep="")
allres<-matrix(ncol=0, nrow=804826)
for (f in 1:10){
filename<-filenames[f]
load(filename)
allres<-cbind(allres, res)
}
dim(allres)
#[1] 804826 1000
# Looping through each EWAS and ranking the sites by their p-value
ranks<-matrix(NA, ncol=ncol(allres), nrow=nrow(allres))
rownames(ranks)<-rownames(allres)
for(i in 1:ncol(allres)){
perm<-allres[,i]
ranks[,i]<-order(perm)
}
# Finding average rank for each site
avrank<-rowMeans(ranks)
# Saving
write.csv(avrank, "AvPermuationsRank.csv")
|
library(shiny)
library(datasets)
# Define server logic requirments
shinyServer(function(input, output) {
# Compute the forumla text in a reactive expression
# Generate a plot of the requested variable against mpg and only
# include outliers if requested
output$semeionplot<- renderPlot(plot(history))
})
|
/ADS_FINAL/shinybasicfiles/server.R
|
no_license
|
manethochen/PrudentialReport
|
R
| false
| false
| 338
|
r
|
library(shiny)
library(datasets)
# Define server logic requirments
shinyServer(function(input, output) {
# Compute the forumla text in a reactive expression
# Generate a plot of the requested variable against mpg and only
# include outliers if requested
output$semeionplot<- renderPlot(plot(history))
})
|
library(raster)
prj <- "+proj=stere +lat_0=90 +lat_ts=71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
src <- readAll(raadtools::readtopo("gebco_14")) ##, xylim = extent(-180, 180, -90, 0))
r <- raster(projectExtent(raster(extent(-180, 180, 5, 90), crs = "+init=epsg:4326"), prj))
## cleanup and rebuild
r <- raster(spex::buffer_extent(r, 16000), crs = prj)
res(r) <- 16000
Bathy <- projectRaster(src, r)
dataType(Bathy) <- "INT2S"
Bathy <- setValues(Bathy, as.integer(values(Bathy)))
usethis::use_data(Bathy)
|
/data-raw/Bathy.R
|
no_license
|
mdsumner/NOmap
|
R
| false
| false
| 558
|
r
|
library(raster)
prj <- "+proj=stere +lat_0=90 +lat_ts=71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
src <- readAll(raadtools::readtopo("gebco_14")) ##, xylim = extent(-180, 180, -90, 0))
r <- raster(projectExtent(raster(extent(-180, 180, 5, 90), crs = "+init=epsg:4326"), prj))
## cleanup and rebuild
r <- raster(spex::buffer_extent(r, 16000), crs = prj)
res(r) <- 16000
Bathy <- projectRaster(src, r)
dataType(Bathy) <- "INT2S"
Bathy <- setValues(Bathy, as.integer(values(Bathy)))
usethis::use_data(Bathy)
|
#' Get the number of efficacy events seen at the doses under investigation.
#'
#' @param x An R object of class \code{"dose_finding_fit"}
#' @param dose Optional integer, at which dose-level? Omit to get data on all doses.
#' @param ... arguments passed to other methods
#'
#' @return integer vector
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # EffTox example
#' x <- stan_efftox_demo(outcome_str = '1N 2E')
#' eff_at_dose(fit) # c(0, 1, 0, 0)
#' eff_at_dose(fit, dose = 2) # 1
#' eff_at_dose(fit, dose = 3) # 0
#' }
eff_at_dose <- function(x, dose, ...) {
UseMethod('eff_at_dose')
}
#' @rdname eff_at_dose
#' @export
eff_at_dose.efftox_fit <- function(x, dose = NULL, ...) {
if(is.null(dose))
sapply(x$dose_indices, function(i) sum(x$eff[x$doses == i]))
else
sum(x$eff[x$doses == dose])
}
|
/R/eff_at_dose.R
|
no_license
|
brockk/trialr
|
R
| false
| false
| 822
|
r
|
#' Get the number of efficacy events seen at the doses under investigation.
#'
#' @param x An R object of class \code{"dose_finding_fit"}
#' @param dose Optional integer, at which dose-level? Omit to get data on all doses.
#' @param ... arguments passed to other methods
#'
#' @return integer vector
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # EffTox example
#' x <- stan_efftox_demo(outcome_str = '1N 2E')
#' eff_at_dose(fit) # c(0, 1, 0, 0)
#' eff_at_dose(fit, dose = 2) # 1
#' eff_at_dose(fit, dose = 3) # 0
#' }
eff_at_dose <- function(x, dose, ...) {
UseMethod('eff_at_dose')
}
#' @rdname eff_at_dose
#' @export
eff_at_dose.efftox_fit <- function(x, dose = NULL, ...) {
if(is.null(dose))
sapply(x$dose_indices, function(i) sum(x$eff[x$doses == i]))
else
sum(x$eff[x$doses == dose])
}
|
dget(fake.dat, "fakedat.txt")
library(runjags)
sim_test <- run.jags("model.txt", data = fake.dat$jags.data,
monitor = fake.dat$jags.pars, adapt = 100, n.chains = 2, sample = 200, burnin = 0,
inits = fake.dat$jags.inits, method = "parallel")
#this runs fine when line 61 is commented out. The minute it is included, the error "SimpleRange:leftoffset" appears.
tt <- as.matrix(sim_test$mcmc)
min(tt); max(tt)
|
/Runcode.R
|
no_license
|
heathergaya/modelproblem
|
R
| false
| false
| 456
|
r
|
dget(fake.dat, "fakedat.txt")
library(runjags)
sim_test <- run.jags("model.txt", data = fake.dat$jags.data,
monitor = fake.dat$jags.pars, adapt = 100, n.chains = 2, sample = 200, burnin = 0,
inits = fake.dat$jags.inits, method = "parallel")
#this runs fine when line 61 is commented out. The minute it is included, the error "SimpleRange:leftoffset" appears.
tt <- as.matrix(sim_test$mcmc)
min(tt); max(tt)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/matstat.R
\name{rollFun}
\alias{rollFun}
\title{Compute rolling (a.k.a. moving) window statistics}
\usage{
rollFun(dat, width, FUN, force_rollapply = FALSE, ...)
}
\arguments{
\item{dat}{a numeric vector, matrix or data.frame. In the latter cases
rolling statistics are computed column-wise.}
\item{width}{width of moving window; can be an integer value or vector.}
\item{FUN}{the function to be applied to compute moving window statistics.
See details.}
\item{force_rollapply}{logical variable; if yes, \code{zoo::rollapply} is
called (default = FALSE).}
\item{...}{optional arguments to the corresponding function in \pkg{caTools}
or \code{zoo::rollapply}}
}
\value{
An object having the same attributes as dat.
}
\description{
\code{rollFun} computes rolling window statistics on vectors or matrices.
}
\details{
If FUN is one of \code{min}, \code{max}, \code{mean}, \code{sd},
\code{mad}, \code{quantile} (OR "min", "max", "mean", etc.) \code{rollFun}
calls the corresponding function from the \pkg{caTools} package (e.g.
\code{caTools::runmin}). Otherwise, or if \code{force_rollapply} is TRUE,
\code{zoo::rollapply} is called.
}
|
/man/rollFun.Rd
|
no_license
|
kapilsaxena33/eegR
|
R
| false
| false
| 1,226
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/matstat.R
\name{rollFun}
\alias{rollFun}
\title{Compute rolling (a.k.a. moving) window statistics}
\usage{
rollFun(dat, width, FUN, force_rollapply = FALSE, ...)
}
\arguments{
\item{dat}{a numeric vector, matrix or data.frame. In the latter cases
rolling statistics are computed column-wise.}
\item{width}{width of moving window; can be an integer value or vector.}
\item{FUN}{the function to be applied to compute moving window statistics.
See details.}
\item{force_rollapply}{logical variable; if yes, \code{zoo::rollapply} is
called (default = FALSE).}
\item{...}{optional arguments to the corresponding function in \pkg{caTools}
or \code{zoo::rollapply}}
}
\value{
An object having the same attributes as dat.
}
\description{
\code{rollFun} computes rolling window statistics on vectors or matrices.
}
\details{
If FUN is one of \code{min}, \code{max}, \code{mean}, \code{sd},
\code{mad}, \code{quantile} (OR "min", "max", "mean", etc.) \code{rollFun}
calls the corresponding function from the \pkg{caTools} package (e.g.
\code{caTools::runmin}). Otherwise, or if \code{force_rollapply} is TRUE,
\code{zoo::rollapply} is called.
}
|
library(DAAG)
library(ridge)
panel.cor <- function(x, y, digits=2, prefix="", cex.cor)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.8/strwidth(txt)
test <- cor.test(x,y)
# borrowed from printCoefmat
Signif <- symnum(test$p.value, corr = FALSE, na = FALSE,
cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
symbols = c("***", "**", "*", ".", " "))
text(0.5, 0.5, txt, cex = 2)
text(.8, .8, Signif, cex=cex, col=2)
}
setwd("/dmine/data/USDA/agmesh-scenarios/palouse/summaries3/")
#files <- list.files(pattern = "\\_WHEAT_drought$")
#myfiles = do.call(rbind, lapply(files, function(x)
myfiles <- read.csv("1989-2015_combined_revised.csv")
#names(myfiles)[19] <- c("year")
myfiles$prpet <- (myfiles$pr - myfiles$pet)
write.csv(myfiles, file = "WHEAT_drought_summary")
setwd("/dmine/data/USDA/agmesh-scenarios/palouse/summaries3/")
myfiles <- read.csv("1989-2015_combined_revised.csv", strip.white=TRUE)
#setwd("/dmine/data/USDA/agmesh-scenarios/Washington/summaries/")
#myfiles2 <- read.csv("2001_2015_usda_gridmet_Washington", strip.white=TRUE)
#setwd("/dmine/data/USDA/agmesh-scenarios/Oregon/summaries/")
#myfiles3 <- read.csv("2001_2015_usda_gridmet_Oregon", strip.white=TRUE)
#myfile4 <- rbind(myfiles1,myfiles2,myfiles3)
myfiles_allyears <- subset(myfiles, , c(pr, pdsi, pet, prpet, tmmx, erc, soil_moisture_shorterm, soil_moisture_longterm, loss, count, countratio, county, commodity, damagecause, year))
myfiles_allyears$county <- factor(myfiles_allyears$county)
myfiles_allyears$year <- factor(myfiles_allyears$year)
myfiles_allyears$loss <- scale(myfiles_allyears$loss, center = TRUE, scale = FALSE)
myfiles_allyears[1:7] <- scale(myfiles_allyears[1:7], center = TRUE, scale = TRUE)
#--allyears pairwise plot
#--countratio
myfiles_allyears <- subset(myfiles_allyears, county =="Whitman")
myfiles_whitman <- subset(myfiles_allyears, damagecause =="Heat" | damagecause == "Drought" | damagecause == "Failure Irrig Supply" | damagecause == "Hot Wind")
myfiles_f <- subset(myfiles_whitman, commodity =="WHEAT")
#pairs(data.matrix(myfiles_allyears[c(1,2,3,4,5,6)]), lower.panel=panel.smooth, upper.panel=panel.cor)
pairs(count ~ pr + pdsi + pet + prpet + erc + tmmx + soil_moisture_shorterm + soil_moisture_longterm, lower.panel=panel.smooth, upper.panel=panel.cor, data=myfiles_f, main="1989-2015 WHEAT Drought Whitman County, Count")
dev.off()
#-loss
dev.off()
pairs(myfiles_allyears[c(1,2,3,4,5,6,7)], lower.panel=panel.smooth, upper.panel=panel.cor)
dev.off()
#---only 2008
myfiles_2008 <- subset(data.frame(myfiles_allyears), year == "2008")
#---only 2009
myfiles_2009 <- subset(data.frame(myfiles_allyears), year == "2009")
#--only 2015
myfiles_2015 <- subset(data.frame(myfiles_allyears), year == "2015")
#--some linear models
dev.off()
plot(lm(count ~ pr+pdsi+pet+prpet+tmmx+soil_moisture_shorterm*count, data=myfiles_f), panel = panel.smooth)
dev.off()
layout(matrix(c(1,2,3,4,5,6),3,2)) # optional 4 graphs/page
lmcount <- lm(count ~ pr+pdsi+pet+tmmx+soil_moisture_shorterm*year, data=myfiles_f)
plot(lmcount, which = 1:6, panel = panel.smooth)
mtext("2007-2015 Palouse Regression pr+pdsi+prpet+tmmx*year", side = 3, line = -2, outer = TRUE)
dev.off()
#--loss and acre ranges as variables change over the length of the dataset
layout(matrix(c(1,2,3,4,5,6),3,2)) # optional 4 graphs/page
library(effects)
model.lm <- lm(formula=count ~ pr+soil_moisture_shorterm+pet+erc+tmmx*year,data=myfiles_allyears)
plot(effect(term="year",mod=model.lm,default.levels=20),multiline=TRUE, las = 2)
dev.off()
library(effects)
model.lm <- lm(formula=loss ~ pr+pet+erc+tmmx*year,data=myfiles_allyears)
plot(effect(term="year",mod=model.lm,default.levels=20),multiline=TRUE)
dev.off()
#--multiple regression 2009
fit <- lm(loss ~ pr + pet + soil_moisture_shorterm + tmmx, data=myfiles_f)
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
summary(fit)
#--multiple regression 2008
fit <- lm(loss ~ pr + pet + prpet + tmmx, data=myfiles_2008)
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
#--multiple regression 2015
fit <- lm(loss ~ pr + pet + prpet + tmmx, data=myfiles_2015)
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
#-3fold cross validation with
dev.off()
#--cv.lm for all three years compared
layout(matrix(c(1,2,3,4,5,6),3,2))
#---Multicollinearity test
fit08_VIF <- vif(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears))
fit08 <- lm(loss ~ erc + pr + pet + pdsi + soil_moisture_longterm + tmmx + soil_moisture_shorterm, data=myfiles_f)
cv.lm(data=myfiles_whitman, fit08, main = "Wheat Whitman, Heat/Drought/Hot Wind/Failed Irrig 1989-2015") # 3 fold cross-validation
lm(data=myfiles_whitman, fit08, main = "Wheat loss regression 2008") # 3 fold cross-validation
#---Multicollinearity test
fit09_VIF <- VIF(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2009))
fit09 <- lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2009)
cv.lm(data=myfiles_2009, fit09, m=3, main = "Wheat loss regression 2009")
#---Multicollinearity test
fit15_VIF <- VIF(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2015))
fit15 <- lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2015)
cv.lm(data=myfiles_2015, fit15, m=3, main = "Wheat loss regression 2015")
text <- capture.output(summary(fit08))
textplot(text, cex=.8, halign="right", valign="center")
text <- capture.output(summary(fit09))
textplot(text, cex=.8, halign="right", valign="center")
text <- capture.output(summary(fit15))
textplot(text, cex=.8, halign="right", valign="center")
dev.off()
layout(matrix(c(1,2,3,4),2,2))
#---Multicollinearity test all files
fit_VIF_loss <- VIF(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears))
fitallyears_loss <- lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears)
cv.lm(data=myfiles_allyears, fitallyears_loss, m=3, main = "Wheat loss regression 2007-2015")
fit_VIF_countratio <- VIF(lm(countratio ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears))
fitallyears_count <- lm(countratio ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears)
cv.lm(data=myfiles_allyears, fitallyears_count, m=3, main = "Wheat count ratio regression 2007-2015")
text <- capture.output(summary(fitallyears_loss))
textplot(text, cex=.8, halign="right", valign="center")
text <- capture.output(summary(fitallyears_count))
textplot(text, cex=.8, halign="right", valign="center")
#--manova
dev.off()
manova_allyears <- manova(cbind(pr, pdsi, prpet) ~ year + commodity, data = myfiles_allyears)
summary(manova_allyears)
#--are counties and years significantly different between climate variables?
summary.aov(manova_allyears)
#--interaction between
dev.off()
layout(matrix(c(1,2,3,4),2,1)) # optional 4 graphs/page
myfiles_wbcd <- subset(myfiles_allyears, commodity == "WHEAT")
myfiles_allallall <- subset(myfiles_allall, damagecause == "Drought")
attach(myfiles_allyears)
year <- factor(year)
commodity <- factor(commodity)
interaction.plot(year, commodity, count, type="b", las=2, col=c(1:7),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,22,24),
xlab="Years",
ylab="Loss ($)",
main="Interaction of Loss across counties by year", data = myfiles_allyears)
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(year, county, acres, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Loss unscaled",
main="Interaction of Loss unscaled across counties by year")
dev.off()
layout(matrix(c(1,2,3,4),2,1)) # optional 4 graphs/page
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(county, year, count, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count",
main="Interaction of frequency of WHEAT.DROUGHT claims across counties by year")
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(county, year, countratio, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count ratio",
main="Interaction of frequency RATIO of WHEAT.DROUGHT claims across counties by year")
dev.off()
layout(matrix(c(1,2,3,4),2,1)) # optional 4 graphs/page
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(year, county, pet, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count ratio",
main="Interaction of frequency RATIO of WHEAT.DROUGHT claims across counties by year")
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(year, county, pdsi, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count ratio",
main="Interaction of frequency RATIO of WHEAT.DROUGHT claims across counties by year")
dev.off()
# Plot Means with Error Bars
library(gplots)
attach(myfiles_allyears)
plotmeans(count~year,xlab="years",
ylab="loss ($) ", main="Mean Claim Count Plot\nwith 95% CI")
dev.off()
library(gplots)
attach(myfiles_allyears)
plotmeans(pr~year,xlab="years",
ylab="loss ($) ", main="Mean Claim Count Plot\nwith 95% CI")
|
/eda_output/agmesh-commodity-annual-palouse_ed42.R
|
no_license
|
erichseamon/dmine
|
R
| false
| false
| 10,722
|
r
|
library(DAAG)
library(ridge)
panel.cor <- function(x, y, digits=2, prefix="", cex.cor)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.8/strwidth(txt)
test <- cor.test(x,y)
# borrowed from printCoefmat
Signif <- symnum(test$p.value, corr = FALSE, na = FALSE,
cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
symbols = c("***", "**", "*", ".", " "))
text(0.5, 0.5, txt, cex = 2)
text(.8, .8, Signif, cex=cex, col=2)
}
# NOTE(review): exploratory analysis of USDA crop-insurance claims vs. gridMET
# climate variables for the Palouse region (wheat, Whitman County focus).
# Interactive scratch script: it opens/closes graphics devices repeatedly and
# relies on attach(), so it is order-dependent and not safe to source() blindly.
setwd("/dmine/data/USDA/agmesh-scenarios/palouse/summaries3/")
#files <- list.files(pattern = "\\_WHEAT_drought$")
#myfiles = do.call(rbind, lapply(files, function(x)
myfiles <- read.csv("1989-2015_combined_revised.csv")
#names(myfiles)[19] <- c("year")
# Climatic water-balance proxy: precipitation minus potential evapotranspiration.
myfiles$prpet <- (myfiles$pr - myfiles$pet)
# NOTE(review): output file name has no .csv extension (intentional? confirm).
write.csv(myfiles, file = "WHEAT_drought_summary")
setwd("/dmine/data/USDA/agmesh-scenarios/palouse/summaries3/")
myfiles <- read.csv("1989-2015_combined_revised.csv", strip.white=TRUE)
#setwd("/dmine/data/USDA/agmesh-scenarios/Washington/summaries/")
#myfiles2 <- read.csv("2001_2015_usda_gridmet_Washington", strip.white=TRUE)
#setwd("/dmine/data/USDA/agmesh-scenarios/Oregon/summaries/")
#myfiles3 <- read.csv("2001_2015_usda_gridmet_Oregon", strip.white=TRUE)
#myfile4 <- rbind(myfiles1,myfiles2,myfiles3)
# Keep climate covariates, loss/count responses, and grouping factors only.
myfiles_allyears <- subset(myfiles, , c(pr, pdsi, pet, prpet, tmmx, erc, soil_moisture_shorterm, soil_moisture_longterm, loss, count, countratio, county, commodity, damagecause, year))
myfiles_allyears$county <- factor(myfiles_allyears$county)
myfiles_allyears$year <- factor(myfiles_allyears$year)
# Center loss (no scaling); center AND scale the first seven climate columns.
myfiles_allyears$loss <- scale(myfiles_allyears$loss, center = TRUE, scale = FALSE)
myfiles_allyears[1:7] <- scale(myfiles_allyears[1:7], center = TRUE, scale = TRUE)
#--allyears pairwise plot
#--countratio
# NOTE(review): despite its name, from this line onward myfiles_allyears holds
# Whitman County rows only.
myfiles_allyears <- subset(myfiles_allyears, county =="Whitman")
myfiles_whitman <- subset(myfiles_allyears, damagecause =="Heat" | damagecause == "Drought" | damagecause == "Failure Irrig Supply" | damagecause == "Hot Wind")
myfiles_f <- subset(myfiles_whitman, commodity =="WHEAT")
#pairs(data.matrix(myfiles_allyears[c(1,2,3,4,5,6)]), lower.panel=panel.smooth, upper.panel=panel.cor)
# Pairwise scatterplots of claim count vs. climate covariates (panel.cor is
# defined earlier in this file).
pairs(count ~ pr + pdsi + pet + prpet + erc + tmmx + soil_moisture_shorterm + soil_moisture_longterm, lower.panel=panel.smooth, upper.panel=panel.cor, data=myfiles_f, main="1989-2015 WHEAT Drought Whitman County, Count")
dev.off()
#-loss
dev.off()
pairs(myfiles_allyears[c(1,2,3,4,5,6,7)], lower.panel=panel.smooth, upper.panel=panel.cor)
dev.off()
#---only 2008
myfiles_2008 <- subset(data.frame(myfiles_allyears), year == "2008")
#---only 2009
myfiles_2009 <- subset(data.frame(myfiles_allyears), year == "2009")
#--only 2015
myfiles_2015 <- subset(data.frame(myfiles_allyears), year == "2015")
#--some linear models
dev.off()
# NOTE(review): the response `count` also appears on the RHS via
# soil_moisture_shorterm*count — almost certainly unintended; confirm.
plot(lm(count ~ pr+pdsi+pet+prpet+tmmx+soil_moisture_shorterm*count, data=myfiles_f), panel = panel.smooth)
dev.off()
layout(matrix(c(1,2,3,4,5,6),3,2)) # optional 4 graphs/page
lmcount <- lm(count ~ pr+pdsi+pet+tmmx+soil_moisture_shorterm*year, data=myfiles_f)
plot(lmcount, which = 1:6, panel = panel.smooth)
mtext("2007-2015 Palouse Regression pr+pdsi+prpet+tmmx*year", side = 3, line = -2, outer = TRUE)
dev.off()
#--loss and acre ranges as variables change over the length of the dataset
layout(matrix(c(1,2,3,4,5,6),3,2)) # optional 4 graphs/page
library(effects)
model.lm <- lm(formula=count ~ pr+soil_moisture_shorterm+pet+erc+tmmx*year,data=myfiles_allyears)
plot(effect(term="year",mod=model.lm,default.levels=20),multiline=TRUE, las = 2)
dev.off()
library(effects)
model.lm <- lm(formula=loss ~ pr+pet+erc+tmmx*year,data=myfiles_allyears)
plot(effect(term="year",mod=model.lm,default.levels=20),multiline=TRUE)
dev.off()
#--multiple regression 2009
# NOTE(review): comment says 2009 but the model is fit on myfiles_f
# (all-years Whitman wheat), not myfiles_2009 — confirm which was intended.
fit <- lm(loss ~ pr + pet + soil_moisture_shorterm + tmmx, data=myfiles_f)
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
summary(fit)
#--multiple regression 2008
fit <- lm(loss ~ pr + pet + prpet + tmmx, data=myfiles_2008)
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
#--multiple regression 2015
fit <- lm(loss ~ pr + pet + prpet + tmmx, data=myfiles_2015)
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
#-3fold cross validation with
dev.off()
#--cv.lm for all three years compared
layout(matrix(c(1,2,3,4,5,6),3,2))
#---Multicollinearity test
# NOTE(review): lowercase vif() here vs. uppercase VIF() below — these come
# from different packages (car::vif vs fmsb::VIF) and return different
# structures; pick one consistently.
fit08_VIF <- vif(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears))
fit08 <- lm(loss ~ erc + pr + pet + pdsi + soil_moisture_longterm + tmmx + soil_moisture_shorterm, data=myfiles_f)
cv.lm(data=myfiles_whitman, fit08, main = "Wheat Whitman, Heat/Drought/Hot Wind/Failed Irrig 1989-2015") # 3 fold cross-validation
# NOTE(review): invalid call — lm() does not accept a fitted model or a `main`
# argument; this looks like a leftover copy of the cv.lm() line and will error.
lm(data=myfiles_whitman, fit08, main = "Wheat loss regression 2008") # 3 fold cross-validation
#---Multicollinearity test
fit09_VIF <- VIF(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2009))
fit09 <- lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2009)
cv.lm(data=myfiles_2009, fit09, m=3, main = "Wheat loss regression 2009")
#---Multicollinearity test
fit15_VIF <- VIF(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2015))
fit15 <- lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_2015)
cv.lm(data=myfiles_2015, fit15, m=3, main = "Wheat loss regression 2015")
# Render the model summaries as plots so they land in the multi-panel layout.
text <- capture.output(summary(fit08))
textplot(text, cex=.8, halign="right", valign="center")
text <- capture.output(summary(fit09))
textplot(text, cex=.8, halign="right", valign="center")
text <- capture.output(summary(fit15))
textplot(text, cex=.8, halign="right", valign="center")
dev.off()
layout(matrix(c(1,2,3,4),2,2))
#---Multicollinearity test all files
fit_VIF_loss <- VIF(lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears))
fitallyears_loss <- lm(loss ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears)
cv.lm(data=myfiles_allyears, fitallyears_loss, m=3, main = "Wheat loss regression 2007-2015")
fit_VIF_countratio <- VIF(lm(countratio ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears))
fitallyears_count <- lm(countratio ~ pr + pet + prpet + pdsi + tmmx, data=myfiles_allyears)
cv.lm(data=myfiles_allyears, fitallyears_count, m=3, main = "Wheat count ratio regression 2007-2015")
text <- capture.output(summary(fitallyears_loss))
textplot(text, cex=.8, halign="right", valign="center")
text <- capture.output(summary(fitallyears_count))
textplot(text, cex=.8, halign="right", valign="center")
#--manova
dev.off()
# Do the climate variables differ jointly across years and commodities?
manova_allyears <- manova(cbind(pr, pdsi, prpet) ~ year + commodity, data = myfiles_allyears)
summary(manova_allyears)
#--are counties and years significantly different between climate variables?
summary.aov(manova_allyears)
#--interaction between
dev.off()
layout(matrix(c(1,2,3,4),2,1)) # optional 4 graphs/page
# NOTE(review): myfiles_wbcd is created here but never used below.
myfiles_wbcd <- subset(myfiles_allyears, commodity == "WHEAT")
# NOTE(review): myfiles_allall is never defined anywhere in this script, so
# this line errors when run; presumably myfiles_allyears was meant — confirm.
myfiles_allallall <- subset(myfiles_allall, damagecause == "Drought")
# NOTE(review): attach() is called repeatedly below without matching detach();
# this stacks copies of the data frame on the search path and can mask columns.
attach(myfiles_allyears)
year <- factor(year)
commodity <- factor(commodity)
# NOTE(review): interaction.plot() has no `data` argument — `data =` is
# silently absorbed by `...`; the attach()ed columns are what is actually used.
interaction.plot(year, commodity, count, type="b", las=2, col=c(1:7),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,22,24),
xlab="Years",
ylab="Loss ($)",
main="Interaction of Loss across counties by year", data = myfiles_allyears)
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
# NOTE(review): `acres` was not kept by the subset() above, so this call will
# fail unless `acres` is available from elsewhere on the search path — confirm.
interaction.plot(year, county, acres, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Loss unscaled",
main="Interaction of Loss unscaled across counties by year")
dev.off()
layout(matrix(c(1,2,3,4),2,1)) # optional 4 graphs/page
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(county, year, count, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count",
main="Interaction of frequency of WHEAT.DROUGHT claims across counties by year")
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(county, year, countratio, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count ratio",
main="Interaction of frequency RATIO of WHEAT.DROUGHT claims across counties by year")
dev.off()
layout(matrix(c(1,2,3,4),2,1)) # optional 4 graphs/page
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
# NOTE(review): titles/ylab below say "Count ratio"/"claims" but pet and pdsi
# are being plotted — copy-paste labels, worth fixing.
interaction.plot(year, county, pet, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count ratio",
main="Interaction of frequency RATIO of WHEAT.DROUGHT claims across counties by year")
attach(myfiles_allyears)
year <- factor(year)
county <- factor(county)
interaction.plot(year, county, pdsi, type="b", las=2, col=c(1:3),
leg.bty="o", leg.bg="beige", lwd=2, pch=c(18,24,22),
xlab="Years",
ylab="Count ratio",
main="Interaction of frequency RATIO of WHEAT.DROUGHT claims across counties by year")
dev.off()
# Plot Means with Error Bars
library(gplots)
attach(myfiles_allyears)
plotmeans(count~year,xlab="years",
ylab="loss ($) ", main="Mean Claim Count Plot\nwith 95% CI")
dev.off()
library(gplots)
attach(myfiles_allyears)
# NOTE(review): ylab says "loss ($)" but pr (precipitation) is plotted here.
plotmeans(pr~year,xlab="years",
ylab="loss ($) ", main="Mean Claim Count Plot\nwith 95% CI")
|
library(dplyr)
# Load the household power-consumption data and keep only 2007-02-01 and
# 2007-02-02 (i.e. timestamps >= Feb 1 00:00 and < Feb 3 00:00, local time).
#
# Returns a tibble with `Date` parsed as Date and `Time` rebuilt as a full
# POSIXct timestamp.
loadData <- function() {
  read.table('data.txt', sep=';', header=TRUE) %>%
    mutate(Date = as.Date(Date, '%d/%m/%Y')) %>%
    # Combine the parsed date with the time-of-day string into one timestamp.
    mutate(Time = as.POSIXct(strptime(paste(Date, ' ', Time), '%Y-%m-%d %H:%M:%S'))) %>%
    # Fixed: the original used strftime() (a date -> character formatter) to
    # build the comparison bounds; as.POSIXct() parses them properly and avoids
    # relying on implicit character -> POSIXct coercion inside the comparison.
    filter(Time >= as.POSIXct('2007-02-01 00:00:00'),
           Time < as.POSIXct('2007-02-03 00:00:00')) %>%
    tbl_df
}
|
/load_data.r
|
no_license
|
mrrmaurya/DataPlotting1
|
R
| false
| false
| 352
|
r
|
library(dplyr)
# Load the household power-consumption data and keep only 2007-02-01 and
# 2007-02-02; returns a tibble with Date parsed and Time as POSIXct.
loadData <- function() {
read.table('data.txt', sep=';', header=TRUE) %>%
mutate(Date = as.Date(Date, '%d/%m/%Y')) %>%
# Rebuild a full timestamp from the parsed date plus the time-of-day string.
mutate(Time = as.POSIXct(strptime(paste(Date, ' ', Time), '%Y-%m-%d %H:%M:%S'))) %>%
# NOTE(review): strftime() formats a date-time into character; the comparison
# only works via implicit character->POSIXct coercion. as.POSIXct() would be
# the correct parser here — confirm and align with the other copy.
filter(Time >= strftime('2007-02-01 00:00:00'), Time < strftime('2007-02-03 00:00:00')) %>%
tbl_df
}
|
######################################################################################
#The following is a script for creating a tidy data set containing measurements from #
#6 types of activities collected by accelerometers for subjects in training and in   #
#testing groups. Please read the codebook see the comments along the way to          #
#understand why certain manipulations were made. For project details refer to:       #
#http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones #
######################################################################################
#Set working directory.
setwd("~/Coursera/R Projects/Getting and Cleaning Data")
#Download the data and save the zipped file with the name "Dataset.zip"
#(fixed: the original URL contained a stray trailing space before the closing
#quote, which breaks the request on most servers)
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip","Dataset.zip", mode="wb")
#Unzip the file in your working directory
unzip("Dataset.zip")
#Manually take files out of test and training folders & placed in working directory,
#inside the "UCI HAR Dataset" folder.
setwd("~/Coursera/R Projects/Getting and Cleaning Data/UCI HAR Dataset")
#download packages needed that have already been installed.
library(plyr)
library(dplyr)
#Read all the files first.
subjectTest = read.table("subject_test.txt")
yTest = read.table("y_test.txt")
xTest = read.table("X_test.txt") #Contains measurements in test group
featuresDF = read.table("features.txt")
subjectTrain = read.table("subject_train.txt")
yTrain = read.table("y_train.txt")
xTrain = read.table("X_train.txt") #Contains measurements in train group
activityLabel = read.table("activity_labels.txt")
#################################################################################
#The next set of codes rename and extract certain columns, based on the project #
#requirements.                                                                  #
#################################################################################
#The xTest & xTrain contain the measurements for the test and train subjects.
#featuresDF contains the column names of the measurements. Paste the variable
#names to the xTest & xTrain and select only mean and standard deviation columns
colnames(xTest) <- featuresDF[, 2] #Subsetted 2nd col of featuresDF pasted to the first file as col names
colnames(xTrain) <- featuresDF[, 2] #Subsetted 2nd col of featuresDF pasted to the first file as col names
xTestMean <- xTest[,grepl("mean", colnames(xTest))] #get cols that only have "mean" in col name
xTrainMean <- xTrain[,grepl("mean", colnames(xTrain))] #get cols that only have "mean" in col name
xTestStd <- xTest[,grepl("std", colnames(xTest))] #get cols that only have "std" in col name
#Fixed: the original subset xTrain using colnames(xTest). Both frames share the
#same column names so the result was identical, but it was a latent bug.
xTrainStd <- xTrain[,grepl("std", colnames(xTrain))] #get cols that only have "std" in col name
#Remove objects no longer needed. (The original assigned NA, which frees the
#large value but leaves the name bound; rm() removes the binding entirely.)
rm(xTest, xTrain, featuresDF)
#subjectTest & subjectTrain contain subject ID #s. Rename cols V1 in each one to SubjectID
subjectTest <- dplyr::rename(subjectTest, SubjectID = V1)
subjectTrain <- dplyr::rename(subjectTrain, SubjectID = V1)
#yTest & yTrain contain activity type by using numbers 1-6 for each subject.
#Rename the column to "Activity"
yTest <- dplyr::rename(yTest, Activity = V1)
yTrain <- dplyr::rename(yTrain, Activity = V1)
#####################################################
#The next set of codes bind and join files/objects. #
#####################################################
#Column bind all means and standard deviations for test and train. Then, row
#bind those objects into 1 object.
xTestMeanStd <- cbind(xTestMean,xTestStd) #Binds by col
xTrainMeanStd <- cbind(xTrainMean, xTrainStd) #Binds by col
xTestTrainMeanStd <- rbind(xTestMeanStd, xTrainMeanStd) #Binds 1st and 2nd object by row
#Remove objects no longer needed
rm(xTestMean, xTestStd, xTrainMean, xTrainStd, xTestMeanStd, xTrainMeanStd)
#Rename column with activity # to be "Activity". Then join activityLabel with 2 objects
#that have activity numbers for test and train
activityLabel <- dplyr::rename(activityLabel, Activity = V1) #Rename the 1st col. This col will be used for the join below.
yTestLabel = join(yTest, activityLabel, type = "full") #Join these 2 objects so that activity names can be matched up to activity numbers on yTestRename
yTrainLabel = join(yTrain, activityLabel, type = "full") #Join these 2 objects so that activity names can be matched up to activity numbers on yTestRename
#Remove objects no longer needed
rm(activityLabel, yTest, yTrain)
#Bind activity label objects, remove the col with numbers and rename the colume with labels I want to keep
yTestTrainLabel <- rbind(yTestLabel, yTrainLabel) #bind test & train rows with activity labels and numbers
yTestTrainLabel <- select(yTestTrainLabel, -Activity) #Take out the column with numbers that I used for the join
yTestTrainActivity <- dplyr::rename(yTestTrainLabel, Activity = V2) #Now have the right label for this col
#Remove objects no longer needed
rm(yTestLabel, yTrainLabel, yTestTrainLabel)
#bind all Subject IDs
AllSubjectIDs <- rbind(subjectTest, subjectTrain)
#Remove objects no longer needed
rm(subjectTest, subjectTrain)
#Column bind IDs and activity labels
IDandLabel <- cbind(AllSubjectIDs, yTestTrainActivity) #bind IDs to labels
#Remove objects no longer needed
rm(AllSubjectIDs, yTestTrainActivity)
#Last bind: column bind object with IDs and labels with object with Mean & Std measurements
#for all participants
final <- cbind(IDandLabel, xTestTrainMeanStd) #This has 10299 obs & 81 variables
#Remove objects no longer needed
rm(IDandLabel, xTestTrainMeanStd)
#############################################################################################
#Now, write a table with the above data frame with means for each activity for each subject.#
#Each row will represent a different activity for each participant, so each participant will#
#have 6 rows. I will use a wide-tidy, as opposed to a long-tidy, format for the data frame. #
#############################################################################################
finalAverages <- ddply(final, c('SubjectID','Activity'), numcolwise(mean)) #requires plyr. 180 rows and 81 columns
write.table(finalAverages, file = "finalAverages.txt", row.names=FALSE) #create TXT file with data
|
/run_analysis.R
|
no_license
|
PS930/GCDProject
|
R
| false
| false
| 6,706
|
r
|
######################################################################################
#The following is a script for creating a tidy data set containing measurements from #
#6 types of activities collected by accelerometers for subjects in training and in #
#testing groups. Please read the codebook see the comments along the way to #
#understand why certain manipulations were made. For project details refer to: # #
#http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones #
######################################################################################
#Set working directory.
setwd("~/Coursera/R Projects/Getting and Cleaning Data")
#Download the data and save the zipped file with the name "Dataset.zip"
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip ","Dataset.zip", mode="wb")
#Unzip the file in your working directory
unzip("Dataset.zip")
#Manually take files out of test and training folders & placed in working directory,
#inside the "UCI HAR Dataset" folder.
setwd("~/Coursera/R Projects/Getting and Cleaning Data/UCI HAR Dataset")
#download packages needed that have already been installed.
library(plyr)
library(dplyr)
#Read all the files first.
subjectTest = read.table("subject_test.txt")
yTest = read.table("y_test.txt")
xTest = read.table("X_test.txt") #Contains measurements in test group
featuresDF = read.table("features.txt")
subjectTrain = read.table("subject_train.txt")
yTrain = read.table("y_train.txt")
xTrain = read.table("X_train.txt") #Contains measurements in train group
activityLabel = read.table("activity_labels.txt")
#################################################################################
#The next set of codes rename and extract certain columns, based on the project #
#requirements. #
#################################################################################
#The xTest & xTrain contain the measurements for the test and train subjects.
#featuresDF contains the column names of the measurements. Paste the variable
#names to the xTest & xTrain and select only mean and standard deviation columns
colnames(xTest) <- featuresDF[, 2] #Subsetted 2nd col of featuresDF pasted to the first file as col names
colnames(xTrain) <- featuresDF[, 2] #Subsetted 2nd col of featuresDF pasted to the first file as col names
xTestMean <- xTest[,grepl("mean", colnames(xTest))] #get cols that only have "mean" in col name
xTrainMean <- xTrain[,grepl("mean", colnames(xTrain))] #get cols that only have "mean" in col name
xTestStd <- xTest[,grepl("std", colnames(xTest))] #get cols that only have "std" in col name
xTrainStd <- xTrain[,grepl("std", colnames(xTest))] #get cols that only have "std" in col name
#Delete objects I no longer need to make space in the global environment
xTest <- NA
xTrain <- NA
featuresDF <- NA
#subjectTest & subjectTrain contain subject ID #s. Rename cols V1 in each one to SubjectID
subjectTest <- dplyr::rename(subjectTest, SubjectID = V1)
subjectTrain <- dplyr::rename(subjectTrain, SubjectID = V1)
#yTest & yTrain contain activity type by using numbers 1-6 for each subject.
#Rename the column to "Activity"
yTest <- dplyr::rename(yTest, Activity = V1)
yTrain <- dplyr::rename(yTrain, Activity = V1)
#####################################################
#The next set of codes bind and join files/objects. #
#####################################################
#Column bind all means and standard deviations for test and train. Then, row
#bind those objects into 1 object.
xTestMeanStd <- cbind(xTestMean,xTestStd) #Binds by col
xTrainMeanStd <- cbind(xTrainMean, xTrainStd) #Binds by col
xTestTrainMeanStd <- rbind(xTestMeanStd, xTrainMeanStd) #Binds 1st and 2nd object by row
#Delete objects I no longer need to make space in the global environment
xTestMean <- NA
xTestStd <- NA
xTrainMean <- NA
xTrainStd <- NA
xTestMeanStd <- NA
xTrainMeanStd <- NA
#Rename column with activity # to be "Activity". Then join activityLabel with 2 objects
#that have activity numbers for test and train
activityLabel <- dplyr::rename(activityLabel, Activity = V1) #Rename the 1st col. This col will be used for the join below.
yTestLabel = join(yTest, activityLabel, type = "full") #Join these 2 objects so that activity names can be matched up to activity numbers on yTestRename
yTrainLabel = join(yTrain, activityLabel, type = "full") #Join these 2 objects so that activity names can be matched up to activity numbers on yTestRename
#Delete objects I no longer need to make space in the global environment
activityLabel <- NA
yTest <- NA
yTrain <- NA
#Bind activity label objects, remove the col with numbers and rename the colume with labels I want to keep
yTestTrainLabel <- rbind(yTestLabel, yTrainLabel) #bind test & train rows with activity labels and numbers
yTestTrainLabel <- select(yTestTrainLabel, -Activity) #Take out the column with numbers that I used for the join
yTestTrainActivity <- dplyr::rename(yTestTrainLabel, Activity = V2) #Now have the right label for this col
#Delete objects I no longer need to make space in the global environment
yTestLabel <- NA
yTrainLabel <- NA
yTestTrainLabel <- NA
#bind all Subject IDs
AllSubjectIDs <- rbind(subjectTest, subjectTrain)
#Delete objects I no longer need to make space in the global environment
subjectTest <- NA
subjectTrain <- NA
#Column bind IDs and activity labels
IDandLabel <- cbind(AllSubjectIDs, yTestTrainActivity) #bind IDs to labels
#Delete objects I no longer need to make space in the global environment
AllSubjectIDs <- NA
yTestTrainActivity <- NA
#Last bind: column bind object with IDs and labels with object with Mean & Std measurements
#for all participants
final <- cbind(IDandLabel, xTestTrainMeanStd) #This has 10299 obs & 81 variables
#Delete objects I no longer need to make space in the global environment
IDandLabel <- NA
xTestTrainMeanStd <- NA
#############################################################################################
#Now, write a table with the above data frame with means for each activity for each subject.#
#Each row will represent a different activity for each participant, so each participant will#
#have 6 rows. I will use a wide-tidy, as opposed to a long-tidy, format for the data frame. #
#############################################################################################
finalAverages <- ddply(final, c('SubjectID','Activity'), numcolwise(mean)) #requires plyr. 180 rows and 81 columns
write.table(finalAverages, file = "finalAverages.txt", row.names=FALSE) #create TXT file with data
|
library(hillR)
### Name: hill_taxa_parti
### Title: Decompostion of Taxonomic diversity through Hill Numbers
### Aliases: hill_taxa_parti
### ** Examples
# Example community abundance matrix shipped with the FD package.
dummy = FD::dummy
# Partition taxonomic diversity at several Hill-number orders q.
hill_taxa_parti(comm = dummy$abun, q = 0)
hill_taxa_parti(comm = dummy$abun, q = 1)
# q very close to 1, to compare against the exact q = 1 case.
hill_taxa_parti(comm = dummy$abun, q = 0.9999999)
# Same orders but pooling raw abundances instead of relative abundances first.
hill_taxa_parti(comm = dummy$abun, q = 0.9999999, rel_then_pool = FALSE)
hill_taxa_parti(comm = dummy$abun, q = 1, rel_then_pool = FALSE)
hill_taxa_parti(comm = dummy$abun, q = 2)
hill_taxa_parti(comm = dummy$abun, q = 3)
|
/data/genthat_extracted_code/hillR/examples/hill_taxa_parti.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 536
|
r
|
library(hillR)
### Name: hill_taxa_parti
### Title: Decompostion of Taxonomic diversity through Hill Numbers
### Aliases: hill_taxa_parti
### ** Examples
# Example community abundance matrix shipped with the FD package.
dummy = FD::dummy
# Partition taxonomic diversity at several Hill-number orders q; q near 1
# approximates the exact q = 1 case, and rel_then_pool = FALSE pools raw
# abundances instead of relative abundances.
hill_taxa_parti(comm = dummy$abun, q = 0)
hill_taxa_parti(comm = dummy$abun, q = 1)
hill_taxa_parti(comm = dummy$abun, q = 0.9999999)
hill_taxa_parti(comm = dummy$abun, q = 0.9999999, rel_then_pool = FALSE)
hill_taxa_parti(comm = dummy$abun, q = 1, rel_then_pool = FALSE)
hill_taxa_parti(comm = dummy$abun, q = 2)
hill_taxa_parti(comm = dummy$abun, q = 3)
|
library(readxl)
# Plot per-game Rainbow Six: Siege scores from two Excel score sheets on one
# chart ("After Stim" in green vs. "Before Stim" in red, per the legend).
#
# Args:
#   siegex      Unused by the original implementation; kept (now with a
#               default) so existing one-argument callers still work.
#   file_after  Workbook with the "after" scores (generalized from the
#               hard-coded "Siege1.xlsx"; must contain a `score` column).
#   file_before Workbook with the "before" scores (was "Siege2.xlsx").
#
# Side effect: draws on the current graphics device; no return value of use.
siegedata23 <- function(siegex = NULL,
                        file_after = "Siege1.xlsx",
                        file_before = "Siege2.xlsx") {
  after_scores <- read_excel(file_after)$score
  before_scores <- read_excel(file_before)$score
  # Include 1000 so the y-axis always extends to at least 1000.
  score_range <- range(1000, after_scores, before_scores)
  plot(after_scores, type = "o", col = "green", ylim = score_range,
       ylab = "Score", xlab = "Game Number (Out of 10)",
       main = "Rainbow Six: Siege Scores")
  lines(before_scores, type = "o", pch = 22, lty = 2, col = "red")
  legend(1, score_range[2], c("After Stim", "Before Stim"), cex = 0.6,
         col = c("green", "red"), pch = 21:22, lty = 1:2)
}
|
/R/siegedata.R
|
no_license
|
Dakkerr/StimulantsInVideoGames
|
R
| false
| false
| 530
|
r
|
library(readxl)
# Plot per-game Rainbow Six: Siege scores from two Excel sheets on one chart
# (Siege1 = "After Stim" in green, Siege2 = "Before Stim" in red).
# NOTE(review): the `siegex` parameter is never used inside the function.
siegedata23 <- function(siegex) {
Siege1 <- read_excel("Siege1.xlsx")
Siege2 <- read_excel("Siege2.xlsx")
siege1 <-(Siege1$score)
siege2 <-(Siege2$score)
# Include 1000 so the y-axis always extends to at least 1000.
g_range2 <- range(1000, siege1, siege2)
plot(siege1,type="o", col="green",ylim=g_range2,ylab = "Score", xlab = "Game Number (Out of 10)", main = "Rainbow Six: Siege Scores")
lines(siege2, type = "o", pch=22, lty=2, col="red")
legend(1, g_range2[2], c("After Stim","Before Stim"), cex=0.6,
col=c("green","red"), pch=21:22, lty=1:2)
}
|
#' Spark ML -- K-Means Clustering
#'
#' K-means clustering with support for k-means|| initialization proposed by Bahmani et al.
#' Using `ml_kmeans()` with the formula interface requires Spark 2.0+.
#'
#' @template roxlate-ml-clustering-algo
#' @template roxlate-ml-clustering-params
#' @template roxlate-ml-tol
#' @template roxlate-ml-prediction-col
#' @template roxlate-ml-formula-params
#' @param init_steps Number of steps for the k-means|| initialization mode. This is an advanced setting -- the default of 2 is almost always enough. Must be > 0. Default: 2.
#' @param init_mode Initialization algorithm. This can be either "random" to choose random points as initial cluster centers, or "k-means||" to use a parallel variant of k-means++ (Bahmani et al., Scalable K-Means++, VLDB 2012). Default: k-means||.
#'
#' @examples
#'\dontrun{
#' sc <- spark_connect(master = "local")
#' iris_tbl <- sdf_copy_to(sc, iris, name = "iris_tbl", overwrite = TRUE)
#' ml_kmeans(iris_tbl, Species ~ .)
#'}
#'
#' @export
ml_kmeans <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), ...) {
# S3 generic: dispatches on class(x) to the spark_connection, ml_pipeline,
# or tbl_spark method defined below.
UseMethod("ml_kmeans")
}
#' @export
ml_kmeans.spark_connection <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), ...) {
# Collect the user-supplied parameters (plus any extras in `...`) and run
# them through the validator, which casts types and resolves deprecated
# argument names before anything touches Spark.
.args <- list(
k = k,
max_iter = max_iter,
tol = tol,
init_steps = init_steps,
init_mode = init_mode,
seed = seed,
features_col = features_col,
prediction_col = prediction_col
) %>%
c(rlang::dots_list(...)) %>%
validator_ml_kmeans()
# Instantiate the JVM-side KMeans estimator, then set the parameters that
# spark_pipeline_stage() does not handle via the corresponding setters.
jobj <- spark_pipeline_stage(
x, "org.apache.spark.ml.clustering.KMeans", uid,
features_col = .args[["features_col"]], k = .args[["k"]],
max_iter = .args[["max_iter"]], seed = .args[["seed"]]
) %>%
invoke("setTol", .args[["tol"]]) %>%
invoke("setInitSteps", .args[["init_steps"]]) %>%
invoke("setInitMode" , .args[["init_mode"]]) %>%
invoke("setPredictionCol", .args[["prediction_col"]])
# Wrap the Java object reference in the R-side estimator class.
new_ml_kmeans(jobj)
}
#' @export
ml_kmeans.ml_pipeline <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
                                  init_steps = 2, init_mode = "k-means||", seed = NULL,
                                  features_col = "features", prediction_col = "prediction",
                                  uid = random_string("kmeans_"), ...) {
  # Build the k-means estimator against the pipeline's underlying Spark
  # connection, then append it to the pipeline as a new stage.
  estimator <- ml_kmeans.spark_connection(
    x = spark_connection(x),
    formula = formula,
    k = k,
    max_iter = max_iter,
    tol = tol,
    init_steps = init_steps,
    init_mode = init_mode,
    seed = seed,
    features_col = features_col,
    prediction_col = prediction_col,
    uid = uid,
    ...
  )
  ml_add_stage(x, estimator)
}
#' @export
ml_kmeans.tbl_spark <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), features = NULL, ...) {
# Normalize the `formula`/`features` pair into a single formula spec
# (presumably `features` is the deprecated character-vector interface —
# see ml_standardize_formula).
formula <- ml_standardize_formula(formula, features = features)
# Build the bare estimator; the formula (if any) is handled below, not here.
stage <- ml_kmeans.spark_connection(
x = spark_connection(x),
formula = NULL,
k = k,
max_iter = max_iter,
tol = tol,
init_steps = init_steps,
init_mode = init_mode,
seed = seed,
features_col = features_col,
prediction_col = prediction_col,
uid = uid,
...
)
if (is.null(formula)) {
# No formula: fit the estimator directly on the dataset.
stage %>%
ml_fit(x)
} else {
# Formula interface: wrap fitting in the higher-level ml_model constructor,
# which also takes care of assembling the feature column from the formula.
ml_construct_model_clustering(
new_ml_model_kmeans,
predictor = stage,
dataset = x,
formula = formula,
features_col = features_col
)
}
}
# Validator
# Casts and validates the k-means argument list before it is sent to Spark.
# ml_backwards_compatibility() maps deprecated argument names
# (centers/tolerance/iter.max) onto their current equivalents first; the
# shared clustering validator then handles the common parameters.
validator_ml_kmeans <- function(.args) {
.args <- ml_backwards_compatibility(.args, list(
centers = "k",
tolerance = "tol",
iter.max = "max_iter"
)) %>%
validate_args_clustering()
# k-means-specific casts: tol is a scalar double, init_steps a scalar
# integer, and init_mode must be one of the two supported algorithms.
.args[["tol"]] <- cast_scalar_double(.args[["tol"]])
.args[["init_steps"]] <- cast_scalar_integer(.args[["init_steps"]])
.args[["init_mode"]] <- cast_choice(.args[["init_mode"]], c("random", "k-means||"))
.args[["prediction_col"]] <- cast_string(.args[["prediction_col"]])
.args
}
# Constructor: wraps the JVM estimator reference in the ml_kmeans S3 class.
new_ml_kmeans <- function(jobj) {
new_ml_estimator(jobj, class = "ml_kmeans")
}
# Constructor for a fitted k-means model wrapping the JVM model reference.
new_ml_kmeans_model <- function(jobj) {
# possibly_null() presumably returns NULL instead of erroring when the
# Spark-side summary is unavailable (e.g. model loaded from disk) — confirm.
summary <- possibly_null(~ new_ml_summary_kmeans_model(invoke(jobj, "summary")))()
new_ml_clustering_model(
jobj,
# `def clusterCenters`
# Lazily fetch the cluster centers from the JVM and convert each to an
# R numeric vector.
cluster_centers = possibly_null(
~ invoke(jobj, "clusterCenters") %>%
purrr::map(invoke, "toArray")
),
# Within-cluster sum of squared distances for a given dataset.
compute_cost = function(dataset) {
invoke(jobj, "computeCost", spark_dataframe(dataset))
},
summary = summary,
class = "ml_kmeans_model")
}
# Constructor: wraps the JVM training-summary reference in the
# k-means-specific summary class, on top of the shared clustering summary.
new_ml_summary_kmeans_model <- function(jobj) {
new_ml_summary_clustering(
jobj,
class = "ml_summary_kmeans")
}
|
/R/ml_clustering_kmeans.R
|
permissive
|
benblucas/sparklyr
|
R
| false
| false
| 5,288
|
r
|
#' Spark ML -- K-Means Clustering
#'
#' K-means clustering with support for k-means|| initialization proposed by Bahmani et al.
#' Using `ml_kmeans()` with the formula interface requires Spark 2.0+.
#'
#' @template roxlate-ml-clustering-algo
#' @template roxlate-ml-clustering-params
#' @template roxlate-ml-tol
#' @template roxlate-ml-prediction-col
#' @template roxlate-ml-formula-params
#' @param init_steps Number of steps for the k-means|| initialization mode. This is an advanced setting -- the default of 2 is almost always enough. Must be > 0. Default: 2.
#' @param init_mode Initialization algorithm. This can be either "random" to choose random points as initial cluster centers, or "k-means||" to use a parallel variant of k-means++ (Bahmani et al., Scalable K-Means++, VLDB 2012). Default: k-means||.
#'
#' @examples
#'\dontrun{
#' sc <- spark_connect(master = "local")
#' iris_tbl <- sdf_copy_to(sc, iris, name = "iris_tbl", overwrite = TRUE)
#' ml_kmeans(iris_tbl, Species ~ .)
#'}
#'
#' @export
ml_kmeans <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), ...) {
UseMethod("ml_kmeans")
}
#' @export
ml_kmeans.spark_connection <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), ...) {
.args <- list(
k = k,
max_iter = max_iter,
tol = tol,
init_steps = init_steps,
init_mode = init_mode,
seed = seed,
features_col = features_col,
prediction_col = prediction_col
) %>%
c(rlang::dots_list(...)) %>%
validator_ml_kmeans()
jobj <- spark_pipeline_stage(
x, "org.apache.spark.ml.clustering.KMeans", uid,
features_col = .args[["features_col"]], k = .args[["k"]],
max_iter = .args[["max_iter"]], seed = .args[["seed"]]
) %>%
invoke("setTol", .args[["tol"]]) %>%
invoke("setInitSteps", .args[["init_steps"]]) %>%
invoke("setInitMode" , .args[["init_mode"]]) %>%
invoke("setPredictionCol", .args[["prediction_col"]])
new_ml_kmeans(jobj)
}
#' @export
ml_kmeans.ml_pipeline <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), ...) {
stage <- ml_kmeans.spark_connection(
x = spark_connection(x),
formula = formula,
k = k,
max_iter = max_iter,
tol = tol,
init_steps = init_steps,
init_mode = init_mode,
seed = seed,
features_col = features_col,
prediction_col = prediction_col,
uid = uid,
...
)
ml_add_stage(x, stage)
}
#' @export
ml_kmeans.tbl_spark <- function(x, formula = NULL, k = 2, max_iter = 20, tol = 1e-4,
init_steps = 2, init_mode = "k-means||", seed = NULL,
features_col = "features", prediction_col = "prediction",
uid = random_string("kmeans_"), features = NULL, ...) {
formula <- ml_standardize_formula(formula, features = features)
stage <- ml_kmeans.spark_connection(
x = spark_connection(x),
formula = NULL,
k = k,
max_iter = max_iter,
tol = tol,
init_steps = init_steps,
init_mode = init_mode,
seed = seed,
features_col = features_col,
prediction_col = prediction_col,
uid = uid,
...
)
if (is.null(formula)) {
stage %>%
ml_fit(x)
} else {
ml_construct_model_clustering(
new_ml_model_kmeans,
predictor = stage,
dataset = x,
formula = formula,
features_col = features_col
)
}
}
# Validator
validator_ml_kmeans <- function(.args) {
.args <- ml_backwards_compatibility(.args, list(
centers = "k",
tolerance = "tol",
iter.max = "max_iter"
)) %>%
validate_args_clustering()
.args[["tol"]] <- cast_scalar_double(.args[["tol"]])
.args[["init_steps"]] <- cast_scalar_integer(.args[["init_steps"]])
.args[["init_mode"]] <- cast_choice(.args[["init_mode"]], c("random", "k-means||"))
.args[["prediction_col"]] <- cast_string(.args[["prediction_col"]])
.args
}
new_ml_kmeans <- function(jobj) {
new_ml_estimator(jobj, class = "ml_kmeans")
}
# Wrap a fitted Spark KMeansModel jobj as an "ml_kmeans_model".
new_ml_kmeans_model <- function(jobj) {
  # Training summary is materialized eagerly; possibly_null() yields NULL when
  # the underlying jobj cannot produce one (e.g. no summary is attached).
  model_summary <- possibly_null(
    ~ new_ml_summary_kmeans_model(invoke(jobj, "summary"))
  )()

  # Cluster centers are kept lazy: this is a NULL-safe thunk invoking
  # `def clusterCenters` on the JVM only when called.
  centers_accessor <- possibly_null(
    ~ invoke(jobj, "clusterCenters") %>%
      purrr::map(invoke, "toArray")
  )

  new_ml_clustering_model(
    jobj,
    cluster_centers = centers_accessor,
    # Cost of the model on a dataset, delegated to Spark's computeCost().
    compute_cost = function(dataset) {
      invoke(jobj, "computeCost", spark_dataframe(dataset))
    },
    summary = model_summary,
    class = "ml_kmeans_model"
  )
}
# Wrap a Spark KMeansSummary jobj in the shared clustering-summary class,
# adding the k-means-specific subclass tag.
new_ml_summary_kmeans_model <- function(jobj) {
  new_ml_summary_clustering(jobj, class = "ml_summary_kmeans")
}
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- Combine leaf temperatures and flux measurements to calculate
#  assimilation-weighted leaf temperatures.
#  Some of this code was copied over and simplified from "leaf thermocouples.R"
#-----------------------------------------------------------------------------------------------------------

source("R/loadLibraries.R")
library(merTools)
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#  D A T A   M A N I P U L A T I O N
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#- merge the IR temperatures with the leaf temperatures

#- Read in the the leaf temperature data (i.e., the actual thermocouples!)
#  Recall that these thermocouples were installed in chambers 1, 2, 5, 9, 10, and 12
#  Thermocouples were installed in all chambers on 27 June 2016
leafT <- as.data.frame(data.table::fread("Output/WTC_TEMP-PARRA_LEAFT-THERMOCOUPLE_20160702-20161125_L0.csv"))
leafT$DateTime <- as.POSIXct(leafT$DateTime,format="%Y-%m-%d %T",tz="GMT")

#- read in the AIRVARS data (PPFD and IR-T data)
#  NOTE(review): the "2016010" date stamp in this filename looks truncated
#  (elsewhere stamps are 8 digits, e.g. 20160101) — confirm against the actual file on disk.
IRT <-as.data.frame(data.table::fread("Output/WTC_TEMP-PARRA_LEAFT-IR_2016010-20161125_L0.csv"))
IRT$DateTime <- as.POSIXct(IRT$DateTime,format="%Y-%m-%d %T",tz="GMT")

#- average the IR-target temperature and PPFD onto a common 15-minute grid
IRTsub <- IRT[,c("DateTime","TargTempC_Avg","PPFD_Avg","chamber")]
IRTsub$DateTime <- nearestTimeStep(IRTsub$DateTime,nminutes=15,align="floor")
IRTsub.m <- data.frame(dplyr::summarize(dplyr::group_by(IRTsub, DateTime, chamber),
                              TargTempC_Avg=mean(TargTempC_Avg,na.rm=T),
                              PPFD_Avg=mean(PPFD_Avg,na.rm=T)))

#- same 15-minute averaging for the two thermocouple channels
leafTsub <- leafT[,c("DateTime","chamber","LeafT1","LeafT2")]
leafTsub$DateTime <- nearestTimeStep(leafTsub$DateTime,nminutes=15,align="floor")
leafTsub.m <- data.frame(dplyr::summarize(dplyr::group_by(leafTsub, DateTime, chamber),
                                        LeafT_Avg.1.=mean(LeafT1,na.rm=T),
                                        LeafT_Avg.2.=mean(LeafT2,na.rm=T)))

#- merge; all.y=T keeps every IR observation even where no thermocouple data exist
d3 <- merge(leafTsub.m,IRTsub.m,by=c("DateTime","chamber"),all.y=T)

#- infer the warming treatment from chamber number parity (even chambers = elevated)
chamber_n <- as.numeric(substr(d3$chamber,start=2,stop=3))
d3$T_treatment <- ifelse(chamber_n %% 2 == 0, "elevated","ambient")
d3$chamber <- factor(d3$chamber)
#- mean of the two thermocouples; rowMeans(na.rm=T) falls back to the single
#  available channel when one thermocouple is missing
d3$Tleaf_mean <- rowMeans(d3[,c("LeafT_Avg.1.","LeafT_Avg.2.")],na.rm=T)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- Download the within chamber met data. Note that I upload these data monthly by extracting them from the
#  trendlogs. This takes a long time and is a pain, or I would do it more frequently
#  NOTE(review): hard-coded absolute Windows paths make this step machine-specific;
#  consider relative paths so the script runs outside the original workstation.
downloadHIEv(searchHIEv("WTC_TEMP-PARRA_CM_WTCMET-MIN"),
             topath="C:/Repos/wtc4_flux/data/fromHIEv",
             cachefile="C:/Repos/wtc4_flux/data/fromHIEv/wtc4_MET_cache.rdata")

#- read in the files. They are large, so this takes a few moments
metfiles <- list.files(path="data/fromHIEv/",pattern="WTCMET-MIN",full.names=T)
metdat <- do.call(rbind,lapply(metfiles,read.csv))
metdat$DateTime <- as.POSIXct(metdat$DateTime,format="%Y-%m-%d %T",tz="UTC")
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#- merging the within-chamber met datasets and the IR temperature datasets became difficult
#   "Error: cannot allocate vector of size 142.5 Mb"
#   So, calculate 15-minutely averages, and then merge those

#- collapse the leaf/IR dataset onto the 15-minute grid
d3$DateTime <- nearestTimeStep(d3$DateTime,nminutes=15,align="floor")
d3.m <- data.frame(dplyr::summarize(dplyr::group_by(d3, DateTime, chamber,T_treatment),
                                      LeafT_Avg.1.=mean(LeafT_Avg.1.,na.rm=T),
                                      LeafT_Avg.2.=mean(LeafT_Avg.2.,na.rm=T),
                                      TargTempC_Avg=mean(TargTempC_Avg,na.rm=T),
                                      PPFD_Avg=mean(PPFD_Avg,na.rm=T),
                                      Tleaf_mean=mean(Tleaf_mean,na.rm=T)))

#- collapse the minutely met data onto the same 15-minute grid
metdat$DateTime <- nearestTimeStep(metdat$DateTime,nminutes=15,align="floor")
metdat.m <- data.frame(dplyr::summarize(dplyr::group_by(metdat, DateTime, chamber),
                                    Tair_SP=mean(Tair_SP,na.rm=T),
                                    RH_al=mean(RH_al,na.rm=T),
                                    DP_al=mean( DP_al,na.rm=T),
                                    Tsub_al=mean(Tsub_al,na.rm=T),
                                    RH_SP=mean(RH_SP,na.rm=T),
                                    Tair_al=mean(Tair_al,na.rm=T)))
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- merge the within-chamber met data with the leaf temperature datasets
metdat_sum <- metdat.m[,c("DateTime","Tair_al","chamber")]
d4 <- merge(d3.m,metdat_sum,by=c("DateTime","chamber"))
#- leaf-to-air temperature differences for the IR sensor and the thermocouples
d4$Tdiff_IR <- with(d4,TargTempC_Avg-Tair_al)
d4$Tdiff_TC <- with(d4,Tleaf_mean-Tair_al)
#  NOTE(review): metdat comes from read.csv; under R >= 4.0 `chamber` is read as
#  character, so levels(metdat$chamber) is NULL and this factor() call would
#  turn every chamber into NA. This line relies on the pre-4.0
#  stringsAsFactors=TRUE default — confirm, or use factor(d4$chamber) instead.
d4$chamber <- factor(d4$chamber,levels=levels(metdat$chamber))
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#- read in the flux data (processed by the wtc4_flux_processing R project)
fluxdat <- read.csv("c:/Repos/wtc4_flux_processing/output/WTC_TEMP-PARRA_WTCFLUX_20160228-20161123_L0.csv")
fluxdat$DateTime <- as.POSIXct(fluxdat$DateTime,format="%Y-%m-%d %T",tz="UTC")
#- derive vapor pressure deficit from RH and air temperature
fluxdat$VPD <- RHtoVPD(RH=fluxdat$RH_al,TdegC=fluxdat$Tair_al)

#- subset to just the data corresponding to the dates in the d4 dataframe
starttime <- min(d4$DateTime)
endtime <- max(d4$DateTime)
fluxdat2 <- subset(fluxdat,DateTime>starttime & DateTime < endtime)

#- 30-min averages across treatments; DoorCnt==0 excludes intervals when the
#  chamber door was open (fluxes invalid)
fluxdat2$DateTime_hr <- nearestTimeStep(fluxdat2$DateTime,nminutes=30,align="floor")
fluxdat.hr1 <- summaryBy(FluxCO2+FluxH2O+Tair_al+PAR+VPD~DateTime_hr+chamber+T_treatment,
                        data=subset(fluxdat2,DoorCnt==0),FUN=mean,na.rm=T,keep.names=T)
fluxdat.hr <- summaryBy(.~DateTime_hr+T_treatment,data=fluxdat.hr1,FUN=c(mean,standard.error),na.rm=T,keep.names=F)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- merge fluxdata.hr and IRT (fluxes and temperatures) on a common 30-minute grid.
d4$DateTime_hr <- nearestTimeStep(d4$DateTime,nminutes=30,align="floor")
d4.hr1 <- data.frame(dplyr::summarize(dplyr::group_by(d4, DateTime_hr, chamber,T_treatment),
                                      TargTempC_Avg=mean(TargTempC_Avg,na.rm=T),
                                      PPFD_Avg=mean(PPFD_Avg,na.rm=T),
                                      LeafT_Avg.1.=mean(LeafT_Avg.1.,na.rm=T),
                                      LeafT_Avg.2.=mean(LeafT_Avg.2.,na.rm=T)))
#- combodat is the master per-chamber, per-half-hour dataset used by everything below
combodat <- merge(fluxdat.hr1,d4.hr1,by=c("DateTime_hr","T_treatment","chamber"))
#- chamber-by-week factor used later for splitting into bins
combodat$week <- factor(week(combodat$DateTime_hr))
combodat$weekfac <- factor(paste(combodat$chamber,combodat$week,sep="-"))
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- calculate assimilation weighted leaf temperature
#- instead of using weeks, cut the 259 days of observations into 259 bins of 1-day each
#  NOTE(review): the 259 is hard-coded to match this particular campaign length — confirm
#  it still matches if the input date range changes.
combodat$bin4days <- as.factor(cut(as.Date(combodat$DateTime_hr),breaks=259,labels=1:259))
combodat$weekfac <- factor(paste(combodat$chamber,combodat$bin4days,sep="-"))

#- calculate weighted average leaf temperatures, for each chamber-by-bin group.
#  Only daylight periods with positive net CO2 uptake contribute (PAR > 20, FluxCO2 > 0).
combodat.list <- split(combodat,combodat$weekfac)
chamber <-meanAirT<- meanLeafT <- weightedMeanLeafT <- Date <- bin <- T_treatment <- VPD <- list()
for(i in 1:length(combodat.list)){
  tocalc <- subset(combodat.list[[i]], PAR> 20 & FluxCO2>0)

  # zero fill negative fluxes
  # NOTE(review): the subset above already requires FluxCO2 > 0, so this
  # zero-fill can never trigger — dead code retained for safety.
  tona <- which(tocalc$FluxCO2 < 0)
  tocalc$FluxCO2[tona] <- 0

  # grouping metadata (constant within a chamber-by-bin group)
  chamber[[i]] <- as.character(tocalc$chamber[1])
  bin[[i]] <- as.character(tocalc$bin4days[1])
  T_treatment[[i]] <- as.character(tocalc$T_treatment[1])
  # unweighted daytime means
  meanAirT[[i]] <- mean(tocalc$Tair_al)
  meanLeafT[[i]] <- mean(tocalc$TargTempC_Avg)
  VPD[[i]] <- mean(tocalc$VPD)
  Date[[i]] <- as.Date(tocalc$DateTime_hr)[1]
  # the key quantity: leaf temperature weighted by net CO2 uptake
  weightedMeanLeafT[[i]] <- weighted.mean(tocalc$TargTempC_Avg,tocalc$FluxCO2)
}

#- assemble the per-bin results into a data frame.
#  NOTE(review): do.call(rbind, <list>) yields one-column matrices stored as
#  matrix columns; unlist() would give plain vectors — confirm downstream code
#  (lmer, plotBy) tolerates matrix columns as used here.
#output_meanT <- data.frame(chamber=levels(fluxdat2$chamber),T_treatment=factor(rep(c("ambient","elevated"),6)))
output_meanT <- data.frame(bin4days=levels(combodat$weekfac))
output_meanT$chamber <- do.call(rbind,chamber)
output_meanT$T_treatment <- do.call(rbind,T_treatment)
output_meanT$bin <- do.call(rbind,bin)
output_meanT$Date <- as.Date(do.call(rbind,Date),origin="1970-01-01")
output_meanT$meanAirT <- do.call(rbind,meanAirT)
output_meanT$meanLeafT <- do.call(rbind,meanLeafT)
output_meanT$weightedMeanLeafT <- do.call(rbind,weightedMeanLeafT)
output_meanT$VPD <- do.call(rbind,VPD)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#  E N D   O F   D A T A   M A N I P U L A T I O N
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#  S T A T I S T I C A L   A N A L Y S I S
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#  SMATR statistical analysis of Tleaf vs. Tair
#sma1 <- sma(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment,
#            data=output_meanT) # treats each observation as independent, which inflates the statistical power

#-------------
#- random effects ANCOVA for Tleaf vs. Tair. Some evidence that the warmed treatment had a lower slope
#    but both slope 95% CI's included 1.0.
#  Full model: treatment-specific intercepts and slopes, with random
#  intercept+slope per chamber.
lme1 <- lmer(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment+(meanAirT|chamber),
             data=output_meanT)
anova(lme1)   # some evidence of difference in slope, but not terribly strong (p = 0.02)
confint(lme1)

#- hand-rolled 95% Wald CIs for the ambient slope (row 2) and the warmed slope
#  (row 2 + interaction row 4).
#  NOTE(review): eleCI uses only the interaction term's SE for the summed
#  coefficient (b2+b4); it ignores var(b2) and the covariance, so this CI is
#  likely mis-stated — confirm, or use confint() / a contrast instead.
modelout <- data.frame(summary(lme1)$coefficients)
ambCI <- c(modelout$Estimate[[2]]-modelout$Std..Error[[2]]*1.96,modelout$Estimate[[2]]+modelout$Std..Error[[2]]*1.96)
eleCI <- c((modelout$Estimate[[2]]+modelout$Estimate[[4]])-(modelout$Std..Error[[4]]*1.96),
           (modelout$Estimate[[2]]+modelout$Estimate[[4]])+(modelout$Std..Error[[4]]*1.96))

#- nested simplifications: drop the interaction, then drop treatment entirely
lme1.test <- lmer(meanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
                  data=output_meanT)
lme1.test2 <- lmer(meanLeafT~meanAirT+(meanAirT|chamber),
                   data=output_meanT)
anova(lme1,lme1.test,lme1.test2) # simplest model is preferred
AIC(lme1,lme1.test,lme1.test2)   # models have very similar AICs
confint(lme1.test2)

#- quick visual checks of the fitted relationships
visreg(lme1,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme1.test,xvar="T_treatment")
#-------------
#- random effects ANCOVA for assimilation-weighted Tleaf vs. Tair.
#  Same model-comparison sequence as for the unweighted Tleaf above.
lme2 <- lmer(weightedMeanLeafT~T_treatment+meanAirT+meanAirT:T_treatment+(meanAirT|chamber),
             data=output_meanT)
lme2.test <- lmer(weightedMeanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
                  data=output_meanT)
lme2.test2 <- lmer(weightedMeanLeafT~meanAirT+(meanAirT|chamber),
                   data=output_meanT)
anova(lme2,lme2.test,lme2.test2)
AIC(lme2,lme2.test,lme2.test2) # simpler model is preferred from AIC and logLik bases

anova(lme2.test2) # some evidence of difference in slope, but not terribly strong (p = 0.05)
modelout2 <- data.frame(summary(lme2.test2)$coefficients)
confint(lme2.test2)
#ambCI <- c(modelout2$Estimate[[3]]-modelout2$Std..Error[[3]]*1.96,modelout2$Estimate[[3]]+modelout2$Std..Error[[3]]*1.96)
#eleCI <- c((modelout2$Estimate[[3]]+modelout2$Estimate[[4]])-(modelout2$Std..Error[[4]]*1.96),
#           (modelout2$Estimate[[3]]+modelout2$Estimate[[4]])+(modelout2$Std..Error[[4]]*1.96))

#- visual checks
visreg(lme2.test2,xvar="meanAirT",overlay=T)
visreg(lme2,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme2,xvar="T_treatment")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#  E N D   O F   S T A T I S T I C A L   A N A L Y S I S
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#  P L O T S
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------

#-----------------------------------------------------------------------------------------------------------
#- set up plot of Tleaf vs. Tair and assimilation-weighted Tleaf vs. Tair
#  (two-panel figure, ambient in blue, warmed in red)
windows(100,75)
par(mar=c(7,7,1,2),mfrow=c(1,2))
palette(c("blue","red"))
pchs=3
cexs=0.5

#------
#- plot Tleaf vs. Tair (panel a)
mindate <- min(output_meanT$Date,na.rm=T)#as.Date("2016-05-01")
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
       pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
       xlab="",ylab="")
abline(0,1,lty=2)  # 1:1 line (Tleaf = Tair)

#- overlay mixed model predictions with prediction intervals from merTools.
#  NOTE(review): newdata pins chamber to "C01"/"C02", so the random effect of
#  that chamber is included in the prediction — confirm this is intended rather
#  than population-level predictions.
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)

# lmT <- lm(meanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lmT,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lmT)[[2]],2),sep=""),bty="n")
legend("bottomright",pch=c(pchs,pchs),col=c("blue","red"),legend=c("Ambient","Warmed"))
legend("topleft",legend=letters[1],cex=1.2,bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(T[leaf]~(degree*C)~(measured)),xpd=NA,cex.lab=2)

#------
#- plot assimilation-weighted Tleaf vs. Tair (panel b)
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
       xlab="",ylab="")
abline(0,1,lty=2)

#- overlay mixed model predictions (same approach as panel a, but for lme2)
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)

# lm1 <- lm(weightedMeanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lm1,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lm1)[[2]],2),sep=""),bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(Assimilation~weighted~T[leaf]~(degree*C)),xpd=NA,cex.lab=2)
legend("topleft",legend=letters[2],cex=1.2,bty="n")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#- plot three example days. Low T, moderate T, extremely high T. Show divergent diurnal timecourses

#- Example day 1: a cool day. Average fluxes/temperatures across chambers,
#  then across chambers within treatment (mean and SE).
lowTday <- as.Date("2016-09-30")
lowTdat <- subset(combodat,as.Date(DateTime_hr)==lowTday)
lowTdat.m1 <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment+chamber,FUN=mean,keep.names=T,data=lowTdat)
lowTdat.m <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment,FUN=c(mean,se),data=lowTdat.m1)
times <- subset(lowTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight

#- two stacked panels: top = PPFD with IR leaf temperature on the right axis,
#  bottom (taller) = net CO2 flux with SE bars
windows(60,100)
#par(mar=c(7,7,1,2),mfrow=c(2,1))
par(mar=c(2,2,1,2),oma=c(4,5,4,4),cex.lab=1.6,las=1,cex.axis=1.2)
layout(matrix(c(1,2), 2, 2, byrow = F),
       widths=c(1,1), heights=c(1,2))
times <- subset(lowTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
with(subset(lowTdat.m,T_treatment=="ambient"),plot(DateTime_hr,PAR.mean,type="l",ylim=c(0,2000),
                                                   xlab="",ylab="",
                                                   panel.first=shadeNight(times)))
title(ylab=expression(PPFD~(mu*mol~m^-2~s^-1)),line=3.5,xpd=NA)
#- overlay IR leaf temperature on a second y-axis in the same panel
par(new = T)
with(subset(lowTdat.m,T_treatment=="ambient"),plot(DateTime_hr,TargTempC_Avg.mean,
                                                   type="l",pch=16,xlab="",ylab="",col="red",ylim=c(0,45),axes=F))
axis(side=4,col="red",col.axis="red")
title(ylab=expression(T[l-IR]~(degree*C)),line=-26,xpd=NA,col.lab="red")

#- bottom panel: net CO2 flux.
#  NOTE(review): `legend=F` is not an argument of base plot(); it is silently
#  swallowed via `...` (may warn) — confirm it can be dropped.
with(subset(lowTdat.m,T_treatment=="ambient"),plot(DateTime_hr,FluxCO2.mean,
                                                   type="b",pch=16, col="black",ylim=c(-0.05,0.2),legend=F,ylab="",
                                                   panel.first=shadeNight(times)))
adderrorbars(x=subset(lowTdat.m,T_treatment=="ambient")$DateTime_hr,
             y=subset(lowTdat.m,T_treatment=="ambient")$FluxCO2.mean,
             SE=subset(lowTdat.m,T_treatment=="ambient")$FluxCO2.se,direction="updown")
abline(h=0)
axis(side = 4)
title(ylab=expression(Net~CO[2]~flux~(mmol~CO[2]~s^-1)),line=3.5,xpd=NA)
#- Example day 2: a moderate-temperature day; same panel layout as the cool day.
modTday <- as.Date("2016-10-30")
modTdat <- subset(combodat,as.Date(DateTime_hr)==modTday)
modTdat.m1 <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment+chamber,FUN=mean,keep.names=T,data=modTdat)
modTdat.m <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment,FUN=c(mean,se),data=modTdat.m1)
times <- subset(modTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight

windows(60,100)
#par(mar=c(7,7,1,2),mfrow=c(2,1))
par(mar=c(2,2,1,2),oma=c(4,5,4,4),cex.lab=1.6,las=1,cex.axis=1.2)
layout(matrix(c(1,2), 2, 2, byrow = F),
       widths=c(1,1), heights=c(1,2))
times <- subset(modTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
#- NOTE(review): the +3600 shifts plotted times by one hour (presumably a
#  daylight-saving adjustment for this date) — confirm; the cool-day panel
#  does not apply this shift.
with(subset(modTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,PAR.mean,type="l",ylim=c(0,2000),
                                                   xlab="",ylab="",
                                                   panel.first=shadeNight(times)))
title(ylab=expression(PPFD~(mu*mol~m^-2~s^-1)),line=3.5,xpd=NA)
par(new = T)
with(subset(modTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,TargTempC_Avg.mean,
                                                   type="l",pch=16,xlab="",ylab="",col="red",ylim=c(0,45),axes=F))
axis(side=4,col="red",col.axis="red")
title(ylab=expression(T[l-IR]~(degree*C)),line=-26,xpd=NA,col.lab="red")

with(subset(modTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,FluxCO2.mean,
                                                   type="b",pch=16, col="black",ylim=c(-0.05,0.2),legend=F,ylab="",
                                                   panel.first=shadeNight(times)))
adderrorbars(x=subset(modTdat.m,T_treatment=="ambient")$DateTime_hr+3600,
             y=subset(modTdat.m,T_treatment=="ambient")$FluxCO2.mean,
             SE=subset(modTdat.m,T_treatment=="ambient")$FluxCO2.se,direction="updown")
abline(h=0)
axis(side = 4)
title(ylab=expression(Net~CO[2]~flux~(mmol~CO[2]~s^-1)),line=3.5,xpd=NA)
#- Example day 3: an extremely hot day during the heatwave experiment.
#  Only the heatwave ("HW") chambers are averaged here; linkdf maps chambers to
#  their heatwave treatment (C = control, HW = heatwave; C12/C08 were swapped).
hotTday <- as.Date("2016-11-01")
hotTdat <- subset(combodat,as.Date(DateTime_hr)==hotTday)
linkdf <- data.frame(chamber = levels(as.factor(hotTdat$chamber)),
                     HWtrt = c("C","C","HW","HW","C","C","HW","C","HW","HW","C","HW"))#swapped C12 and C08
hotTdat <- merge(hotTdat,linkdf)

hotTdat.m1 <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment+chamber,FUN=mean,keep.names=T,
                        data=subset(hotTdat,HWtrt=="HW"))
hotTdat.m <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment,FUN=c(mean,se),data=hotTdat.m1)
times <- subset(hotTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight

#- same two-panel layout as the other example days (PPFD + T_l-IR on top,
#  net CO2 flux below); times again shifted by +3600 s
windows(60,100)
#par(mar=c(7,7,1,2),mfrow=c(2,1))
par(mar=c(2,2,1,2),oma=c(4,5,4,4),cex.lab=1.6,las=1,cex.axis=1.2)
layout(matrix(c(1,2), 2, 2, byrow = F),
       widths=c(1,1), heights=c(1,2))
with(subset(hotTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,PAR.mean,type="l",ylim=c(0,2000),
                                                   xlab="",ylab="",
                                                   panel.first=shadeNight(times)))
title(ylab=expression(PPFD~(mu*mol~m^-2~s^-1)),line=3.5,xpd=NA)
par(new = T)
with(subset(hotTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,TargTempC_Avg.mean,
                                                   type="l",pch=16,xlab="",ylab="",col="red",ylim=c(0,45),axes=F))
axis(side=4,col="red",col.axis="red")
title(ylab=expression(T[l-IR]~(degree*C)),line=-26,xpd=NA,col.lab="red")

with(subset(hotTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,FluxCO2.mean,
                                                   type="b",pch=16, col="black",ylim=c(-0.05,0.2),legend=F,ylab="",
                                                   panel.first=shadeNight(times)))
adderrorbars(x=subset(hotTdat.m,T_treatment=="ambient")$DateTime_hr+3600,
             y=subset(hotTdat.m,T_treatment=="ambient")$FluxCO2.mean,
             SE=subset(hotTdat.m,T_treatment=="ambient")$FluxCO2.se,direction="updown")
abline(h=0)
axis(side = 4)
title(ylab=expression(Net~CO[2]~flux~(mmol~CO[2]~s^-1)),line=3.5,xpd=NA)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------


#-----------------------------------------------------------------------------------------------------------
#-- Repeat the calculation of assimilation-weighted leaf temperature, but on a weekly timescale
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- calculate assimilation weighted leaf temperature
#- create weekly bins.
#  NOTE(review): this section duplicates the daily-bin loop above nearly
#  verbatim (only the binning changes) and overwrites output_meanT; the names
#  bin4days/weekfac now actually hold week numbers. A shared helper function
#  would remove the triplication — left as-is here.
combodat$bin4days <- as.factor(week(combodat$DateTime_hr))
combodat$weekfac <- factor(paste(combodat$chamber,combodat$bin4days,sep="-"))

#- calculate weighted average leaf temperatures, for each chamber-by-week group
combodat.list <- split(combodat,combodat$weekfac)
chamber <-meanAirT<- meanLeafT <- weightedMeanLeafT <- Date <- bin <- T_treatment <- VPD <- list()
for(i in 1:length(combodat.list)){
  tocalc <- subset(combodat.list[[i]], PAR> 20 & FluxCO2>0)

  # zero fill negative fluxes (dead code: the subset above already excludes them)
  tona <- which(tocalc$FluxCO2 < 0)
  tocalc$FluxCO2[tona] <- 0

  chamber[[i]] <- as.character(tocalc$chamber[1])
  bin[[i]] <- as.character(tocalc$bin4days[1])
  T_treatment[[i]] <- as.character(tocalc$T_treatment[1])
  meanAirT[[i]] <- mean(tocalc$Tair_al)
  meanLeafT[[i]] <- mean(tocalc$TargTempC_Avg)
  VPD[[i]] <- mean(tocalc$VPD)
  Date[[i]] <- as.Date(tocalc$DateTime_hr)[1]
  # assimilation-weighted mean leaf temperature for the week
  weightedMeanLeafT[[i]] <- weighted.mean(tocalc$TargTempC_Avg,tocalc$FluxCO2)
}

#- assemble per-week results (overwrites the daily output_meanT)
#output_meanT <- data.frame(chamber=levels(fluxdat2$chamber),T_treatment=factor(rep(c("ambient","elevated"),6)))
output_meanT <- data.frame(bin4days=levels(combodat$weekfac))
output_meanT$chamber <- do.call(rbind,chamber)
output_meanT$T_treatment <- do.call(rbind,T_treatment)
output_meanT$bin <- do.call(rbind,bin)
output_meanT$Date <- as.Date(do.call(rbind,Date),origin="1970-01-01")
output_meanT$meanAirT <- do.call(rbind,meanAirT)
output_meanT$meanLeafT <- do.call(rbind,meanLeafT)
output_meanT$weightedMeanLeafT <- do.call(rbind,weightedMeanLeafT)
output_meanT$VPD <- do.call(rbind,VPD)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#  SMATR statistical analysis of Tleaf vs. Tair (weekly bins; repeats the daily analysis)
#sma1 <- sma(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment,
#            data=output_meanT) # treats each observation as independent, which inflates the statistical power

#-------------
#- random effects ANCOVA for Tleaf vs. Tair. Some evidence that the warmed treatment had a lower slope
#    but both slope 95% CI's included 1.0.
lme1 <- lmer(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment+(meanAirT|chamber),
             data=output_meanT)
anova(lme1)   # some evidence of difference in slope, but not terribly strong (p = 0.02)
confint(lme1)

#- hand-rolled Wald CIs; same caveat as the daily analysis: eleCI ignores the
#  covariance between the slope and interaction terms — confirm before reporting.
modelout <- data.frame(summary(lme1)$coefficients)
ambCI <- c(modelout$Estimate[[2]]-modelout$Std..Error[[2]]*1.96,modelout$Estimate[[2]]+modelout$Std..Error[[2]]*1.96)
eleCI <- c((modelout$Estimate[[2]]+modelout$Estimate[[4]])-(modelout$Std..Error[[4]]*1.96),
           (modelout$Estimate[[2]]+modelout$Estimate[[4]])+(modelout$Std..Error[[4]]*1.96))

#- nested simplifications
lme1.test <- lmer(meanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
                  data=output_meanT)
lme1.test2 <- lmer(meanLeafT~meanAirT+(meanAirT|chamber),
                   data=output_meanT)
anova(lme1,lme1.test,lme1.test2) # simplest model is preferred
AIC(lme1,lme1.test,lme1.test2)   # models have very similar AICs
confint(lme1.test2)
visreg(lme1,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme1.test,xvar="T_treatment")

#-------------
#- random effects ANCOVA for assimilation-weighted Tleaf vs. Tair (weekly bins)
lme2 <- lmer(weightedMeanLeafT~T_treatment+meanAirT+meanAirT:T_treatment+(meanAirT|chamber),
             data=output_meanT)
lme2.test <- lmer(weightedMeanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
                  data=output_meanT)
lme2.test2 <- lmer(weightedMeanLeafT~meanAirT+(meanAirT|chamber),
                   data=output_meanT)
anova(lme2,lme2.test,lme2.test2)
AIC(lme2,lme2.test,lme2.test2) # simpler model is preferred from AIC and logLik bases

anova(lme2.test2) # some evidence of difference in slope, but not terribly strong (p = 0.05)
modelout2 <- data.frame(summary(lme2.test2)$coefficients)
confint(lme2.test2)
#ambCI <- c(modelout2$Estimate[[3]]-modelout2$Std..Error[[3]]*1.96,modelout2$Estimate[[3]]+modelout2$Std..Error[[3]]*1.96)
#eleCI <- c((modelout2$Estimate[[3]]+modelout2$Estimate[[4]])-(modelout2$Std..Error[[4]]*1.96),
#           (modelout2$Estimate[[3]]+modelout2$Estimate[[4]])+(modelout2$Std..Error[[4]]*1.96))
visreg(lme2.test2,xvar="meanAirT",overlay=T)
visreg(lme2,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme2,xvar="T_treatment")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- set up plot of Tleaf vs. Tair and assimilation-weighted Tleaf vs. Tair
windows(100,75)
par(mar=c(7,7,1,2),mfrow=c(1,2))
palette(c("blue","red"))
pchs=3
cexs=0.5
#------
#- plot Tleaf vs. Tair
mindate <- min(output_meanT$Date,na.rm=T)#as.Date("2016-05-01")
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="")
abline(0,1,lty=2)
#- overlay mixed model predictions
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
#- overlay lme1 predictions for the warmed treatment (red). Chamber "C02" is even-numbered,
#  i.e. a warmed chamber (see the chamber_n %% 2 treatment assignment earlier in the file).
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
#- re-plot the observations on top of the prediction bands (add=T overlays the existing panel)
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="",add=T)
# lmT <- lm(meanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lmT,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lmT)[[2]],2),sep=""),bty="n")
legend("bottomright",pch=c(pchs,pchs),col=c("blue","red"),legend=c("Ambient","Warmed"))
legend("topleft",legend=letters[1],cex=1.2,bty="n") # panel label "a"
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(T[leaf]~(degree*C)~(measured)),xpd=NA,cex.lab=2)
#------
#- plot assimilation-weighted Tleaf vs. Tair (panel b), same layout as panel a
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="")
abline(0,1,lty=2) # 1:1 reference line
#- overlay mixed model (lme2) predictions over the observed Tair range
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
#- re-plot points above the prediction bands
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="",add=T)
# lm1 <- lm(weightedMeanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lm1,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lm1)[[2]],2),sep=""),bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(Assimilation~weighted~T[leaf]~(degree*C)),xpd=NA,cex.lab=2)
legend("topleft",legend=letters[2],cex=1.2,bty="n") # panel label "b"
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-- Repeat the calculation of assimilation-weighted leaf temperature, but on a monthly timescale
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- create monthly bins. "bin4days" keeps its name from the earlier daily binning so that the
#  downstream code (which reads tocalc$bin4days) works unchanged.
combodat$bin4days <- as.factor(month(combodat$DateTime_hr))
combodat <- subset(combodat,bin4days != "2") # exclude February, a month with little data
combodat$weekfac <- factor(paste(combodat$chamber,combodat$bin4days,sep="-"))

#- calculate the assimilation-weighted mean leaf temperature for each chamber-by-month bin
combodat.list <- split(combodat,combodat$weekfac)

#- pre-allocate one element per bin (avoids growing lists inside the loop)
nbins <- length(combodat.list)
chamber <- meanAirT <- meanLeafT <- weightedMeanLeafT <- Date <- bin <- T_treatment <- VPD <- vector("list",nbins)
for(i in seq_len(nbins)){ # seq_len() is safe when nbins == 0, unlike 1:nbins
#- restrict to daytime observations with positive net CO2 uptake.
#  Note: the FluxCO2 > 0 filter already excludes negative fluxes, so the previous
#  zero-fill of negative fluxes was dead code and has been removed.
#  NOTE(review): a bin with no qualifying rows yields NA/NaN summaries below -- filter
#  downstream if that matters.
tocalc <- subset(combodat.list[[i]], PAR> 20 & FluxCO2>0)
chamber[[i]] <- as.character(tocalc$chamber[1])
bin[[i]] <- as.character(tocalc$bin4days[1])
T_treatment[[i]] <- as.character(tocalc$T_treatment[1])
meanAirT[[i]] <- mean(tocalc$Tair_al)
meanLeafT[[i]] <- mean(tocalc$TargTempC_Avg)
VPD[[i]] <- mean(tocalc$VPD)
Date[[i]] <- as.Date(tocalc$DateTime_hr)[1]
#- assimilation-weighted leaf temperature: IR leaf temperature weighted by net CO2 flux
weightedMeanLeafT[[i]] <- weighted.mean(tocalc$TargTempC_Avg,tocalc$FluxCO2)
}

#- assemble the bin-level results into a data frame (one row per chamber-by-month bin)
output_meanT <- data.frame(bin4days=levels(combodat$weekfac))
output_meanT$chamber <- do.call(rbind,chamber)
output_meanT$T_treatment <- do.call(rbind,T_treatment)
output_meanT$bin <- do.call(rbind,bin)
output_meanT$Date <- as.Date(do.call(rbind,Date),origin="1970-01-01")
output_meanT$meanAirT <- do.call(rbind,meanAirT)
output_meanT$meanLeafT <- do.call(rbind,meanLeafT)
output_meanT$weightedMeanLeafT <- do.call(rbind,weightedMeanLeafT)
output_meanT$VPD <- do.call(rbind,VPD)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# SMATR statistical analysis of Tleaf vs. Tair
#sma1 <- sma(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment,
#            data=output_meanT) # treats each observation as independent, which inflates the statistical power
#-------------
#- random effects ANCOVA for Tleaf vs. Tair. Some evidence that the warmed treatment had a lower slope
#  but both slope 95% CI's included 1.0.
#- full model: treatment-specific slopes and intercepts, random slope+intercept per chamber
lme1 <- lmer(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
anova(lme1) # some evidence of difference in slope, but not terribly strong (p = 0.02)
confint(lme1)
#- approximate Wald 95% CIs for the Tleaf~Tair slope in each treatment (estimate +/- 1.96*SE).
#  Coefficient rows assumed: 1=intercept, 2=meanAirT slope, 3=treatment, 4=interaction.
#  NOTE(review): eleCI uses only the interaction term's SE and ignores its covariance with
#  the main slope, so the warmed-slope CI is approximate -- confirm this is acceptable.
modelout <- data.frame(summary(lme1)$coefficients)
ambCI <- c(modelout$Estimate[[2]]-modelout$Std..Error[[2]]*1.96,modelout$Estimate[[2]]+modelout$Std..Error[[2]]*1.96)
eleCI <- c((modelout$Estimate[[2]]+modelout$Estimate[[4]])-(modelout$Std..Error[[4]]*1.96),
(modelout$Estimate[[2]]+modelout$Estimate[[4]])+(modelout$Std..Error[[4]]*1.96))
#- reduced models: drop the interaction (lme1.test), then the treatment main effect (lme1.test2)
lme1.test <- lmer(meanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme1.test2 <- lmer(meanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme1,lme1.test,lme1.test2) # simplest model is preferred
AIC(lme1,lme1.test,lme1.test2) # models have very similar AICs
confint(lme1.test2)
#- visualize the fitted relationships
visreg(lme1,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme1.test,xvar="T_treatment")
#-------------
#- random effects ANCOVA for assimilation-weighted Tleaf vs. Tair
#- full model: treatment-specific slopes; random slope+intercept per chamber
lme2 <- lmer(weightedMeanLeafT~T_treatment+meanAirT+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
#- reduced models: drop the interaction, then the treatment main effect
lme2.test <- lmer(weightedMeanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme2.test2 <- lmer(weightedMeanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme2,lme2.test,lme2.test2)
AIC(lme2,lme2.test,lme2.test2) # simpler model is preferred from AIC and logLik bases
anova(lme2.test2) # some evidence of difference in slope, but not terribly strong (p = 0.05)
modelout2 <- data.frame(summary(lme2.test2)$coefficients)
confint(lme2.test2)
#ambCI <- c(modelout2$Estimate[[3]]-modelout2$Std..Error[[3]]*1.96,modelout2$Estimate[[3]]+modelout2$Std..Error[[3]]*1.96)
#eleCI <- c((modelout2$Estimate[[3]]+modelout2$Estimate[[4]])-(modelout2$Std..Error[[4]]*1.96),
#           (modelout2$Estimate[[3]]+modelout2$Estimate[[4]])+(modelout2$Std..Error[[4]]*1.96))
#- visualize the fitted relationships
visreg(lme2.test2,xvar="meanAirT",overlay=T)
visreg(lme2,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme2,xvar="T_treatment")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- set up a two-panel plot: Tleaf vs. Tair (a) and assimilation-weighted Tleaf vs. Tair (b)
windows(100,75) # Windows-only graphics device
par(mar=c(7,7,1,2),mfrow=c(1,2))
palette(c("blue","red")) # palette index 1 = ambient (blue), 2 = warmed (red)
pchs=16
cexs=1
#------
#- plot Tleaf vs. Tair (panel a)
mindate <- min(output_meanT$Date,na.rm=T)#as.Date("2016-05-01")
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="")
abline(0,1,lty=2) # 1:1 reference line
#- overlay mixed model (lme1) predictions over the observed Tair range
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
#- ambient predictions; C01 is an odd-numbered (ambient) chamber
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
#- warmed predictions; C02 is an even-numbered (warmed) chamber
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
#- re-plot the points on top of the prediction bands
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="",add=T)
# lmT <- lm(meanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lmT,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lmT)[[2]],2),sep=""),bty="n")
legend("bottomright",pch=c(pchs,pchs),col=c("blue","red"),legend=c("Ambient","Warmed"))
legend("topleft",legend=letters[1],cex=1.2,bty="n") # panel label "a"
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(T[leaf]~(degree*C)~(measured)),xpd=NA,cex.lab=2)
#------
#- plot assimilation-weighted Tleaf vs. Tair (panel b), mirroring panel a but using lme2
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="")
abline(0,1,lty=2) # 1:1 reference line
#- overlay mixed model (lme2) predictions for each treatment
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
#- re-plot points above the prediction bands
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="",add=T)
# lm1 <- lm(weightedMeanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lm1,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lm1)[[2]],2),sep=""),bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(Assimilation~weighted~T[leaf]~(degree*C)),xpd=NA,cex.lab=2)
legend("topleft",legend=letters[2],cex=1.2,bty="n") # panel label "b"
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
|
/R/combine_data_plot_assimiationWeightedLeafT.R
|
no_license
|
jedrake/wtc4_flux
|
R
| false
| false
| 42,370
|
r
|
#-----------------------------------------------------------------------------------------------------------
#- Combine leaf temperatures and flux measurements to calculate
# assimilation-weighted leaf temperatures.
# Some of this code was copied over and simplified from "leaf thermocouples.R"
#-----------------------------------------------------------------------------------------------------------
source("R/loadLibraries.R")
library(merTools)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# D A T A   M A N I P U L A T I O N
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- merge the IR temperatures with the leaf temperatures
#- Read in the leaf temperature data (i.e., the actual thermocouples!)
#  Recall that these thermocouples were installed in chambers 1, 2, 5, 9, 10, and 12.
#  Thermocouples were installed in all chambers on 27 June 2016.
#  data.table::fread is used for speed on these large csv files; timestamps parsed in tz GMT.
leafT <- as.data.frame(data.table::fread("Output/WTC_TEMP-PARRA_LEAFT-THERMOCOUPLE_20160702-20161125_L0.csv"))
leafT$DateTime <- as.POSIXct(leafT$DateTime,format="%Y-%m-%d %T",tz="GMT")
#- read in the AIRVARS data (PPFD and IR-T data)
IRT <-as.data.frame(data.table::fread("Output/WTC_TEMP-PARRA_LEAFT-IR_2016010-20161125_L0.csv"))
IRT$DateTime <- as.POSIXct(IRT$DateTime,format="%Y-%m-%d %T",tz="GMT")
IRTsub <- IRT[,c("DateTime","TargTempC_Avg","PPFD_Avg","chamber")]
#- align both datasets to a common 15-minute timestep (floor), then average within each step.
#  nearestTimeStep is presumably from the HIEv package loaded via loadLibraries.R -- confirm.
IRTsub$DateTime <- nearestTimeStep(IRTsub$DateTime,nminutes=15,align="floor")
IRTsub.m <- data.frame(dplyr::summarize(dplyr::group_by(IRTsub, DateTime, chamber),
TargTempC_Avg=mean(TargTempC_Avg,na.rm=T),
PPFD_Avg=mean(PPFD_Avg,na.rm=T)))
leafTsub <- leafT[,c("DateTime","chamber","LeafT1","LeafT2")]
leafTsub$DateTime <- nearestTimeStep(leafTsub$DateTime,nminutes=15,align="floor")
leafTsub.m <- data.frame(dplyr::summarize(dplyr::group_by(leafTsub, DateTime, chamber),
LeafT_Avg.1.=mean(LeafT1,na.rm=T),
LeafT_Avg.2.=mean(LeafT2,na.rm=T)))
#- merge thermocouple and IR data; all.y=T keeps every IR record even without thermocouple data
d3 <- merge(leafTsub.m,IRTsub.m,by=c("DateTime","chamber"),all.y=T)
#- infer the temperature treatment from the chamber number: even-numbered chambers are warmed
chamber_n <- as.numeric(substr(d3$chamber,start=2,stop=3))
d3$T_treatment <- ifelse(chamber_n %% 2 == 0, "elevated","ambient")
d3$chamber <- factor(d3$chamber)
#- mean of the two thermocouples in each chamber
d3$Tleaf_mean <- rowMeans(d3[,c("LeafT_Avg.1.","LeafT_Avg.2.")],na.rm=T)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- Download the within chamber met data. Note that I upload these data monthly by extracting them from the
#  trendlogs. This takes a long time and is a pain, or I would do it more frequently.
#  NOTE(review): absolute Windows paths make this machine-specific; consider relative paths.
downloadHIEv(searchHIEv("WTC_TEMP-PARRA_CM_WTCMET-MIN"),
topath="C:/Repos/wtc4_flux/data/fromHIEv",
cachefile="C:/Repos/wtc4_flux/data/fromHIEv/wtc4_MET_cache.rdata")
#- read in the files. They are large, so this takes a few moments.
metfiles <- list.files(path="data/fromHIEv/",pattern="WTCMET-MIN",full.names=T)
metdat <- do.call(rbind,lapply(metfiles,read.csv))
#- parsed in tz "UTC" while the leaf/IR data above use "GMT" (equivalent zero-offset zones)
metdat$DateTime <- as.POSIXct(metdat$DateTime,format="%Y-%m-%d %T",tz="UTC")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- merging the within-chamber met datasets and the IR temperature datasets became difficult
#  "Error: cannot allocate vector of size 142.5 Mb"
#  So, calculate 15-minutely averages, and then merge those.
d3$DateTime <- nearestTimeStep(d3$DateTime,nminutes=15,align="floor")
d3.m <- data.frame(dplyr::summarize(dplyr::group_by(d3, DateTime, chamber,T_treatment),
LeafT_Avg.1.=mean(LeafT_Avg.1.,na.rm=T),
LeafT_Avg.2.=mean(LeafT_Avg.2.,na.rm=T),
TargTempC_Avg=mean(TargTempC_Avg,na.rm=T),
PPFD_Avg=mean(PPFD_Avg,na.rm=T),
Tleaf_mean=mean(Tleaf_mean,na.rm=T)))
#- same 15-minute averaging for the met data
metdat$DateTime <- nearestTimeStep(metdat$DateTime,nminutes=15,align="floor")
metdat.m <- data.frame(dplyr::summarize(dplyr::group_by(metdat, DateTime, chamber),
Tair_SP=mean(Tair_SP,na.rm=T),
RH_al=mean(RH_al,na.rm=T),
DP_al=mean( DP_al,na.rm=T),
Tsub_al=mean(Tsub_al,na.rm=T),
RH_SP=mean(RH_SP,na.rm=T),
Tair_al=mean(Tair_al,na.rm=T)))
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- merge the within-chamber met data with the leaf temperature datasets
metdat_sum <- metdat.m[,c("DateTime","Tair_al","chamber")]
d4 <- merge(d3.m,metdat_sum,by=c("DateTime","chamber"))
#- leaf-to-air temperature differences from the IR sensor and from the thermocouples
d4$Tdiff_IR <- with(d4,TargTempC_Avg-Tair_al)
d4$Tdiff_TC <- with(d4,Tleaf_mean-Tair_al)
#  NOTE(review): levels(metdat$chamber) is NULL if read.csv returned character columns
#  (the default since R 4.0); factor() then just uses d4$chamber's own sorted levels -- confirm intent.
d4$chamber <- factor(d4$chamber,levels=levels(metdat$chamber))
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- read in the flux data (processed by the wtc4_flux_processing R project)
#  NOTE(review): absolute Windows path; machine-specific.
fluxdat <- read.csv("c:/Repos/wtc4_flux_processing/output/WTC_TEMP-PARRA_WTCFLUX_20160228-20161123_L0.csv")
fluxdat$DateTime <- as.POSIXct(fluxdat$DateTime,format="%Y-%m-%d %T",tz="UTC")
#- vapor pressure deficit from RH and air temperature (RHtoVPD presumably from plantecophys -- confirm)
fluxdat$VPD <- RHtoVPD(RH=fluxdat$RH_al,TdegC=fluxdat$Tair_al)
#- subset to just the data corresponding to the dates in the d4 dataframe
starttime <- min(d4$DateTime)
endtime <- max(d4$DateTime)
fluxdat2 <- subset(fluxdat,DateTime>starttime & DateTime < endtime)
#- 30-min chamber averages, excluding periods when a chamber door was open (DoorCnt==0)
fluxdat2$DateTime_hr <- nearestTimeStep(fluxdat2$DateTime,nminutes=30,align="floor")
fluxdat.hr1 <- summaryBy(FluxCO2+FluxH2O+Tair_al+PAR+VPD~DateTime_hr+chamber+T_treatment,
data=subset(fluxdat2,DoorCnt==0),FUN=mean,na.rm=T,keep.names=T)
#- treatment-level means and standard errors at each 30-min step
fluxdat.hr <- summaryBy(.~DateTime_hr+T_treatment,data=fluxdat.hr1,FUN=c(mean,standard.error),na.rm=T,keep.names=F)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- merge fluxdata.hr and IRT (fluxes and temperatures).
#- first align the 15-min leaf/met data (d4) onto the 30-min flux timestep
d4$DateTime_hr <- nearestTimeStep(d4$DateTime,nminutes=30,align="floor")
d4.hr1 <- data.frame(dplyr::summarize(dplyr::group_by(d4, DateTime_hr, chamber,T_treatment),
TargTempC_Avg=mean(TargTempC_Avg,na.rm=T),
PPFD_Avg=mean(PPFD_Avg,na.rm=T),
LeafT_Avg.1.=mean(LeafT_Avg.1.,na.rm=T),
LeafT_Avg.2.=mean(LeafT_Avg.2.,na.rm=T)))
#- combodat is the chamber-by-half-hour dataset used by all downstream analyses
combodat <- merge(fluxdat.hr1,d4.hr1,by=c("DateTime_hr","T_treatment","chamber"))
#- week-of-year bins (week() is presumably lubridate::week -- confirm) and a chamber-by-week factor
combodat$week <- factor(week(combodat$DateTime_hr))
combodat$weekfac <- factor(paste(combodat$chamber,combodat$week,sep="-"))
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- calculate assimilation weighted leaf temperature
#- instead of using weeks, cut the 259 days of observations into 259 bins of 1-day each.
#  cut() already returns a factor, so the previous as.factor() wrapper was redundant.
combodat$bin4days <- cut(as.Date(combodat$DateTime_hr),breaks=259,labels=1:259)
combodat$weekfac <- factor(paste(combodat$chamber,combodat$bin4days,sep="-"))

#- split into one data frame per chamber-by-day bin and pre-allocate the result lists
combodat.list <- split(combodat,combodat$weekfac)
nbins <- length(combodat.list)
chamber <- meanAirT <- meanLeafT <- weightedMeanLeafT <- Date <- bin <- T_treatment <- VPD <- vector("list",nbins)
for(i in seq_len(nbins)){ # seq_len() is safe when nbins == 0, unlike 1:nbins
#- restrict to daytime observations with positive net CO2 uptake.
#  The FluxCO2 > 0 filter already excludes negative fluxes, so the previous zero-fill
#  of negative fluxes was dead code and has been removed.
tocalc <- subset(combodat.list[[i]], PAR> 20 & FluxCO2>0)
chamber[[i]] <- as.character(tocalc$chamber[1])
bin[[i]] <- as.character(tocalc$bin4days[1])
T_treatment[[i]] <- as.character(tocalc$T_treatment[1])
meanAirT[[i]] <- mean(tocalc$Tair_al)
meanLeafT[[i]] <- mean(tocalc$TargTempC_Avg)
VPD[[i]] <- mean(tocalc$VPD)
Date[[i]] <- as.Date(tocalc$DateTime_hr)[1]
#- assimilation-weighted leaf temperature: IR leaf temperature weighted by net CO2 flux
weightedMeanLeafT[[i]] <- weighted.mean(tocalc$TargTempC_Avg,tocalc$FluxCO2)
}

#- assemble the bin-level results into a data frame (one row per chamber-by-day bin)
output_meanT <- data.frame(bin4days=levels(combodat$weekfac))
output_meanT$chamber <- do.call(rbind,chamber)
output_meanT$T_treatment <- do.call(rbind,T_treatment)
output_meanT$bin <- do.call(rbind,bin)
output_meanT$Date <- as.Date(do.call(rbind,Date),origin="1970-01-01")
output_meanT$meanAirT <- do.call(rbind,meanAirT)
output_meanT$meanLeafT <- do.call(rbind,meanLeafT)
output_meanT$weightedMeanLeafT <- do.call(rbind,weightedMeanLeafT)
output_meanT$VPD <- do.call(rbind,VPD)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# E N D O F D A T A M A N I P U L A T I O N
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# S T A T I S T I C A L A N A L Y S I S
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# SMATR statistical analysis of Tleaf vs. Tair
#sma1 <- sma(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment,
#            data=output_meanT) # treats each observation as independent, which inflates the statistical power
#-------------
#- random effects ANCOVA for Tleaf vs. Tair. Some evidence that the warmed treatment had a lower slope
#  but both slope 95% CI's included 1.0.
#- full model: treatment-specific slopes and intercepts, random slope+intercept per chamber
lme1 <- lmer(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
anova(lme1) # some evidence of difference in slope, but not terribly strong (p = 0.02)
confint(lme1)
#- approximate Wald 95% CIs for the Tleaf~Tair slope in each treatment (estimate +/- 1.96*SE).
#  Coefficient rows assumed: 1=intercept, 2=meanAirT slope, 3=treatment, 4=interaction.
#  NOTE(review): eleCI uses only the interaction term's SE and ignores its covariance with
#  the main slope, so the warmed-slope CI is approximate -- confirm this is acceptable.
modelout <- data.frame(summary(lme1)$coefficients)
ambCI <- c(modelout$Estimate[[2]]-modelout$Std..Error[[2]]*1.96,modelout$Estimate[[2]]+modelout$Std..Error[[2]]*1.96)
eleCI <- c((modelout$Estimate[[2]]+modelout$Estimate[[4]])-(modelout$Std..Error[[4]]*1.96),
(modelout$Estimate[[2]]+modelout$Estimate[[4]])+(modelout$Std..Error[[4]]*1.96))
#- reduced models: drop the interaction (lme1.test), then the treatment main effect (lme1.test2)
lme1.test <- lmer(meanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme1.test2 <- lmer(meanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme1,lme1.test,lme1.test2) # simplest model is preferred
AIC(lme1,lme1.test,lme1.test2) # models have very similar AICs
confint(lme1.test2)
#- visualize the fitted relationships
visreg(lme1,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme1.test,xvar="T_treatment")
#-------------
#- random effects ANCOVA for assimilation-weighted Tleaf vs. Tair
#- full model: treatment-specific slopes; random slope+intercept per chamber
lme2 <- lmer(weightedMeanLeafT~T_treatment+meanAirT+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
#- reduced models: drop the interaction, then the treatment main effect
lme2.test <- lmer(weightedMeanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme2.test2 <- lmer(weightedMeanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme2,lme2.test,lme2.test2)
AIC(lme2,lme2.test,lme2.test2) # simpler model is preferred from AIC and logLik bases
anova(lme2.test2) # some evidence of difference in slope, but not terribly strong (p = 0.05)
modelout2 <- data.frame(summary(lme2.test2)$coefficients)
confint(lme2.test2)
#ambCI <- c(modelout2$Estimate[[3]]-modelout2$Std..Error[[3]]*1.96,modelout2$Estimate[[3]]+modelout2$Std..Error[[3]]*1.96)
#eleCI <- c((modelout2$Estimate[[3]]+modelout2$Estimate[[4]])-(modelout2$Std..Error[[4]]*1.96),
#           (modelout2$Estimate[[3]]+modelout2$Estimate[[4]])+(modelout2$Std..Error[[4]]*1.96))
#- visualize the fitted relationships
visreg(lme2.test2,xvar="meanAirT",overlay=T)
visreg(lme2,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme2,xvar="T_treatment")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# E N D O F S T A T I S T I C A L A N A L Y S I S
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# P L O T S
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- set up a two-panel plot: Tleaf vs. Tair (a) and assimilation-weighted Tleaf vs. Tair (b)
windows(100,75) # Windows-only graphics device
par(mar=c(7,7,1,2),mfrow=c(1,2))
palette(c("blue","red")) # palette index 1 = ambient (blue), 2 = warmed (red)
pchs=3
cexs=0.5
#------
#- plot Tleaf vs. Tair (panel a)
mindate <- min(output_meanT$Date,na.rm=T)#as.Date("2016-05-01")
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="")
abline(0,1,lty=2) # 1:1 reference line
#- overlay mixed model (lme1) predictions over the observed Tair range
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
#- ambient predictions; C01 is an odd-numbered (ambient) chamber
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
#- warmed predictions; C02 is an even-numbered (warmed) chamber
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
# lmT <- lm(meanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lmT,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lmT)[[2]],2),sep=""),bty="n")
legend("bottomright",pch=c(pchs,pchs),col=c("blue","red"),legend=c("Ambient","Warmed"))
legend("topleft",legend=letters[1],cex=1.2,bty="n") # panel label "a"
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(T[leaf]~(degree*C)~(measured)),xpd=NA,cex.lab=2)
#------
#- Panel (b): assimilation-weighted Tleaf vs. Tair, with lme2 prediction bands per treatment
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=FALSE,axes=FALSE,
xlab="",ylab="")
abline(0,1,lty=2) # 1:1 reference line
#- overlay mixed-model predictions: one loop pass per treatment (blue=ambient, red=warmed)
tair_seq <- seq(from=min(output_meanT$meanAirT,na.rm=TRUE),
to=max(output_meanT$meanAirT,na.rm=TRUE),length.out=101)
pred_specs <- list(list(trt="ambient",cham="C01",col="blue"),
list(trt="elevated",cham="C02",col="red"))
for(spec in pred_specs){
nd <- expand.grid(T_treatment=spec$trt,meanAirT=tair_seq,chamber=spec$cham)
band <- predictInterval(lme2,newdata=nd)
lines(tair_seq,band$fit,col=spec$col,lwd=2)        # fitted line
lines(tair_seq,band$upr,col=spec$col,lty=2,lwd=2)  # upper prediction bound
lines(tair_seq,band$lwr,col=spec$col,lty=2,lwd=2)  # lower prediction bound
}
magaxis(side=1:4,labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(Assimilation~weighted~T[leaf]~(degree*C)),xpd=NA,cex.lab=2)
legend("topleft",legend=letters[2],cex=1.2,bty="n") # panel label "b"
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- plot three example days. Low T, moderate T, extremely high T. Show divergent diurnal timecourses.
#- low-temperature example day: treatment means (and SEs) of flux, PAR and IR leaf temperature
lowTday <- as.Date("2016-09-30")
lowTdat <- subset(combodat,as.Date(DateTime_hr)==lowTday)
lowTdat.m1 <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment+chamber,FUN=mean,keep.names=T,data=lowTdat)
lowTdat.m <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment,FUN=c(mean,se),data=lowTdat.m1)
times <- subset(lowTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
#- two stacked panels: PAR + IR leaf T (top, dual y-axis) and net CO2 flux (bottom)
windows(60,100)
#par(mar=c(7,7,1,2),mfrow=c(2,1))
par(mar=c(2,2,1,2),oma=c(4,5,4,4),cex.lab=1.6,las=1,cex.axis=1.2)
layout(matrix(c(1,2), 2, 2, byrow = F),
widths=c(1,1), heights=c(1,2))
times <- subset(lowTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
with(subset(lowTdat.m,T_treatment=="ambient"),plot(DateTime_hr,PAR.mean,type="l",ylim=c(0,2000),
xlab="",ylab="",
panel.first=shadeNight(times)))
title(ylab=expression(PPFD~(mu*mol~m^-2~s^-1)),line=3.5,xpd=NA)
#- second y-axis on the same panel: IR leaf temperature in red
par(new = T)
with(subset(lowTdat.m,T_treatment=="ambient"),plot(DateTime_hr,TargTempC_Avg.mean,
type="l",pch=16,xlab="",ylab="",col="red",ylim=c(0,45),axes=F))
axis(side=4,col="red",col.axis="red")
title(ylab=expression(T[l-IR]~(degree*C)),line=-26,xpd=NA,col.lab="red")
#- bottom panel: diurnal net CO2 flux with standard-error bars
with(subset(lowTdat.m,T_treatment=="ambient"),plot(DateTime_hr,FluxCO2.mean,
type="b",pch=16, col="black",ylim=c(-0.05,0.2),legend=F,ylab="",
panel.first=shadeNight(times)))
adderrorbars(x=subset(lowTdat.m,T_treatment=="ambient")$DateTime_hr,
y=subset(lowTdat.m,T_treatment=="ambient")$FluxCO2.mean,
SE=subset(lowTdat.m,T_treatment=="ambient")$FluxCO2.se,direction="updown")
abline(h=0)
axis(side = 4)
title(ylab=expression(Net~CO[2]~flux~(mmol~CO[2]~s^-1)),line=3.5,xpd=NA)
#- moderate-temperature example day; same two-panel layout as the low-T day above.
#  NOTE(review): DateTime_hr+3600 shifts plotted times by one hour -- presumably a
#  daylight-saving/display adjustment; confirm against the timestamp convention.
modTday <- as.Date("2016-10-30")
modTdat <- subset(combodat,as.Date(DateTime_hr)==modTday)
modTdat.m1 <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment+chamber,FUN=mean,keep.names=T,data=modTdat)
modTdat.m <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment,FUN=c(mean,se),data=modTdat.m1)
times <- subset(modTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
windows(60,100)
#par(mar=c(7,7,1,2),mfrow=c(2,1))
par(mar=c(2,2,1,2),oma=c(4,5,4,4),cex.lab=1.6,las=1,cex.axis=1.2)
layout(matrix(c(1,2), 2, 2, byrow = F),
widths=c(1,1), heights=c(1,2))
times <- subset(modTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
with(subset(modTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,PAR.mean,type="l",ylim=c(0,2000),
xlab="",ylab="",
panel.first=shadeNight(times)))
title(ylab=expression(PPFD~(mu*mol~m^-2~s^-1)),line=3.5,xpd=NA)
#- second y-axis on the same panel: IR leaf temperature in red
par(new = T)
with(subset(modTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,TargTempC_Avg.mean,
type="l",pch=16,xlab="",ylab="",col="red",ylim=c(0,45),axes=F))
axis(side=4,col="red",col.axis="red")
title(ylab=expression(T[l-IR]~(degree*C)),line=-26,xpd=NA,col.lab="red")
#- bottom panel: diurnal net CO2 flux with standard-error bars
with(subset(modTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,FluxCO2.mean,
type="b",pch=16, col="black",ylim=c(-0.05,0.2),legend=F,ylab="",
panel.first=shadeNight(times)))
adderrorbars(x=subset(modTdat.m,T_treatment=="ambient")$DateTime_hr+3600,
y=subset(modTdat.m,T_treatment=="ambient")$FluxCO2.mean,
SE=subset(modTdat.m,T_treatment=="ambient")$FluxCO2.se,direction="updown")
abline(h=0)
axis(side = 4)
title(ylab=expression(Net~CO[2]~flux~(mmol~CO[2]~s^-1)),line=3.5,xpd=NA)
#- extremely hot example day. Only chambers assigned to the heatwave treatment ("HW")
#  are averaged; linkdf maps each chamber to control ("C") or heatwave ("HW").
hotTday <- as.Date("2016-11-01")
hotTdat <- subset(combodat,as.Date(DateTime_hr)==hotTday)
linkdf <- data.frame(chamber = levels(as.factor(hotTdat$chamber)),
HWtrt = c("C","C","HW","HW","C","C","HW","C","HW","HW","C","HW"))#swapped C12 and C08
hotTdat <- merge(hotTdat,linkdf)
hotTdat.m1 <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment+chamber,FUN=mean,keep.names=T,
data=subset(hotTdat,HWtrt=="HW"))
hotTdat.m <- summaryBy(FluxCO2+PAR+TargTempC_Avg~DateTime_hr+T_treatment,FUN=c(mean,se),data=hotTdat.m1)
times <- subset(hotTdat.m,T_treatment=="ambient")$DateTime_hr # extract times for shadeNight
#- same two-panel layout as the other example days (times shifted +3600 s as above)
windows(60,100)
#par(mar=c(7,7,1,2),mfrow=c(2,1))
par(mar=c(2,2,1,2),oma=c(4,5,4,4),cex.lab=1.6,las=1,cex.axis=1.2)
layout(matrix(c(1,2), 2, 2, byrow = F),
widths=c(1,1), heights=c(1,2))
with(subset(hotTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,PAR.mean,type="l",ylim=c(0,2000),
xlab="",ylab="",
panel.first=shadeNight(times)))
title(ylab=expression(PPFD~(mu*mol~m^-2~s^-1)),line=3.5,xpd=NA)
#- second y-axis on the same panel: IR leaf temperature in red
par(new = T)
with(subset(hotTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,TargTempC_Avg.mean,
type="l",pch=16,xlab="",ylab="",col="red",ylim=c(0,45),axes=F))
axis(side=4,col="red",col.axis="red")
title(ylab=expression(T[l-IR]~(degree*C)),line=-26,xpd=NA,col.lab="red")
#- bottom panel: diurnal net CO2 flux with standard-error bars
with(subset(hotTdat.m,T_treatment=="ambient"),plot(DateTime_hr+3600,FluxCO2.mean,
type="b",pch=16, col="black",ylim=c(-0.05,0.2),legend=F,ylab="",
panel.first=shadeNight(times)))
adderrorbars(x=subset(hotTdat.m,T_treatment=="ambient")$DateTime_hr+3600,
y=subset(hotTdat.m,T_treatment=="ambient")$FluxCO2.mean,
SE=subset(hotTdat.m,T_treatment=="ambient")$FluxCO2.se,direction="updown")
abline(h=0)
axis(side = 4)
title(ylab=expression(Net~CO[2]~flux~(mmol~CO[2]~s^-1)),line=3.5,xpd=NA)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-- Repeat the calculation of assimilation-weighted leaf temperature, but on a weekly timescale
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- calculate assimilation weighted leaf temperature
#- create weekly bins
combodat$bin4days <- as.factor(week(combodat$DateTime_hr))
combodat$weekfac <- factor(paste(combodat$chamber,combodat$bin4days,sep="-"))
#- calculate weighted average leaf temperatures, for each bin
#- Split the hourly data into one data frame per chamber-by-week bin
#- (weekfac was built as paste(chamber, week, sep="-") above).
combodat.list <- split(combodat,combodat$weekfac)
# Positional accumulator lists: element i of every list describes bin i of
# combodat.list, so all lists stay aligned for the data.frame assembly below.
chamber <-meanAirT<- meanLeafT <- weightedMeanLeafT <- Date <- bin <- T_treatment <- VPD <- list()
for(i in 1:length(combodat.list)){
# Keep daytime observations (PAR > 20) with positive net CO2 flux only.
tocalc <- subset(combodat.list[[i]], PAR> 20 & FluxCO2>0)
# zero fill negative fluxes
# NOTE(review): the FluxCO2 > 0 filter above already removed negative fluxes,
# so this zero-fill is a no-op as written — confirm which behavior was intended
# (filter out negatives vs. keep rows but zero the flux).
tona <- which(tocalc$FluxCO2 < 0)
tocalc$FluxCO2[tona] <- 0
# Bin identifiers: all rows in a bin share chamber/bin/treatment, so take row 1.
chamber[[i]] <- as.character(tocalc$chamber[1])
bin[[i]] <- as.character(tocalc$bin4days[1])
T_treatment[[i]] <- as.character(tocalc$T_treatment[1])
# Unweighted bin means of air temperature, leaf (IR target) temperature, VPD.
meanAirT[[i]] <- mean(tocalc$Tair_al)
meanLeafT[[i]] <- mean(tocalc$TargTempC_Avg)
VPD[[i]] <- mean(tocalc$VPD)
# First date in the bin, used as the bin's representative date.
Date[[i]] <- as.Date(tocalc$DateTime_hr)[1]
# Assimilation-weighted leaf temperature: weight TargTempC_Avg by CO2 flux.
weightedMeanLeafT[[i]] <- weighted.mean(tocalc$TargTempC_Avg,tocalc$FluxCO2)
}
#output_meanT <- data.frame(chamber=levels(fluxdat2$chamber),T_treatment=factor(rep(c("ambient","elevated"),6)))
# Assemble one row per chamber-week bin. Row order relies on split() returning
# elements in levels(weekfac) order, matching the accumulator lists.
output_meanT <- data.frame(bin4days=levels(combodat$weekfac))
output_meanT$chamber <- do.call(rbind,chamber)
output_meanT$T_treatment <- do.call(rbind,T_treatment)
output_meanT$bin <- do.call(rbind,bin)
# rbind on Date list drops the class; restore it via origin-based conversion.
output_meanT$Date <- as.Date(do.call(rbind,Date),origin="1970-01-01")
output_meanT$meanAirT <- do.call(rbind,meanAirT)
output_meanT$meanLeafT <- do.call(rbind,meanLeafT)
output_meanT$weightedMeanLeafT <- do.call(rbind,weightedMeanLeafT)
output_meanT$VPD <- do.call(rbind,VPD)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# SMATR statistical analysis of Tleaf vs. Tair
#sma1 <- sma(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment,
# data=output_meanT) # treats each observation as independent, which inflates the statistical power
#-------------
#- random effects ANCOVA for Tleaf vs. Tair. Some evidence that the warmed treatment had a lower slope
# but both slope 95% CI's included 1.0.
lme1 <- lmer(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
anova(lme1) # some evidence of difference in slope, but not terribly strong (p = 0.02)
confint(lme1)
modelout <- data.frame(summary(lme1)$coefficients)
ambCI <- c(modelout$Estimate[[2]]-modelout$Std..Error[[2]]*1.96,modelout$Estimate[[2]]+modelout$Std..Error[[2]]*1.96)
eleCI <- c((modelout$Estimate[[2]]+modelout$Estimate[[4]])-(modelout$Std..Error[[4]]*1.96),
(modelout$Estimate[[2]]+modelout$Estimate[[4]])+(modelout$Std..Error[[4]]*1.96))
lme1.test <- lmer(meanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme1.test2 <- lmer(meanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme1,lme1.test,lme1.test2) # simplest model is preferred
AIC(lme1,lme1.test,lme1.test2) # models have very similar AICs
confint(lme1.test2)
visreg(lme1,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme1.test,xvar="T_treatment")
#-------------
#- random effects ANCOVA for assimilation-weighted Tleaf vs. Tair
lme2 <- lmer(weightedMeanLeafT~T_treatment+meanAirT+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
lme2.test <- lmer(weightedMeanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme2.test2 <- lmer(weightedMeanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme2,lme2.test,lme2.test2)
AIC(lme2,lme2.test,lme2.test2) # simpler model is preferred from AIC and logLik bases
anova(lme2.test2) # some evidence of difference in slope, but not terribly strong (p = 0.05)
modelout2 <- data.frame(summary(lme2.test2)$coefficients)
confint(lme2.test2)
#ambCI <- c(modelout2$Estimate[[3]]-modelout2$Std..Error[[3]]*1.96,modelout2$Estimate[[3]]+modelout2$Std..Error[[3]]*1.96)
#eleCI <- c((modelout2$Estimate[[3]]+modelout2$Estimate[[4]])-(modelout2$Std..Error[[4]]*1.96),
# (modelout2$Estimate[[3]]+modelout2$Estimate[[4]])+(modelout2$Std..Error[[4]]*1.96))
visreg(lme2.test2,xvar="meanAirT",overlay=T)
visreg(lme2,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme2,xvar="T_treatment")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- set up plot of Tleaf vs. Tair and assimilation-weighted Tleaf vs. Tair
windows(100,75)
par(mar=c(7,7,1,2),mfrow=c(1,2))
palette(c("blue","red"))
pchs=3
cexs=0.5
#------
#- plot Tleaf vs. Tair
mindate <- min(output_meanT$Date,na.rm=T)#as.Date("2016-05-01")
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="")
abline(0,1,lty=2)
#- overlay mixed model predictions
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="",add=T)
# lmT <- lm(meanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lmT,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lmT)[[2]],2),sep=""),bty="n")
legend("bottomright",pch=c(pchs,pchs),col=c("blue","red"),legend=c("Ambient","Warmed"))
legend("topleft",legend=letters[1],cex=1.2,bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(T[leaf]~(degree*C)~(measured)),xpd=NA,cex.lab=2)
#------
#- plot assimilation-weighted Tleaf vs. Tair
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="")
abline(0,1,lty=2)
#- overlay mixed model predictions
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="",add=T)
# lm1 <- lm(weightedMeanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lm1,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lm1)[[2]],2),sep=""),bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(Assimilation~weighted~T[leaf]~(degree*C)),xpd=NA,cex.lab=2)
legend("topleft",legend=letters[2],cex=1.2,bty="n")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-- Repeat the calculation of assimilation-weighted leaf temperature, but on a monthly timescale
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- calculate assimilation weighted leaf temperature
#- create monthly bins
combodat$bin4days <- as.factor(month(combodat$DateTime_hr))
combodat <- subset(combodat,bin4days != "2") #extract a month with little data
combodat$weekfac <- factor(paste(combodat$chamber,combodat$bin4days,sep="-"))
#- calculate weighted average leaf temperatures, for each bin
#- Split the hourly data into one data frame per chamber-by-month bin
#- (weekfac was rebuilt above from the monthly bin4days, despite its name).
combodat.list <- split(combodat,combodat$weekfac)
# Positional accumulator lists: element i of every list describes bin i of
# combodat.list, so all lists stay aligned for the data.frame assembly below.
chamber <-meanAirT<- meanLeafT <- weightedMeanLeafT <- Date <- bin <- T_treatment <- VPD <- list()
for(i in 1:length(combodat.list)){
# Keep daytime observations (PAR > 20) with positive net CO2 flux only.
tocalc <- subset(combodat.list[[i]], PAR> 20 & FluxCO2>0)
# zero fill negative fluxes
# NOTE(review): the FluxCO2 > 0 filter above already removed negative fluxes,
# so this zero-fill is a no-op as written — confirm which behavior was intended
# (filter out negatives vs. keep rows but zero the flux).
tona <- which(tocalc$FluxCO2 < 0)
tocalc$FluxCO2[tona] <- 0
# Bin identifiers: all rows in a bin share chamber/bin/treatment, so take row 1.
chamber[[i]] <- as.character(tocalc$chamber[1])
bin[[i]] <- as.character(tocalc$bin4days[1])
T_treatment[[i]] <- as.character(tocalc$T_treatment[1])
# Unweighted bin means of air temperature, leaf (IR target) temperature, VPD.
meanAirT[[i]] <- mean(tocalc$Tair_al)
meanLeafT[[i]] <- mean(tocalc$TargTempC_Avg)
VPD[[i]] <- mean(tocalc$VPD)
# First date in the bin, used as the bin's representative date.
Date[[i]] <- as.Date(tocalc$DateTime_hr)[1]
# Assimilation-weighted leaf temperature: weight TargTempC_Avg by CO2 flux.
weightedMeanLeafT[[i]] <- weighted.mean(tocalc$TargTempC_Avg,tocalc$FluxCO2)
}
#output_meanT <- data.frame(chamber=levels(fluxdat2$chamber),T_treatment=factor(rep(c("ambient","elevated"),6)))
# Assemble one row per chamber-month bin. Row order relies on split() returning
# elements in levels(weekfac) order, matching the accumulator lists.
output_meanT <- data.frame(bin4days=levels(combodat$weekfac))
output_meanT$chamber <- do.call(rbind,chamber)
output_meanT$T_treatment <- do.call(rbind,T_treatment)
output_meanT$bin <- do.call(rbind,bin)
# rbind on Date list drops the class; restore it via origin-based conversion.
output_meanT$Date <- as.Date(do.call(rbind,Date),origin="1970-01-01")
output_meanT$meanAirT <- do.call(rbind,meanAirT)
output_meanT$meanLeafT <- do.call(rbind,meanLeafT)
output_meanT$weightedMeanLeafT <- do.call(rbind,weightedMeanLeafT)
output_meanT$VPD <- do.call(rbind,VPD)
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
# SMATR statistical analysis of Tleaf vs. Tair
#sma1 <- sma(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment,
# data=output_meanT) # treats each observation as independent, which inflates the statistical power
#-------------
#- random effects ANCOVA for Tleaf vs. Tair. Some evidence that the warmed treatment had a lower slope
# but both slope 95% CI's included 1.0.
lme1 <- lmer(meanLeafT~meanAirT+T_treatment+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
anova(lme1) # some evidence of difference in slope, but not terribly strong (p = 0.02)
confint(lme1)
modelout <- data.frame(summary(lme1)$coefficients)
ambCI <- c(modelout$Estimate[[2]]-modelout$Std..Error[[2]]*1.96,modelout$Estimate[[2]]+modelout$Std..Error[[2]]*1.96)
eleCI <- c((modelout$Estimate[[2]]+modelout$Estimate[[4]])-(modelout$Std..Error[[4]]*1.96),
(modelout$Estimate[[2]]+modelout$Estimate[[4]])+(modelout$Std..Error[[4]]*1.96))
lme1.test <- lmer(meanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme1.test2 <- lmer(meanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme1,lme1.test,lme1.test2) # simplest model is preferred
AIC(lme1,lme1.test,lme1.test2) # models have very similar AICs
confint(lme1.test2)
visreg(lme1,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme1.test,xvar="T_treatment")
#-------------
#- random effects ANCOVA for assimilation-weighted Tleaf vs. Tair
lme2 <- lmer(weightedMeanLeafT~T_treatment+meanAirT+meanAirT:T_treatment+(meanAirT|chamber),
data=output_meanT)
lme2.test <- lmer(weightedMeanLeafT~meanAirT+T_treatment+(meanAirT|chamber),
data=output_meanT)
lme2.test2 <- lmer(weightedMeanLeafT~meanAirT+(meanAirT|chamber),
data=output_meanT)
anova(lme2,lme2.test,lme2.test2)
AIC(lme2,lme2.test,lme2.test2) # simpler model is preferred from AIC and logLik bases
anova(lme2.test2) # some evidence of difference in slope, but not terribly strong (p = 0.05)
modelout2 <- data.frame(summary(lme2.test2)$coefficients)
confint(lme2.test2)
#ambCI <- c(modelout2$Estimate[[3]]-modelout2$Std..Error[[3]]*1.96,modelout2$Estimate[[3]]+modelout2$Std..Error[[3]]*1.96)
#eleCI <- c((modelout2$Estimate[[3]]+modelout2$Estimate[[4]])-(modelout2$Std..Error[[4]]*1.96),
# (modelout2$Estimate[[3]]+modelout2$Estimate[[4]])+(modelout2$Std..Error[[4]]*1.96))
visreg(lme2.test2,xvar="meanAirT",overlay=T)
visreg(lme2,xvar="meanAirT",by="T_treatment",overlay=T)
visreg(lme2,xvar="T_treatment")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
#- set up plot of Tleaf vs. Tair and assimilation-weighted Tleaf vs. Tair
windows(100,75)
par(mar=c(7,7,1,2),mfrow=c(1,2))
palette(c("blue","red"))
pchs=16
cexs=1
#------
#- plot Tleaf vs. Tair
mindate <- min(output_meanT$Date,na.rm=T)#as.Date("2016-05-01")
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="")
abline(0,1,lty=2)
#- overlay mixed model predictions
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme1,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
plotBy(meanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),
pch=pchs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,cex=cexs,
xlab="",ylab="",add=T)
# lmT <- lm(meanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lmT,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lmT)[[2]],2),sep=""),bty="n")
legend("bottomright",pch=c(pchs,pchs),col=c("blue","red"),legend=c("Ambient","Warmed"))
legend("topleft",legend=letters[1],cex=1.2,bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(T[leaf]~(degree*C)~(measured)),xpd=NA,cex.lab=2)
#------
#- plot assimilation-weighted Tleaf vs. Tair
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="")
abline(0,1,lty=2)
#- overlay mixed model predictions
xvar <- seq(from=min(output_meanT$meanAirT,na.rm=T),to=max(output_meanT$meanAirT,na.rm=T),length.out=101)
newdata <- expand.grid(T_treatment=c("ambient"),meanAirT=xvar,chamber="C01")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="blue",lwd=2)
lines(preds$upr~xvar,col="blue",lty=2,lwd=2)
lines(preds$lwr~xvar,col="blue",lty=2,lwd=2)
newdata <- expand.grid(T_treatment=c("elevated"),meanAirT=xvar,chamber="C02")
preds <- predictInterval(lme2,newdata=newdata)
lines(preds$fit~xvar,col="red",lwd=2)
lines(preds$upr~xvar,col="red",lty=2,lwd=2)
lines(preds$lwr~xvar,col="red",lty=2,lwd=2)
plotBy(weightedMeanLeafT~meanAirT|T_treatment,data=subset(output_meanT,Date>mindate),pch=pchs,cex=cexs,xlim=c(0,35),ylim=c(0,35),legend=F,axes=F,
xlab="",ylab="",add=T)
# lm1 <- lm(weightedMeanLeafT~meanAirT,data=subset(output_meanT,Date>mindate))
# abline(lm1,lty=2)
#legend("bottomright",paste("Slope = ",round(coef(lm1)[[2]],2),sep=""),bty="n")
magaxis(side=c(1:4),labels=c(1,1,0,1),las=1,ratio=0.25)
title(xlab=expression(T[air]~(degree*C)),xpd=NA,cex.lab=2)
title(ylab=expression(Assimilation~weighted~T[leaf]~(degree*C)),xpd=NA,cex.lab=2)
legend("topleft",legend=letters[2],cex=1.2,bty="n")
#-----------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------
|
#' Catchment boundaries of 28 basins in the Czech Republic
#'
#' @format SpatialPolygonsDataFrame - 28 polygons
"povodi"
#' Hydrometeorological data for 28 basins in the Czech Republic
#'
#' @format data.table containing the following variables
#'
#' \describe{
#'   \item{DBCN}{data bank number (basin identifier) - can be used to join with the \link{povodi} dataset}
#'   \item{AREA}{basin area in m2}
#'   \item{DTM}{date}
#'   \item{Q}{discharge [m3/s]}
#'   \item{P}{daily precipitation [mm]}
#'   \item{T}{daily temperature [deg. C]}
#'   \item{R}{daily runoff [mm]}
#' }
"hydrometeo"
#' Data from a global climate model
#'
#' Changes in precipitation and temperature for individual months according to the CMIP5 simulation, model HadGEM-ESM2, under RCP8.5 between the periods 2070-2100 and 1970-2000
#' @name GCM
#' @format RasterBrick
NULL
#' Monthly precipitation changes
#' @rdname GCM
"gcm_pr_ch"
#' Monthly temperature changes
#' @rdname GCM
"gcm_tas_ch"
|
/R/data.R
|
no_license
|
hanel/KZ2020
|
R
| false
| false
| 905
|
r
|
#' Catchment boundaries of 28 basins in the Czech Republic
#'
#' @format SpatialPolygonsDataFrame - 28 polygons
"povodi"
#' Hydrometeorological data for 28 basins in the Czech Republic
#'
#' @format data.table containing the following variables
#'
#' \describe{
#'   \item{DBCN}{data bank number (basin identifier) - can be used to join with the \link{povodi} dataset}
#'   \item{AREA}{basin area in m2}
#'   \item{DTM}{date}
#'   \item{Q}{discharge [m3/s]}
#'   \item{P}{daily precipitation [mm]}
#'   \item{T}{daily temperature [deg. C]}
#'   \item{R}{daily runoff [mm]}
#' }
"hydrometeo"
#' Data from a global climate model
#'
#' Changes in precipitation and temperature for individual months according to the CMIP5 simulation, model HadGEM-ESM2, under RCP8.5 between the periods 2070-2100 and 1970-2000
#' @name GCM
#' @format RasterBrick
NULL
#' Monthly precipitation changes
#' @rdname GCM
"gcm_pr_ch"
#' Monthly temperature changes
#' @rdname GCM
"gcm_tas_ch"
|
#' print methods of the tt objects
#'
#' In tidytuesdayR there are nice print methods for the objects that were used
#' to download and store the data from the TidyTuesday repo. They will always
#' print the available datasets/files. If there is a readme available,
#' it will try to display the tidytuesday readme.
#'
#' @name printing
#'
#' @inheritParams base::print
#' @param x a tt_data or tt object
#'
#' @examples
#'
#' \donttest{
#' if(interactive()){
#' tt <- tt_load_gh("2019-01-15")
#' print(tt)
#'
#' tt_data <- tt_download(tt, files = "All")
#' print(tt_data)
#' }
#' }
NULL
#' @rdname printing
#' @importFrom tools file_path_sans_ext
#' @export
#' @return used to show readme and list names of available datasets
#'
print.tt_data <- function(x, ...) {
  # Display the TidyTuesday readme (when one is attached) before listing data.
  readme(x)
  # Dataset names are the element names with their file extensions stripped.
  dataset_names <- tools::file_path_sans_ext(names(x))
  message("Available datasets:\n\t",
          paste(dataset_names, "\n\t", collapse = ""))
  invisible(x)
}
#' @rdname printing
#' @importFrom tools file_path_sans_ext
#' @export
#' @return used to show available datasets for the tidytuesday
#'
print.tt <- function(x, ...) {
  # The files recorded at load time live in the ".files" attribute.
  available_files <- attr(x, ".files")$data_files
  message("Available datasets in this TidyTuesday:\n\t",
          paste(available_files, "\n\t", collapse = ""))
  invisible(x)
}
#' @title Readme HTML maker and Viewer
#' @param tt tt_data object for printing
#' @importFrom xml2 write_html
#' @return NULL
#' @export
#' @return Does not return anything. Used to show readme of the downloaded
#' tidytuesday dataset in the Viewer.
#' @examples
#' \donttest{
#' tt_output <- tt_load_gh("2019-01-15")
#' readme(tt_output)
#' }
readme <- function(tt) {
  # A tt_data object carries its underlying tt metadata in the ".tt" attribute.
  if ("tt_data" %in% class(tt)) {
    tt <- attr(tt, ".tt")
  }
  readme_doc <- attr(tt, ".readme")
  if (length(readme_doc) > 0) {
    # Write the readme to a temporary HTML file and hand it to the viewer
    # (RStudio viewer pane when available, system browser otherwise).
    tmpHTML <- tempfile(fileext = ".html")
    xml2::write_html(readme_doc, file = tmpHTML)
    html_viewer(tmpHTML)
  }
  invisible(NULL)
}
#' @importFrom utils browseURL
#' @importFrom rstudioapi viewer isAvailable
#' @noRd
html_viewer <- function(url, is_interactive = interactive()) {
  if (!is_interactive) {
    # Non-interactive sessions (scripts, checks) get no viewer at all.
    return(invisible(NULL))
  }
  if (isAvailable()) {
    # Inside RStudio: render in the built-in viewer pane.
    viewer(url = url)
  } else {
    # Plain interactive R: fall back to the system browser.
    browseURL(url = url)
  }
}
|
/R/utils.R
|
permissive
|
thecodemasterk/tidytuesdayR
|
R
| false
| false
| 2,191
|
r
|
#' print methods of the tt objects
#'
#' In tidytuesdayR there are nice print methods for the objects that were used
#' to download and store the data from the TidyTuesday repo. They will always
#' print the available datasets/files. If there is a readme available,
#' it will try to display the tidytuesday readme.
#'
#' @name printing
#'
#' @inheritParams base::print
#' @param x a tt_data or tt object
#'
#' @examples
#'
#' \donttest{
#' if(interactive()){
#' tt <- tt_load_gh("2019-01-15")
#' print(tt)
#'
#' tt_data <- tt_download(tt, files = "All")
#' print(tt_data)
#' }
#' }
NULL
#' @rdname printing
#' @importFrom tools file_path_sans_ext
#' @export
#' @return used to show readme and list names of available datasets
#'
print.tt_data <- function(x, ...) {
  # Display the TidyTuesday readme (when one is attached) before listing data.
  readme(x)
  # Dataset names are the element names with their file extensions stripped.
  dataset_names <- tools::file_path_sans_ext(names(x))
  message("Available datasets:\n\t",
          paste(dataset_names, "\n\t", collapse = ""))
  invisible(x)
}
#' @rdname printing
#' @importFrom tools file_path_sans_ext
#' @export
#' @return used to show available datasets for the tidytuesday
#'
print.tt <- function(x, ...) {
  # The files recorded at load time live in the ".files" attribute.
  available_files <- attr(x, ".files")$data_files
  message("Available datasets in this TidyTuesday:\n\t",
          paste(available_files, "\n\t", collapse = ""))
  invisible(x)
}
#' @title Readme HTML maker and Viewer
#' @param tt tt_data object for printing
#' @importFrom xml2 write_html
#' @return NULL
#' @export
#' @return Does not return anything. Used to show readme of the downloaded
#' tidytuesday dataset in the Viewer.
#' @examples
#' \donttest{
#' tt_output <- tt_load_gh("2019-01-15")
#' readme(tt_output)
#' }
readme <- function(tt) {
  # A tt_data object carries its underlying tt metadata in the ".tt" attribute.
  if ("tt_data" %in% class(tt)) {
    tt <- attr(tt, ".tt")
  }
  readme_doc <- attr(tt, ".readme")
  if (length(readme_doc) > 0) {
    # Write the readme to a temporary HTML file and hand it to the viewer
    # (RStudio viewer pane when available, system browser otherwise).
    tmpHTML <- tempfile(fileext = ".html")
    xml2::write_html(readme_doc, file = tmpHTML)
    html_viewer(tmpHTML)
  }
  invisible(NULL)
}
#' @importFrom utils browseURL
#' @importFrom rstudioapi viewer isAvailable
#' @noRd
html_viewer <- function(url, is_interactive = interactive()) {
  if (!is_interactive) {
    # Non-interactive sessions (scripts, checks) get no viewer at all.
    return(invisible(NULL))
  }
  if (isAvailable()) {
    # Inside RStudio: render in the built-in viewer pane.
    viewer(url = url)
  } else {
    # Plain interactive R: fall back to the system browser.
    browseURL(url = url)
  }
}
|
# BACI-Chironomid
# 2014-11-28 CJS sf.autoplot.lmer
# 2014-11-26 CJS sink, ggplot, ##***, lmer modifications
# Taken from Krebs, Ecological Methodology, 2nd Edition. Box 10.3.
# Estimates of chironomid abundance in sediments were taken at one station
# above and below a pulp mill outflow pipe for 3 years before plant operation
# and for 6 years after plant operation.
options(useFancyQuotes=FALSE) # renders summary output corrects
library(ggplot2)
library(lsmeans)
library(lmerTest)
library(plyr)
source("../../schwarz.functions.r")
# Read in the actual data
sink("baci-chironomid-R-001.txt", split=TRUE)
##***part001b;
cat(" BACI design measuring chironomid counts with multiple (paired) yearly measurements before/after \n\n")
chironomid <- read.csv("baci-chironomid.csv", header=TRUE, as.is=TRUE, strip.white=TRUE)
cat("Listing of part of the raw data \n")
head(chironomid)
##***part001e;
sink()
# The data is NOT in the usual format where there is only one column
# for the response and a separate column indicating if it is a control or
# impact site. We need to restructure the data
sink('baci-chironomid-R-301.txt', split=TRUE)
##***part301b;
# We reshape the data from wide to long format
chironomid.long <- reshape(chironomid, varying=c("Control.Site","Treatment.Site"),
v.names="Count", direction="long",
timevar=c("SiteClass"),
times=c("Control","Impact"),
drop=c("diff"), idvar=c("Year"))
chironomid.long$SiteClass <- factor(chironomid.long$SiteClass)
chironomid.long$Site <- factor(chironomid.long$Site)
chironomid.long$YearF <- factor(chironomid.long$Year)
chironomid.long$Period <- factor(chironomid.long$Period)
head(chironomid.long)
##***part301e;
sink()
str(chironomid.long)
# Get plot of series over time
##***part010b;
prelimplot <- ggplot(data=chironomid.long,
aes(x=Year, y=Count, group=Site, color=SiteClass, shape=Site))+
ggtitle("Fish counts over time")+
geom_point()+
geom_line()+
geom_vline(xintercept=-0.5+min(chironomid.long$Year[as.character(chironomid.long$Period) == "After"]))
prelimplot
##***part010e;
ggsave(plot=prelimplot, file="baci-chironomid-R-010.png", h=4, w=6, units="in", dpi=300)
# There are several ways in which this BACI design can be analyzed.
###########################################################################
# Do a t-test on the differences of the averages for each site
# Because only one measurement was taken at each site in each year, we
# don't have to first average. We can use the wide format data.
sink('baci-chironomid-R-101.txt', split=TRUE)
##***part101b;
chironomid$diff <- chironomid$Treatment.Site - chironomid$Control.Site
head(chironomid)
##***part101e;
sink()
# Plot the difference over time
##***part102b;
plotdiff <- ggplot(data=chironomid, aes(x=Year, y=diff))+
ggtitle("Plot of differences over time")+
ylab("Difference (Impact-Control)")+
geom_point()+
geom_line()+
geom_vline(xintercept=-0.5+min(chironomid$Year[as.character(chironomid$Period) == "After"]))
plotdiff
##***part102e;
ggsave(plot=plotdiff, file="baci-chironomid-R-102.png", h=4, w=6, units="in", dpi=300)
# do the two sample t-test not assuming equal variances
sink('baci-chironomid-R-104.txt', split=TRUE)
##***part104b;
result <- try(t.test(diff ~ Period, data=chironomid),silent=TRUE)
if(class(result)=="try-error")
{cat("Unable to do unequal variance t-test because of small sample size\n")} else
{
result$diff.in.means <- sum(result$estimate*c(1,-1))
names(result$diff.in.means)<- "diff.in.means"
result$se.diff <- result$statistic / abs(result$diff.in.means)
names(result$se.diff) <- 'SE.diff'
print(result)
print(result$diff.in.means)
print(result$se.diff)
}
##***part104e;
sink()
# do the two sample t-test assuming equal variances
sink('baci-chironomid-R-105.txt', split=TRUE)
##***part105b;
result <- t.test(diff ~ Period, data=chironomid, var.equal=TRUE)
result$diff.in.means <- sum(result$estimate*c(1,-1))
names(result$diff.in.means)<- "diff.in.means"
result$se.diff <- result$statistic / abs(result$diff.in.means)
names(result$se.diff) <- 'SE.diff'
result
result$diff.in.means
result$se.diff
##***part105e;
sink()
# do the two sample Wilcoxon test
sink('baci-chironomid-R-107.txt', split=TRUE)
##***part107b;
result <- wilcox.test(diff ~ Period, data=chironomid, conf.int=TRUE)
result
##***part107e;
sink()
##################################################################
# Do a Mixed effect linear model on the individual values
sink('baci-chironomid-R-300-type3.txt', split=TRUE)
##***part300b;
# Because there is ONLY one measurement per year, the SamplingTime*Site and
# residual variance are total confounded and cannot be separated. This is
# the residual term.
result.lmer <- lmer(Count ~ SiteClass+Period+SiteClass:Period + (1|YearF),
data=chironomid.long)
anova(result.lmer, ddf="Kenward-Roger")
##***part300e;
sink()
summary(result.lmer)
sink('baci-chironomid-R-300-vc.txt', split=TRUE)
##***part300vcb;
# Extract the variance components
vc <- VarCorr(result.lmer)
vc
##***part300vce;
sink()
# LSmeans after a lm() fit
sink('baci-chironomid-R-s300LSM-SiteClass.txt', split=TRUE)
##***parts300LSM-SiteClassb;
result.lmer.lsmo.S <- lsmeans::lsmeans(result.lmer, ~SiteClass)
cat("\n\n Estimated marginal means for SiteClass \n\n")
summary(result.lmer.lsmo.S)
##***parts300LSM-SiteClasse;
sink()
sink('baci-chironomid-R-300LSM-Period.txt', split=TRUE)
##***part300LSM-Periodb;
result.lmer.lsmo.P <- lsmeans::lsmeans(result.lmer, ~Period)
cat("\n\n Estimated marginal means \n\n")
summary(result.lmer.lsmo.P)
##***part300LSM-Periode;
sink()
sink('baci-chironomid-R-300LSM-int.txt', split=TRUE)
##***part300LSM-intb;
result.lmer.lsmo.SP <- lsmeans::lsmeans(result.lmer, ~SiteClass:Period)
cat("\n\n Estimated marginal means \n\n")
summary(result.lmer.lsmo.SP)
##***part300LSM-inte;
sink()
# Estimate the BACI contrast
# You could look at the entry in the summary table from the model fit, but
# this is dangerous as these entries depend on the contrast matrix.
# It is far safer to the contrast function applied to an lsmeans object
temp <- summary(result.lmer)$coefficients # get all the coefficients
temp[grepl("SiteClass",rownames(temp)) & grepl("Period", rownames(temp)),]
sink("baci-chironomid-R-300baci.txt", split=TRUE)
##***part300bacib;
# Estimate the BACI contrast along with a se
contrast(result.lmer.lsmo.SP, list(baci=c(1,-1,-1,1)))
confint(contrast(result.lmer.lsmo.SP, list(baci=c(1,-1,-1,1))))
##***part300bacie;
sink()
# Check the residuals etc
##***part300diagnosticb;
diagplot <- sf.autoplot.lmer(result.lmer)
diagplot
##***part300diagnostice;
ggsave(plot=diagplot, file='baci-chironomid-R-300-diagnostic.png',
h=4, w=6, units="in", dpi=300)
|
/Sampling_Regression_Experiment_Design_and_Analysis/baci-chironomid.r
|
no_license
|
burakbayramli/books
|
R
| false
| false
| 7,078
|
r
|
# BACI-Chironomid
# 2014-11-28 CJS sf.autoplot.lmer
# 2014-11-26 CJS sink, ggplot, ##***, lmer modifications
# Taken from Krebs, Ecological Methodology, 2nd Edition. Box 10.3.
# Estimates of chironomid abundance in sediments were taken at one station
# above and below a pulp mill outflow pipe for 3 years before plant operation
# and for 6 years after plant operation.
options(useFancyQuotes=FALSE) # renders summary output corrects
library(ggplot2)
library(lsmeans)
library(lmerTest)
library(plyr)
source("../../schwarz.functions.r")
# Read in the actual data
sink("baci-chironomid-R-001.txt", split=TRUE)
##***part001b;
cat(" BACI design measuring chironomid counts with multiple (paired) yearly measurements before/after \n\n")
chironomid <- read.csv("baci-chironomid.csv", header=TRUE, as.is=TRUE, strip.white=TRUE)
cat("Listing of part of the raw data \n")
head(chironomid)
##***part001e;
sink()
# The data is NOT in the usual format where there is only one column
# for the response and a separate column indicating if it is a control or
# impact site. We need to restructure the data
sink('baci-chironomid-R-301.txt', split=TRUE)
##***part301b;
# We reshape the data from wide to long format
chironomid.long <- reshape(chironomid, varying=c("Control.Site","Treatment.Site"),
v.names="Count", direction="long",
timevar=c("SiteClass"),
times=c("Control","Impact"),
drop=c("diff"), idvar=c("Year"))
chironomid.long$SiteClass <- factor(chironomid.long$SiteClass)
chironomid.long$Site <- factor(chironomid.long$Site)
chironomid.long$YearF <- factor(chironomid.long$Year)
chironomid.long$Period <- factor(chironomid.long$Period)
head(chironomid.long)
##***part301e;
sink()
str(chironomid.long)
# Get plot of series over time
##***part010b;
prelimplot <- ggplot(data=chironomid.long,
aes(x=Year, y=Count, group=Site, color=SiteClass, shape=Site))+
ggtitle("Fish counts over time")+
geom_point()+
geom_line()+
geom_vline(xintercept=-0.5+min(chironomid.long$Year[as.character(chironomid.long$Period) == "After"]))
prelimplot
##***part010e;
ggsave(plot=prelimplot, file="baci-chironomid-R-010.png", h=4, w=6, units="in", dpi=300)
# There are several ways in which this BACI design can be analyzed.
###########################################################################
# Do a t-test on the differences of the averages for each site
# Because only one measurement was taken at each site in each year, we
# don't have to first average. We can use the wide format data.
sink('baci-chironomid-R-101.txt', split=TRUE)
##***part101b;
# Yearly difference between the impact and control site.
chironomid$diff <- chironomid$Treatment.Site - chironomid$Control.Site
head(chironomid)
##***part101e;
sink()

# Plot the difference over time
##***part102b;
plotdiff <- ggplot(data=chironomid, aes(x=Year, y=diff))+
  ggtitle("Plot of differences over time")+
  ylab("Difference (Impact-Control)")+
  geom_point()+
  geom_line()+
  # Vertical line between the last Before year and the first After year.
  geom_vline(xintercept=-0.5+min(chironomid$Year[as.character(chironomid$Period) == "After"]))
plotdiff
##***part102e;
ggsave(plot=plotdiff, file="baci-chironomid-R-102.png", h=4, w=6, units="in", dpi=300)

# do the two sample t-test not assuming equal variances
sink('baci-chironomid-R-104.txt', split=TRUE)
##***part104b;
result <- try(t.test(diff ~ Period, data=chironomid),silent=TRUE)
if (inherits(result, "try-error"))   # safer than class(result)=="try-error"
{cat("Unable to do unequal variance t-test because of small sample size\n")} else
{
  result$diff.in.means <- sum(result$estimate*c(1,-1))
  names(result$diff.in.means)<- "diff.in.means"
  # BUG FIX: t = diff/SE, so SE = diff/t.  The original computed
  # statistic/abs(diff.in.means), i.e. 1/SE rather than SE.
  result$se.diff <- abs(result$diff.in.means / result$statistic)
  names(result$se.diff) <- 'SE.diff'
  print(result)
  print(result$diff.in.means)
  print(result$se.diff)
}
##***part104e;
sink()

# do the two sample t-test assuming equal variances
sink('baci-chironomid-R-105.txt', split=TRUE)
##***part105b;
result <- t.test(diff ~ Period, data=chironomid, var.equal=TRUE)
result$diff.in.means <- sum(result$estimate*c(1,-1))
names(result$diff.in.means)<- "diff.in.means"
# BUG FIX: SE of the difference is diff/t (see note above).
result$se.diff <- abs(result$diff.in.means / result$statistic)
names(result$se.diff) <- 'SE.diff'
result
result$diff.in.means
result$se.diff
##***part105e;
sink()

# do the two sample Wilcoxon test (nonparametric alternative)
sink('baci-chironomid-R-107.txt', split=TRUE)
##***part107b;
result <- wilcox.test(diff ~ Period, data=chironomid, conf.int=TRUE)
result
##***part107e;
sink()
##################################################################
# Do a Mixed effect linear model on the individual values
sink('baci-chironomid-R-300-type3.txt', split=TRUE)
##***part300b;
# Because there is ONLY one measurement per year, the SamplingTime*Site and
# residual variance are total confounded and cannot be separated. This is
# the residual term.
# YearF is a random blocking factor; SiteClass, Period and their
# interaction (the BACI effect of interest) are fixed effects.
result.lmer <- lmer(Count ~ SiteClass+Period+SiteClass:Period + (1|YearF),
          data=chironomid.long)
anova(result.lmer, ddf="Kenward-Roger")
##***part300e;
sink()
summary(result.lmer)

sink('baci-chironomid-R-300-vc.txt', split=TRUE)
##***part300vcb;
# Extract the variance components
vc <- VarCorr(result.lmer)
vc
##***part300vce;
sink()

# LSmeans after a lm() fit
sink('baci-chironomid-R-s300LSM-SiteClass.txt', split=TRUE)
##***parts300LSM-SiteClassb;
result.lmer.lsmo.S <- lsmeans::lsmeans(result.lmer, ~SiteClass)
cat("\n\n Estimated marginal means for SiteClass \n\n")
summary(result.lmer.lsmo.S)
##***parts300LSM-SiteClasse;
sink()

sink('baci-chironomid-R-300LSM-Period.txt', split=TRUE)
##***part300LSM-Periodb;
result.lmer.lsmo.P <- lsmeans::lsmeans(result.lmer, ~Period)
cat("\n\n Estimated marginal means \n\n")
summary(result.lmer.lsmo.P)
##***part300LSM-Periode;
sink()

sink('baci-chironomid-R-300LSM-int.txt', split=TRUE)
##***part300LSM-intb;
result.lmer.lsmo.SP <- lsmeans::lsmeans(result.lmer, ~SiteClass:Period)
cat("\n\n Estimated marginal means \n\n")
summary(result.lmer.lsmo.SP)
##***part300LSM-inte;
sink()

# Estimate the BACI contrast
# You could look at the entry in the summary table from the model fit, but
# this is dangerous as these entries depend on the contrast matrix.
# It is far safer to use the contrast function applied to an lsmeans object
temp <- summary(result.lmer)$coefficients # get all the coefficients
# Show only the interaction rows (those naming both factors).
temp[grepl("SiteClass",rownames(temp)) & grepl("Period", rownames(temp)),]

sink("baci-chironomid-R-300baci.txt", split=TRUE)
##***part300bacib;
# Estimate the BACI contrast along with a se
# Weights (1,-1,-1,1) over the four SiteClass:Period cell means.
contrast(result.lmer.lsmo.SP, list(baci=c(1,-1,-1,1)))
confint(contrast(result.lmer.lsmo.SP, list(baci=c(1,-1,-1,1))))
##***part300bacie;
sink()

# Check the residuals etc
##***part300diagnosticb;
diagplot <- sf.autoplot.lmer(result.lmer)   # from schwarz.functions.r
diagplot
##***part300diagnostice;
ggsave(plot=diagplot, file='baci-chironomid-R-300-diagnostic.png',
       h=4, w=6, units="in", dpi=300)
|
#' @title distance
#' @description
#' Computes Jaro-Winkler string distances between a fixed list of main
#' meteorite types and the recorded classes of the downloaded meteorite
#' data.  A distance of 1 means no similarity at all; 0 means the strings
#' are identical.  A low value therefore only suggests a possible match --
#' it does not prove two objects share the same main type.
#' @usage
#' distance()
#' @return A matrix of string distances, with the main types and the
#'   recorded classes as dimnames.
#' @export
distance <- function() {
  # Fixed reference list of main meteorite types.
  main_types <- c("CM", "CO", "CI", "CR",
                  "CV", "Diagonite", "EH", "EL",
                  "Eucrite", "Acapulcoite", "Achondrite", "Angrite",
                  "Aubrite", "H", "Iron", "L", "Martian", "Mesosiderite")
  # Recorded class of every downloaded meteorite record.
  recorded_classes <- c(meteroitesapi()$recclass)
  # Pairwise Jaro-Winkler ("jw") distances; useNames keeps dimnames.
  stringdist::stringdistmatrix(main_types, recorded_classes,
                               method = "jw", useNames = TRUE)
}
|
/R/Distance.R
|
permissive
|
Oviing/meteroites2
|
R
| false
| false
| 1,329
|
r
|
#' @title distance
#' @description
#' Computes Jaro-Winkler string distances between a fixed list of main
#' meteorite types and the recorded classes of the downloaded meteorite
#' data.  A distance of 1 means no similarity at all; 0 means the strings
#' are identical.  A low value therefore only suggests a possible match --
#' it does not prove two objects share the same main type.
#' @usage
#' distance()
#' @return A matrix of string distances, with the main types and the
#'   recorded classes as dimnames.
#' @export
distance <- function() {
  # Fixed reference list of main meteorite types.
  main_types <- c("CM", "CO", "CI", "CR",
                  "CV", "Diagonite", "EH", "EL",
                  "Eucrite", "Acapulcoite", "Achondrite", "Angrite",
                  "Aubrite", "H", "Iron", "L", "Martian", "Mesosiderite")
  # Recorded class of every downloaded meteorite record.
  recorded_classes <- c(meteroitesapi()$recclass)
  # Pairwise Jaro-Winkler ("jw") distances; useNames keeps dimnames.
  stringdist::stringdistmatrix(main_types, recorded_classes,
                               method = "jw", useNames = TRUE)
}
|
##' kfadvance function
##'
##' One step of the Kalman filter.  Advances the filtered state
##' (oldmean, oldvar) through the linear state-space model
##'   state:       x_t = A x_{t-1} + B + C w_t,  w_t ~ N(0, W)
##'   observation: y_t = D x_t + E + F v_t,      v_t ~ N(0, V)
##' and conditions on the observation `obs`, optionally also returning
##' the marginal (log-)likelihood contribution of the observation.
##'
##' @param obs observed data vector y_t (may contain NAs when na.rm=TRUE)
##' @param oldmean filtered state mean from the previous time step
##' @param oldvar filtered state covariance from the previous time step
##' @param A state transition matrix
##' @param B state offset vector
##' @param C matrix mapping process noise into the state
##' @param D observation matrix
##' @param E observation offset vector
##' @param F matrix mapping observation noise into the observation
##' @param W process noise covariance
##' @param V observation noise covariance
##' @param marglik if TRUE, also return the marginal likelihood of obs
##' @param log if TRUE, the marginal likelihood is on the log scale
##' @param na.rm if TRUE, drop missing entries of obs (D, E, F are
##'   subset to the observed components first)
##' @return a list with elements mean and var (updated state mean and
##'   covariance), plus mlik when marglik=TRUE
##' @export
kfadvance <- function (obs, oldmean, oldvar, A, B, C, D, E, F, W, V, marglik = FALSE,log = TRUE, na.rm = FALSE){
    if (na.rm) {
        if (any(is.na(obs))) {
            if (all(is.na(obs))) {
                # Nothing observed: return the pure prediction step.
                # mlik is the identity for the likelihood accumulator
                # (0 on log scale, 1 on natural scale).
                if (log) {
                    return(list(mean = A %*% oldmean + B, var = A %*%
                        oldvar %*% t(A) + C %*% W %*% t(C), mlik = 0))
                }
                else {
                    return(list(mean = A %*% oldmean + B, var = A %*%
                        oldvar %*% t(A) + C %*% W %*% t(C), mlik = 1))
                }
            }
            else {
                # Partially observed: keep only the non-missing rows of
                # the observation equation via a selection matrix M.
                # NOTE(review): M[-which(...), ] drops to a plain vector
                # when exactly one observed row remains -- confirm
                # callers never hit that case.
                M <- diag(length(obs))
                M <- M[-which(is.na(obs)), ]
                obs <- obs[which(!is.na(obs))]
                D <- M %*% D
                E <- M %*% E
                F <- M %*% F
            }
        }
    }
    # Prediction step.  (T shadows the TRUE shorthand locally.)
    T <- A %*% oldmean + B                           # predicted state mean
    S <- A %*% oldvar %*% t(A) + C %*% W %*% t(C)    # predicted state covariance
    thing1 <- D %*% S
    tD <- t(D)
    K <- thing1 %*% tD + F %*% V %*% t(F)            # innovation covariance
    margmean <- D %*% T + E                          # predicted observation mean
    resid <- obs - margmean                          # innovation (residual)
    if (marglik == TRUE) {
        if (all(dim(K) == 1)) {
            # Scalar observation: avoid matrix inversion entirely.
            thing2 <- S %*% tD
            newmean <- T + as.numeric(1/K) * thing2 %*% resid
            newvar <- S - as.numeric(1/K) * thing2 %*% thing1
            marginal <- dnorm(obs, as.numeric(margmean), sqrt(as.numeric(K)),
                log = log)
        }
        else {
            # Invert K via its Cholesky factor (also yields log det K).
            Kchol <- chol(K)
            Kcholinv <- solve(Kchol)
            logdetK <- 2*sum(log(diag(Kchol)))
            Kinv <- Kcholinv%*%t(Kcholinv)
            #Kinv <- solve(K)
            thing3 <- tD %*% Kinv
            thing4 <- S %*% thing3
            newmean <- T + thing4 %*% resid
            newvar <- S - thing4 %*% thing1
            #marginal <- -(1/2)*determinant(K)$modulus + (-1/2) * t(resid) %*% Kinv %*% resid
            # Gaussian log-density of the innovation.  NOTE(review):
            # unlike the scalar branch (dnorm), this omits the constant
            # -(p/2)*log(2*pi), so the two branches differ by a constant.
            marginal <- -(1/2)*logdetK + (-1/2) * t(resid) %*% Kinv %*% resid
            #marginal <- dmvnorm(as.vector(obs),as.vector(margmean),K,log=TRUE)
            if (!log) {
                marginal <- exp(marginal)
            }
        }
        return(list(mean = newmean, var = newvar, mlik = marginal))
    }
    else {
        # Same update step without the likelihood computation.
        if (all(dim(K) == 1)) {
            thing2 <- S %*% tD
            newmean <- T + as.numeric(1/K) * thing2 %*% resid
            newvar <- S - as.numeric(1/K) * thing2 %*% thing1
        }
        else {
            #Kinv <- solve(K)
            Kchol <- chol(K)
            Kcholinv <- solve(Kchol)
            #logdetK <- 2*sum(log(diag(Kchol)))
            Kinv <- Kcholinv%*%t(Kcholinv)
            thing3 <- tD %*% Kinv
            thing4 <- S %*% thing3
            newmean <- T + thing4 %*% resid
            newvar <- S - thing4 %*% thing1
        }
        return(list(mean = newmean, var = newvar))
    }
}
|
/R/kfadvance.R
|
no_license
|
bentaylor1/kalmanST
|
R
| false
| false
| 3,117
|
r
|
##' kfadvance function
##'
##' One step of the Kalman filter.  Advances the filtered state
##' (oldmean, oldvar) through the linear state-space model
##'   state:       x_t = A x_{t-1} + B + C w_t,  w_t ~ N(0, W)
##'   observation: y_t = D x_t + E + F v_t,      v_t ~ N(0, V)
##' and conditions on the observation `obs`, optionally also returning
##' the marginal (log-)likelihood contribution of the observation.
##'
##' @param obs observed data vector y_t (may contain NAs when na.rm=TRUE)
##' @param oldmean filtered state mean from the previous time step
##' @param oldvar filtered state covariance from the previous time step
##' @param A state transition matrix
##' @param B state offset vector
##' @param C matrix mapping process noise into the state
##' @param D observation matrix
##' @param E observation offset vector
##' @param F matrix mapping observation noise into the observation
##' @param W process noise covariance
##' @param V observation noise covariance
##' @param marglik if TRUE, also return the marginal likelihood of obs
##' @param log if TRUE, the marginal likelihood is on the log scale
##' @param na.rm if TRUE, drop missing entries of obs (D, E, F are
##'   subset to the observed components first)
##' @return a list with elements mean and var (updated state mean and
##'   covariance), plus mlik when marglik=TRUE
##' @export
kfadvance <- function (obs, oldmean, oldvar, A, B, C, D, E, F, W, V, marglik = FALSE,log = TRUE, na.rm = FALSE){
    if (na.rm) {
        if (any(is.na(obs))) {
            if (all(is.na(obs))) {
                # Nothing observed: return the pure prediction step.
                # mlik is the identity for the likelihood accumulator
                # (0 on log scale, 1 on natural scale).
                if (log) {
                    return(list(mean = A %*% oldmean + B, var = A %*%
                        oldvar %*% t(A) + C %*% W %*% t(C), mlik = 0))
                }
                else {
                    return(list(mean = A %*% oldmean + B, var = A %*%
                        oldvar %*% t(A) + C %*% W %*% t(C), mlik = 1))
                }
            }
            else {
                # Partially observed: keep only the non-missing rows of
                # the observation equation via a selection matrix M.
                # NOTE(review): M[-which(...), ] drops to a plain vector
                # when exactly one observed row remains -- confirm
                # callers never hit that case.
                M <- diag(length(obs))
                M <- M[-which(is.na(obs)), ]
                obs <- obs[which(!is.na(obs))]
                D <- M %*% D
                E <- M %*% E
                F <- M %*% F
            }
        }
    }
    # Prediction step.  (T shadows the TRUE shorthand locally.)
    T <- A %*% oldmean + B                           # predicted state mean
    S <- A %*% oldvar %*% t(A) + C %*% W %*% t(C)    # predicted state covariance
    thing1 <- D %*% S
    tD <- t(D)
    K <- thing1 %*% tD + F %*% V %*% t(F)            # innovation covariance
    margmean <- D %*% T + E                          # predicted observation mean
    resid <- obs - margmean                          # innovation (residual)
    if (marglik == TRUE) {
        if (all(dim(K) == 1)) {
            # Scalar observation: avoid matrix inversion entirely.
            thing2 <- S %*% tD
            newmean <- T + as.numeric(1/K) * thing2 %*% resid
            newvar <- S - as.numeric(1/K) * thing2 %*% thing1
            marginal <- dnorm(obs, as.numeric(margmean), sqrt(as.numeric(K)),
                log = log)
        }
        else {
            # Invert K via its Cholesky factor (also yields log det K).
            Kchol <- chol(K)
            Kcholinv <- solve(Kchol)
            logdetK <- 2*sum(log(diag(Kchol)))
            Kinv <- Kcholinv%*%t(Kcholinv)
            #Kinv <- solve(K)
            thing3 <- tD %*% Kinv
            thing4 <- S %*% thing3
            newmean <- T + thing4 %*% resid
            newvar <- S - thing4 %*% thing1
            #marginal <- -(1/2)*determinant(K)$modulus + (-1/2) * t(resid) %*% Kinv %*% resid
            # Gaussian log-density of the innovation.  NOTE(review):
            # unlike the scalar branch (dnorm), this omits the constant
            # -(p/2)*log(2*pi), so the two branches differ by a constant.
            marginal <- -(1/2)*logdetK + (-1/2) * t(resid) %*% Kinv %*% resid
            #marginal <- dmvnorm(as.vector(obs),as.vector(margmean),K,log=TRUE)
            if (!log) {
                marginal <- exp(marginal)
            }
        }
        return(list(mean = newmean, var = newvar, mlik = marginal))
    }
    else {
        # Same update step without the likelihood computation.
        if (all(dim(K) == 1)) {
            thing2 <- S %*% tD
            newmean <- T + as.numeric(1/K) * thing2 %*% resid
            newvar <- S - as.numeric(1/K) * thing2 %*% thing1
        }
        else {
            #Kinv <- solve(K)
            Kchol <- chol(K)
            Kcholinv <- solve(Kchol)
            #logdetK <- 2*sum(log(diag(Kchol)))
            Kinv <- Kcholinv%*%t(Kcholinv)
            thing3 <- tD %*% Kinv
            thing4 <- S %*% thing3
            newmean <- T + thing4 %*% resid
            newvar <- S - thing4 %*% thing1
        }
        return(list(mean = newmean, var = newvar))
    }
}
|
# Set up ------------------------------------------------------------------
library(ggplot2)
library(purrr)
library(sf)
# BUG FIX: str_glue() is used in the ggsave() calls below but stringr was
# never loaded, so the script failed at the first save.
library(stringr)

# Output folder for the generated pictures.
outfolder <- "04-garabato-pictures/"
if (!dir.exists(outfolder)) dir.create(outfolder)

# Functions ---------------------------------------------------------------

# 2x2 rotation matrix for an angle `a` given in radians.
rotate <- function(a) matrix(c(cos(a), sin(a), -sin(a), cos(a)), 2, 2)

# Convert degrees to radians.
rad <- function(degree) degree / 360 * 2 * pi

# Draw a random "scribble": sample 2*trazos values from the random
# generator `f` (e.g. rnorm, runif), pair them up as (x, y) points,
# rotate the cloud by 45 degrees and return it as an sf linestring.
# `...` is forwarded to `f`; each argument is repeated so x and y of a
# point share the same parameter value.
garabato <- function(f, trazos, ..., seed = NULL) {
  set.seed(seed)
  args <- map(list(...), ~ rep(.x, each = 2))
  args$n <- trazos * 2
  output <- do.call(f, args) %>%
    matrix(ncol = 2, byrow = TRUE)
  (output %*% rotate(rad(45))) %>%
    st_linestring()
}

# Shared plotting style for all pictures.
custom_plot <- function(sf_obj) {
  sf_obj %>%
    ggplot() +
    geom_sf(color = "steelblue", size = 0.5, alpha = 0.5, fill = "#FDD103") +
    theme_void(base_family = "Avenir Next Condensed") +
    theme(plot.background = element_rect(fill = "antiquewhite", color = "antiquewhite"),
          plot.title = element_text(hjust = 0.5))
}

# Drawings ----------------------------------------------------------------
garabato(rnorm, trazos = 200, mean = 1:200, sd = sqrt(1:200), seed = 123) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("normal distribution with increasing center and scale")
ggsave(str_glue("{outfolder}pic-1.png"), device = "png", dpi = "print", bg = "antiquewhite")

garabato(rnorm, trazos = 200, mean = 0, sd = 1, seed = 123) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("standard normal distribution")
ggsave(str_glue("{outfolder}pic-2.png"), device = "png", dpi = "print", bg = "antiquewhite")

garabato(rnorm, 200, mean = c(rep(1, 100), rep(10, 100)), sd = 2, seed = 123) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("mixture of two normals")
ggsave(str_glue("{outfolder}pic-3.png"), device = "png", dpi = "print", bg = "antiquewhite")

garabato(runif, 200, min = log(1:100), max = 1:100) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("not a uniform distribution")
ggsave(str_glue("{outfolder}pic-4.png"), device = "png", dpi = "print", bg = "antiquewhite")
|
/04-garabato-pictures.R
|
no_license
|
acastroaraujo/visualization
|
R
| false
| false
| 2,144
|
r
|
# Set up ------------------------------------------------------------------
library(ggplot2)
library(purrr)
library(sf)
# BUG FIX: str_glue() is used in the ggsave() calls below but stringr was
# never loaded, so the script failed at the first save.
library(stringr)

# Output folder for the generated pictures.
outfolder <- "04-garabato-pictures/"
if (!dir.exists(outfolder)) dir.create(outfolder)

# Functions ---------------------------------------------------------------

# 2x2 rotation matrix for an angle `a` given in radians.
rotate <- function(a) matrix(c(cos(a), sin(a), -sin(a), cos(a)), 2, 2)

# Convert degrees to radians.
rad <- function(degree) degree / 360 * 2 * pi

# Draw a random "scribble": sample 2*trazos values from the random
# generator `f` (e.g. rnorm, runif), pair them up as (x, y) points,
# rotate the cloud by 45 degrees and return it as an sf linestring.
# `...` is forwarded to `f`; each argument is repeated so x and y of a
# point share the same parameter value.
garabato <- function(f, trazos, ..., seed = NULL) {
  set.seed(seed)
  args <- map(list(...), ~ rep(.x, each = 2))
  args$n <- trazos * 2
  output <- do.call(f, args) %>%
    matrix(ncol = 2, byrow = TRUE)
  (output %*% rotate(rad(45))) %>%
    st_linestring()
}

# Shared plotting style for all pictures.
custom_plot <- function(sf_obj) {
  sf_obj %>%
    ggplot() +
    geom_sf(color = "steelblue", size = 0.5, alpha = 0.5, fill = "#FDD103") +
    theme_void(base_family = "Avenir Next Condensed") +
    theme(plot.background = element_rect(fill = "antiquewhite", color = "antiquewhite"),
          plot.title = element_text(hjust = 0.5))
}

# Drawings ----------------------------------------------------------------
garabato(rnorm, trazos = 200, mean = 1:200, sd = sqrt(1:200), seed = 123) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("normal distribution with increasing center and scale")
ggsave(str_glue("{outfolder}pic-1.png"), device = "png", dpi = "print", bg = "antiquewhite")

garabato(rnorm, trazos = 200, mean = 0, sd = 1, seed = 123) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("standard normal distribution")
ggsave(str_glue("{outfolder}pic-2.png"), device = "png", dpi = "print", bg = "antiquewhite")

garabato(rnorm, 200, mean = c(rep(1, 100), rep(10, 100)), sd = 2, seed = 123) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("mixture of two normals")
ggsave(str_glue("{outfolder}pic-3.png"), device = "png", dpi = "print", bg = "antiquewhite")

garabato(runif, 200, min = log(1:100), max = 1:100) %>%
  st_cast("MULTIPOLYGON") %>%
  custom_plot() +
  ggtitle("not a uniform distribution")
ggsave(str_glue("{outfolder}pic-4.png"), device = "png", dpi = "print", bg = "antiquewhite")
|
# Rserve HTTP handler: answers /user/<name> with <name> as plain text,
# and with an empty body for "/", "" and "/user".
.http.request <- function(path, query, body, headers) {
  # The three "root" paths get an empty response body.
  is_root <- path == "/" || path == "" || path == "/user"
  if (is_root) {
    payload <- ""
  } else {
    # Extract the <name> capture group of /user/<name>.
    hits <- regmatches(path, regexec("^/user/(.*)", path))
    payload <- hits[[1]][2]
  }
  # Rserve expects: body, content type, extra headers, status code.
  list(payload, "text/plain", character(0), 200L)
}
Rserve::run.Rserve(http.port = 3000)
|
/r/rserve/app.R
|
permissive
|
the-benchmarker/web-frameworks
|
R
| false
| false
| 444
|
r
|
# Rserve HTTP handler: answers /user/<name> with <name> as plain text,
# and with an empty body for "/", "" and "/user".
.http.request <- function(path, query, body, headers) {
  # The three "root" paths get an empty response body.
  is_root <- path == "/" || path == "" || path == "/user"
  if (is_root) {
    payload <- ""
  } else {
    # Extract the <name> capture group of /user/<name>.
    hits <- regmatches(path, regexec("^/user/(.*)", path))
    payload <- hits[[1]][2]
  }
  # Rserve expects: body, content type, extra headers, status code.
  list(payload, "text/plain", character(0), 200L)
}
Rserve::run.Rserve(http.port = 3000)
|
# Coursera "R Programming" quiz 1 answers against hw1_data.csv
# (airquality-style data: Ozone, Solar.R, Wind, Temp, Month, Day).
setwd("C:/Users/sli126/Documents/GitHub/R_Programming")
data<-read.csv("hw1_data.csv")
#11  (BUG FIX: was "$11", a syntax error -- comment marker intended)
# column names of the data frame
names(data)
#12  first two rows
data[1:2,]
#13  attributes of the data frame
attributes(data)
#14  last two rows (both forms give the same answer)
data[152:153,]
tail(data,n=2)
#15  Ozone value in row 47
data[47,]$Ozone
#16  number of missing Ozone values
sum(as.numeric(is.na(data$Ozone)))
#17  mean Ozone excluding missing values
miss<-is.na(data$Ozone)
nmiss<-data$Ozone[!miss]
mean(nmiss)
#18  mean Solar.R where Ozone > 31 and Temp > 90
# BUG FIX: bare Ozone/Temp are not in scope inside [ ]; qualify with data$.
m<-data[data$Ozone>31 & data$Temp>90,]$Solar.R
mean(m[!is.na(m)])
#19  mean Temp in June (Month == 6)
# BUG FIX: bare Month is not in scope; qualify with data$.
m6<-data[data$Month==6,]$Temp
mean(m6[!is.na(m6)])
#20  maximum Ozone in May (Month == 5)
# BUG FIX: bare Month is not in scope; qualify with data$.
m5<-data[data$Month==5,]$Ozone
max(m5[!is.na(m5)])
|
/quiz1.R
|
no_license
|
hcydlee/R_Programming
|
R
| false
| false
| 459
|
r
|
# Coursera "R Programming" quiz 1 answers against hw1_data.csv
# (airquality-style data: Ozone, Solar.R, Wind, Temp, Month, Day).
setwd("C:/Users/sli126/Documents/GitHub/R_Programming")
data<-read.csv("hw1_data.csv")
#11  (BUG FIX: was "$11", a syntax error -- comment marker intended)
# column names of the data frame
names(data)
#12  first two rows
data[1:2,]
#13  attributes of the data frame
attributes(data)
#14  last two rows (both forms give the same answer)
data[152:153,]
tail(data,n=2)
#15  Ozone value in row 47
data[47,]$Ozone
#16  number of missing Ozone values
sum(as.numeric(is.na(data$Ozone)))
#17  mean Ozone excluding missing values
miss<-is.na(data$Ozone)
nmiss<-data$Ozone[!miss]
mean(nmiss)
#18  mean Solar.R where Ozone > 31 and Temp > 90
# BUG FIX: bare Ozone/Temp are not in scope inside [ ]; qualify with data$.
m<-data[data$Ozone>31 & data$Temp>90,]$Solar.R
mean(m[!is.na(m)])
#19  mean Temp in June (Month == 6)
# BUG FIX: bare Month is not in scope; qualify with data$.
m6<-data[data$Month==6,]$Temp
mean(m6[!is.na(m6)])
#20  maximum Ozone in May (Month == 5)
# BUG FIX: bare Month is not in scope; qualify with data$.
m5<-data[data$Month==5,]$Ozone
max(m5[!is.na(m5)])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.