content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## raster_functions.R
## Raster helper functions.
## Author: Tim Raupach <timothy.raupach@giub.unibe.ch>
require(raster)
require(data.table)
require(rgdal)
require(rgeos)
require(sp)
cellToRaster = function(cell, proj) {
    ## Convert cell points to a raster object.
    ##
    ## Args:
    ##   cell: data.table, must contain x, y, and coordinates must be
    ##         regularly spaced.
    ##   proj: The projection for x and y coordinates.
    ##
    ## Returns: a raster object for the cell.
    stopifnot(c("x","y") %in% names(cell))
    ## Set projection (promotes the data.table to a SpatialPointsDataFrame).
    coordinates(cell) = ~x+y
    proj4string(cell) = proj
    ## Set gridded to TRUE, and print any warning except a warning about empty
    ## columns/rows.
    ## BUG FIX: the warning text was matched with stringr::str_detect(), but
    ## stringr is never loaded by this file; use base grepl() instead
    ## (fixed=TRUE because the needle is a literal string, not a regex).
    cell = tryCatch({
        gridded(cell) <- TRUE
        cell },
        warning = function(w) {
            if(!grepl("grid has empty column/rows", as.character(w), fixed=TRUE))
                warning(as.character(w))
            suppressWarnings(gridded(cell) <- TRUE)
            return(cell)
        })
    stopifnot(gridded(cell) == TRUE)
    ## Convert the gridded cell to a raster.
    cell = raster(cell)
    return(cell)
}
| /code/R/library/raster_functions.R | permissive | traupach/stormtrack | R | false | false | 1,147 | r | ## raster_functions.R
## Raster helper functions.
## Author: Tim Raupach <timothy.raupach@giub.unibe.ch>
require(raster)
require(data.table)
require(rgdal)
require(rgeos)
require(sp)
cellToRaster = function(cell, proj) {
    ## Convert cell points to a raster object.
    ##
    ## Args:
    ##   cell: data.table, must contain x, y, and coordinates must be
    ##         regularly spaced.
    ##   proj: The projection for x and y coordinates.
    ##
    ## Returns: a raster object for the cell.
    stopifnot(c("x","y") %in% names(cell))
    ## Set projection (promotes the data.table to a SpatialPointsDataFrame).
    coordinates(cell) = ~x+y
    proj4string(cell) = proj
    ## Set gridded to TRUE, and print any warning except a warning about empty
    ## columns/rows.
    ## BUG FIX: the warning text was matched with stringr::str_detect(), but
    ## stringr is never loaded by this file; use base grepl() instead
    ## (fixed=TRUE because the needle is a literal string, not a regex).
    cell = tryCatch({
        gridded(cell) <- TRUE
        cell },
        warning = function(w) {
            if(!grepl("grid has empty column/rows", as.character(w), fixed=TRUE))
                warning(as.character(w))
            suppressWarnings(gridded(cell) <- TRUE)
            return(cell)
        })
    stopifnot(gridded(cell) == TRUE)
    ## Convert the gridded cell to a raster.
    cell = raster(cell)
    return(cell)
}
|
# Generic function setP, set Parameter
setGeneric("setP",
function(object, ...) { standardGeneric("setP")} )
# Generic function getP, get Parameter
setGeneric("getP",
function(object, ...) { standardGeneric("getP")} )
#Methods for signature x12Parameter
setMethod(
f='setP',
signature=signature(object = "x12Parameter"),
## Set one or more parameters on an x12Parameter object.
##
## Args:
##   object: an x12Parameter object.
##   listP:  named list; names must be settable x12Parameter slot names
##           (see 'paras' below), values are the new slot values.
##
## Returns: the modified x12Parameter object. Entries of listP whose names
## are not recognised are skipped with a warning listing them.
definition=function(object, listP) {
## Whitelist of slots that may be set through this interface. The
## commented-out entries are intentionally excluded from user access.
paras <- c(
#"period",
"series.span",
"series.modelspan",
#"series.type",
#"decimals",
"transform.function",
"transform.power",
"transform.adjust",
"regression.variables",
"regression.user",
"regression.file",
"regression.usertype",
"regression.centeruser",
"regression.start",
"regression.aictest",
#"outlier",
"outlier.types",
"outlier.critical",
"outlier.span",
"outlier.method",
"identify",
"identify.diff",
"identify.sdiff",
"identify.maxlag",
"arima.model",
"arima.smodel",
"arima.ar",
"arima.ma",
"automdl",
"automdl.acceptdefault",
"automdl.balanced",
"automdl.maxorder",
"automdl.maxdiff",
"forecast_years",
"backcast_years",
"forecast_conf",
"estimate",
"estimate.outofsample",
"check",
"check.maxlag",
"slidingspans",
"slidingspans.fixmdl",
"slidingspans.fixreg",
"slidingspans.length",
"slidingspans.numspans",
"slidingspans.outlier",
"slidingspans.additivesa",
"slidingspans.start",
"history",
"history.estimates",
"history.fixmdl",
"history.fixreg",
"history.outlier",
"history.sadjlags",
"history.trendlags",
"history.start",
"history.target",
"x11.sigmalim",
"x11.type",
"x11.sfshort",
"x11.samode",
"x11.seasonalma",
"x11.trendma",
"x11.appendfcst",
"x11.appendbcst",
"x11.calendarsigma",
"x11.excludefcst",
"x11.final",
"x11regression"
#"tblnames",
#"Rtblnames",
#"seats",
#"seatsparameter"
)
## Logical mask over listP: which entries are recognised parameters?
mn <- names(listP)%in%paras
if(any(!mn)){
warning("The following parameters could not be matched: ",paste(names(listP)[!mn],collapse=" , "))
}
## Keep only recognised names and copy each value into its slot.
mn <- names(listP)[mn]
for(nam in mn){
slot(object,nam) <- listP[[nam]]
}
return(object)
}
)
setMethod(
f='getP',
signature=signature(object = "x12Parameter"),
definition=function(object, whichP) {
paras <- c(
#"period",
"series.span",
"series.modelspan",
#"series.type",
#"decimals",
"transform.function",
"transform.power",
"transform.adjust",
"regression.variables",
"regression.user",
"regression.file",
"regression.usertype",
"regression.centeruser",
"regression.start",
"regression.aictest",
#"outlier",
"outlier.types",
"outlier.critical",
"outlier.span",
"outlier.method",
"identify",
"identify.diff",
"identify.sdiff",
"identify.maxlag",
"arima.model",
"arima.smodel",
"arima.ar",
"arima.ma",
"automdl",
"automdl.acceptdefault",
"automdl.balanced",
"automdl.maxorder",
"automdl.maxdiff",
"forecast_years",
"backcast_years",
"forecast_conf",
"estimate",
"estimate.outofsample",
"check",
"check.maxlag",
"slidingspans",
"slidingspans.fixmdl",
"slidingspans.fixreg",
"slidingspans.length",
"slidingspans.numspans",
"slidingspans.outlier",
"slidingspans.additivesa",
"slidingspans.start",
"history",
"history.estimates",
"history.fixmdl",
"history.fixreg",
"history.outlier",
"history.sadjlags",
"history.trendlags",
"history.start",
"history.target",
"x11.sigmalim",
"x11.type",
"x11.sfshort",
"x11.samode",
"x11.seasonalma",
"x11.trendma",
"x11.appendfcst",
"x11.appendbcst",
"x11.calendarsigma",
"x11.excludefcst",
"x11.final",
"x11regression"
#"tblnames",
#"Rtblnames",
#"seats",
#"seatsparameter"
)
mn <- whichP%in%paras
if(any(!mn)){
warning("The following parameters could not be matched: ",paste(whichP[!mn],collapse=" , "))
}
mn <- whichP[mn]
ret <- list()
for(nam in mn){
ret[[nam]] <- slot(object,nam)
}
return(ret)
}
)
#Methods for signature x12Single
setMethod(
  f = 'getP',
  signature = signature(object = "x12Single"),
  ## A single series keeps its parameters in the x12Parameter slot, so a
  ## query is simply forwarded there.
  definition = function(object, whichP) {
    getP(object@x12Parameter, whichP = whichP)
  })
setMethod(
  f = 'setP',
  signature = signature(object = "x12Single"),
  ## Forward the parameter update to the embedded x12Parameter object and
  ## return the modified series.
  definition = function(object, listP) {
    object@x12Parameter <- setP(object@x12Parameter, listP = listP)
    return(object)
  })
#Methods for signature x12Batch
setMethod(
  f = 'getP',
  signature = signature(object = "x12Batch"),
  ## Get parameters from every series in the batch, or from a subset
  ## selected by 'index' (numeric positions or series names).
  ##
  ## Returns: a list with one getP() result per selected series.
  definition = function(object, whichP, index = NULL) {
    ret <- list()
    if (is.null(index)) {  ## no index: query every series
      cat("The parameters for all objects are shown.\n")
      for (i in seq_along(object@x12List)) {
        ret[[length(ret) + 1]] <- getP(object@x12List[[i]], whichP = whichP)
      }
    } else {
      ## BUG FIX: was is.integer(index), which rejects index=1 (a double
      ## literal in R) with the misleading "must be either integer or
      ## character" error; use is.numeric() as the setP() method does.
      if (is.numeric(index)) {
        if (min(index) > 0 && max(index) <= length(object@x12List)) {
          for (i in index) {
            ret[[length(ret) + 1]] <- getP(object@x12List[[i]], whichP = whichP)
          }
        } else
          stop("argument index is out of bounds!\n")
      } else if (is.character(index)) {
        ## Map series names to their positions in the batch.
        namTS <- vector()
        for (i in seq_along(object@x12List)) {
          namTS <- c(namTS, object@x12List[[i]]@tsName)
        }
        if (all(index %in% namTS)) {
          for (nam in index) {
            ind <- which(nam == namTS)
            ret[[length(ret) + 1]] <- getP(object@x12List[[ind]], whichP = whichP)
          }
        } else
          stop("argument index contained names not found in the series names!\n")
      } else
        stop("argument index must be either integer or character!\n")
    }
    return(ret)
  })
setMethod(
f='setP',
signature=signature(object = "x12Batch"),definition=function(object, listP,index=NULL) {
if(is.null(index)){##changing all
cat("The parameters for all objects are changed.\n")
for(i in 1:length(object@x12List)){
object@x12List[[i]] <- setP(object@x12List[[i]],listP=listP)
}
}else{
if(is.numeric(index)){
if(min(index)>0&max(index)<=length(object@x12List)){
for(i in index){
object@x12List[[i]] <- setP(object@x12List[[i]],listP=listP)
}
}else
stop("argument index is out of bounds!\n")
}else if(is.character(index)){
namTS <- vector()
for(i in 1:length(object@x12List)){
namTS <- c(namTS,object@x12List[[i]]@tsName)
}
if(all(index%in%namTS)){
for(nam in index){
ind <- which(nam==namTS)
object@x12List[[ind]] <- setP(object@x12List[[ind]],listP=listP)
}
}else
stop("argument index contained names not found in the series names!\n")
}else
stop("argument index must be either integer or character!\n")
}
return(object)
})
#Goto previous parameter setting and output
# Generic function prev, cleanArchive
setGeneric("prev",
function(object, ...) { standardGeneric("prev")} )
setMethod(
f='prev',
## Roll a single series back to an archived run.
##
## Args:
##   object: an x12Single object.
##   n: index of the archived run to restore; when NULL, the most recent
##      archived run is used.
##
## Returns: the object with x12Output/x12Parameter replaced by the selected
## archived versions, and that entry removed from both archives.
signature=signature(object = "x12Single"),definition=function(object,n=NULL) {
if(is.null(n))
ind <- length(object@x12OldParameter)
else if(n%in%c(1:length(object@x12OldParameter)))
ind <- n
else
stop("Please provide an index corresponding to a previous run. (see summary with oldOutput>0)")
## Restore the archived output and parameters as the current ones.
object@x12Output <- object@x12OldOutput[[ind]]
object@x12Parameter <- object@x12OldParameter[[ind]]
## Rebuild both archives without the restored entry, keeping the output
## and parameter archives aligned by position.
oldout <- list()
oldpar <- list()
for(i in 1:length(object@x12OldParameter)){
if(i!=ind){
oldout[[length(oldout)+1]] <- object@x12OldOutput[[i]]
oldpar[[length(oldpar)+1]] <- object@x12OldParameter[[i]]
}
}
object@x12OldOutput <- oldout
object@x12OldParameter <- oldpar
return(object)
})
setMethod(
f='prev',
signature=signature(object = "x12Batch"),definition=function(object,index=NULL,n=NULL) {
if(is.null(index)){##changing all
cat("All current parameters and outputs are replaced by the previous ones.\n")
for(i in 1:length(object@x12List)){
object@x12List[[i]] <- prev(object@x12List[[i]],n=n)
}
}else{
if(is.numeric(index)){
if(min(index)>0&max(index)<=length(object@x12List)){
for(i in index){
object@x12List[[i]] <- prev(object@x12List[[i]],n=n)
}
}else
stop("argument index is out of bounds!\n")
}else if(is.character(index)){
namTS <- vector()
for(i in 1:length(object@x12List)){
namTS <- c(namTS,object@x12List[[i]]@tsName)
}
if(all(index%in%namTS)){
for(nam in index){
ind <- which(nam==namTS)
object@x12List[[ind]] <- prev(object@x12List[[ind]],n=n)
}
}else
stop("argument index contained names not found in the series names!\n")
}else
stop("argument index must be either integer or character!\n")
}
return(object)
})
setGeneric("cleanArchive",
function(object, ...) { standardGeneric("cleanArchive")} )
setGeneric("cleanHistory",
function(object, ...) {
.Deprecated("cleanArchive")
cleanArchive(object,...)
} )
setMethod(
  f = 'cleanArchive',
  signature = signature(object = "x12Single"),
  ## Drop all archived runs: reset both the stored old parameters and the
  ## stored old outputs of this series to empty lists.
  definition = function(object) {
    object@x12OldOutput <- list()
    object@x12OldParameter <- list()
    return(object)
  })
setMethod(
f='cleanArchive',
signature=signature(object = "x12Batch"),definition=function(object,index=NULL) {
if(is.null(index)){##changing all
cat("All previous parameters and outputs are deleted.\n")
for(i in 1:length(object@x12List)){
object@x12List[[i]] <- cleanArchive(object@x12List[[i]])
}
}else{
if(is.numeric(index)){
if(min(index)>0&max(index)<=length(object@x12List)){
for(i in index){
object@x12List[[i]] <- cleanArchive(object@x12List[[i]])
}
}else
stop("argument index is out of bounds!\n")
}else if(is.character(index)){
namTS <- vector()
for(i in 1:length(object@x12List)){
namTS <- c(namTS,object@x12List[[i]]@tsName)
}
if(all(index%in%namTS)){
for(nam in index){
ind <- which(nam==namTS)
object@x12List[[ind]] <- cleanArchive(object@x12List[[ind]])
}
}else
stop("argument index contained names not found in the series names!\n")
}else
stop("argument index must be either integer or character!\n")
}
return(object)
})
####SAVE
setGeneric("saveP",
function(object, file="x12Parameter.RData") { standardGeneric("saveP")} )
setGeneric("loadP",
function(object, file) { standardGeneric("loadP")} )
setMethod(
f='saveP',
signature=signature(object = "x12Parameter"),
definition=function(object,file) {
save(object,file=file)
}
)
setMethod(
  f = 'saveP',
  signature = signature(object = "x12Single"),
  ## Persist only the parameter slot of a single series to 'file'.
  definition = function(object, file) {
    ## The object is stored under the name "out"; loadP() retrieves it
    ## again via get(load(file)), so the stored name is not significant.
    out <- object@x12Parameter
    save(out, file = file)
  }
)
setMethod(
  f = 'saveP',
  signature = signature(object = "x12Batch"),
  ## Collect the x12Parameter of every series in the batch into a list
  ## named by series name and save that list to 'file'.
  definition = function(object, file) {
    x12ParList <- list()
    ## seq_along() instead of 1:length() so an empty batch iterates zero
    ## times rather than over the invalid indices c(1, 0).
    for (i in seq_along(object@x12List)) {
      x12ParList[[object@x12List[[i]]@tsName]] <- object@x12List[[i]]@x12Parameter
    }
    save(x12ParList, file = file)
  }
)
setMethod(
  f = 'loadP',
  signature = signature(object = "x12Parameter"),
  ## Load a saved x12Parameter object from 'file' and return it.
  definition = function(object, file) {
    par <- get(load(file = file))
    ## inherits() instead of comparing class(par) with "!=": class() can
    ## return a vector of length > 1, which breaks scalar comparison.
    if (!inherits(par, "x12Parameter"))
      stop("no parameter settings found in the file!\n")
    return(par)
  }
)
setMethod(
  f = 'loadP',
  signature = signature(object = "x12Single"),
  ## Load a saved x12Parameter from 'file' and install it as this series'
  ## parameter slot.
  definition = function(object, file) {
    par <- get(load(file = file))
    ## inherits() instead of comparing class(par) with "!=": class() can
    ## return a vector of length > 1, which breaks scalar comparison.
    if (!inherits(par, "x12Parameter"))
      stop("no parameter settings found in the file!\n")
    object@x12Parameter <- par
    return(object)
  }
)
setMethod(
  f = 'loadP',
  signature = signature(object = "x12Batch"),
  ## Load parameters for a whole batch from 'file'. The file may hold
  ## either a single x12Parameter (applied to every series, with a
  ## warning) or a list of x12Parameter objects matching the batch length.
  definition = function(object, file) {
    parList <- get(load(file = file))
    ## inherits() instead of class() == "...": class() may return a vector.
    if (inherits(parList, "x12Parameter")) {
      warning("All Parameters will be overwritten with one loaded parameter configuration")
      for (i in seq_along(object@x12List)) {
        object@x12List[[i]]@x12Parameter <- parList
      }
    } else {
      if (length(parList) != length(object@x12List))
        stop("loaded Parameter list does not fit to the x12Batch object \n")
      for (i in seq_along(parList)) {
        if (!inherits(parList[[i]], "x12Parameter"))
          stop("The file does not contain a list of x12Parameter objects!")
        object@x12List[[i]]@x12Parameter <- parList[[i]]
      }
    }
    return(object)
  }
) | /x12/R/parameter-methods.R | no_license | ingted/R-Examples | R | false | false | 14,053 | r | # Generic function setP, set Parameter
setGeneric("setP",
function(object, ...) { standardGeneric("setP")} )
# Generic function getP, get Parameter
setGeneric("getP",
function(object, ...) { standardGeneric("getP")} )
#Methods for signature x12Parameter
setMethod(
f='setP',
signature=signature(object = "x12Parameter"),
definition=function(object, listP) {
paras <- c(
#"period",
"series.span",
"series.modelspan",
#"series.type",
#"decimals",
"transform.function",
"transform.power",
"transform.adjust",
"regression.variables",
"regression.user",
"regression.file",
"regression.usertype",
"regression.centeruser",
"regression.start",
"regression.aictest",
#"outlier",
"outlier.types",
"outlier.critical",
"outlier.span",
"outlier.method",
"identify",
"identify.diff",
"identify.sdiff",
"identify.maxlag",
"arima.model",
"arima.smodel",
"arima.ar",
"arima.ma",
"automdl",
"automdl.acceptdefault",
"automdl.balanced",
"automdl.maxorder",
"automdl.maxdiff",
"forecast_years",
"backcast_years",
"forecast_conf",
"estimate",
"estimate.outofsample",
"check",
"check.maxlag",
"slidingspans",
"slidingspans.fixmdl",
"slidingspans.fixreg",
"slidingspans.length",
"slidingspans.numspans",
"slidingspans.outlier",
"slidingspans.additivesa",
"slidingspans.start",
"history",
"history.estimates",
"history.fixmdl",
"history.fixreg",
"history.outlier",
"history.sadjlags",
"history.trendlags",
"history.start",
"history.target",
"x11.sigmalim",
"x11.type",
"x11.sfshort",
"x11.samode",
"x11.seasonalma",
"x11.trendma",
"x11.appendfcst",
"x11.appendbcst",
"x11.calendarsigma",
"x11.excludefcst",
"x11.final",
"x11regression"
#"tblnames",
#"Rtblnames",
#"seats",
#"seatsparameter"
)
mn <- names(listP)%in%paras
if(any(!mn)){
warning("The following parameters could not be matched: ",paste(names(listP)[!mn],collapse=" , "))
}
mn <- names(listP)[mn]
for(nam in mn){
slot(object,nam) <- listP[[nam]]
}
return(object)
}
)
setMethod(
f='getP',
signature=signature(object = "x12Parameter"),
definition=function(object, whichP) {
paras <- c(
#"period",
"series.span",
"series.modelspan",
#"series.type",
#"decimals",
"transform.function",
"transform.power",
"transform.adjust",
"regression.variables",
"regression.user",
"regression.file",
"regression.usertype",
"regression.centeruser",
"regression.start",
"regression.aictest",
#"outlier",
"outlier.types",
"outlier.critical",
"outlier.span",
"outlier.method",
"identify",
"identify.diff",
"identify.sdiff",
"identify.maxlag",
"arima.model",
"arima.smodel",
"arima.ar",
"arima.ma",
"automdl",
"automdl.acceptdefault",
"automdl.balanced",
"automdl.maxorder",
"automdl.maxdiff",
"forecast_years",
"backcast_years",
"forecast_conf",
"estimate",
"estimate.outofsample",
"check",
"check.maxlag",
"slidingspans",
"slidingspans.fixmdl",
"slidingspans.fixreg",
"slidingspans.length",
"slidingspans.numspans",
"slidingspans.outlier",
"slidingspans.additivesa",
"slidingspans.start",
"history",
"history.estimates",
"history.fixmdl",
"history.fixreg",
"history.outlier",
"history.sadjlags",
"history.trendlags",
"history.start",
"history.target",
"x11.sigmalim",
"x11.type",
"x11.sfshort",
"x11.samode",
"x11.seasonalma",
"x11.trendma",
"x11.appendfcst",
"x11.appendbcst",
"x11.calendarsigma",
"x11.excludefcst",
"x11.final",
"x11regression"
#"tblnames",
#"Rtblnames",
#"seats",
#"seatsparameter"
)
mn <- whichP%in%paras
if(any(!mn)){
warning("The following parameters could not be matched: ",paste(whichP[!mn],collapse=" , "))
}
mn <- whichP[mn]
ret <- list()
for(nam in mn){
ret[[nam]] <- slot(object,nam)
}
return(ret)
}
)
#Methods for signature x12Single
setMethod(
f='getP',
signature=signature(object = "x12Single"),definition=function(object, whichP) {
getP(object@x12Parameter,whichP=whichP)
})
setMethod(
f='setP',
signature=signature(object = "x12Single"),definition=function(object, listP) {
object@x12Parameter <- setP(object@x12Parameter,listP=listP)
return(object)
})
#Methods for signature x12Batch
setMethod(
  f = 'getP',
  signature = signature(object = "x12Batch"),
  ## Get parameters from every series in the batch, or from a subset
  ## selected by 'index' (numeric positions or series names).
  ##
  ## Returns: a list with one getP() result per selected series.
  definition = function(object, whichP, index = NULL) {
    ret <- list()
    if (is.null(index)) {  ## no index: query every series
      cat("The parameters for all objects are shown.\n")
      for (i in seq_along(object@x12List)) {
        ret[[length(ret) + 1]] <- getP(object@x12List[[i]], whichP = whichP)
      }
    } else {
      ## BUG FIX: was is.integer(index), which rejects index=1 (a double
      ## literal in R) with the misleading "must be either integer or
      ## character" error; use is.numeric() as the setP() method does.
      if (is.numeric(index)) {
        if (min(index) > 0 && max(index) <= length(object@x12List)) {
          for (i in index) {
            ret[[length(ret) + 1]] <- getP(object@x12List[[i]], whichP = whichP)
          }
        } else
          stop("argument index is out of bounds!\n")
      } else if (is.character(index)) {
        ## Map series names to their positions in the batch.
        namTS <- vector()
        for (i in seq_along(object@x12List)) {
          namTS <- c(namTS, object@x12List[[i]]@tsName)
        }
        if (all(index %in% namTS)) {
          for (nam in index) {
            ind <- which(nam == namTS)
            ret[[length(ret) + 1]] <- getP(object@x12List[[ind]], whichP = whichP)
          }
        } else
          stop("argument index contained names not found in the series names!\n")
      } else
        stop("argument index must be either integer or character!\n")
    }
    return(ret)
  })
setMethod(
f='setP',
signature=signature(object = "x12Batch"),definition=function(object, listP,index=NULL) {
if(is.null(index)){##changing all
cat("The parameters for all objects are changed.\n")
for(i in 1:length(object@x12List)){
object@x12List[[i]] <- setP(object@x12List[[i]],listP=listP)
}
}else{
if(is.numeric(index)){
if(min(index)>0&max(index)<=length(object@x12List)){
for(i in index){
object@x12List[[i]] <- setP(object@x12List[[i]],listP=listP)
}
}else
stop("argument index is out of bounds!\n")
}else if(is.character(index)){
namTS <- vector()
for(i in 1:length(object@x12List)){
namTS <- c(namTS,object@x12List[[i]]@tsName)
}
if(all(index%in%namTS)){
for(nam in index){
ind <- which(nam==namTS)
object@x12List[[ind]] <- setP(object@x12List[[ind]],listP=listP)
}
}else
stop("argument index contained names not found in the series names!\n")
}else
stop("argument index must be either integer or character!\n")
}
return(object)
})
#Goto previous parameter setting and output
# Generic function prev, cleanArchive
setGeneric("prev",
function(object, ...) { standardGeneric("prev")} )
setMethod(
f='prev',
signature=signature(object = "x12Single"),definition=function(object,n=NULL) {
if(is.null(n))
ind <- length(object@x12OldParameter)
else if(n%in%c(1:length(object@x12OldParameter)))
ind <- n
else
stop("Please provide an index corresponding to a previous run. (see summary with oldOutput>0)")
object@x12Output <- object@x12OldOutput[[ind]]
object@x12Parameter <- object@x12OldParameter[[ind]]
oldout <- list()
oldpar <- list()
for(i in 1:length(object@x12OldParameter)){
if(i!=ind){
oldout[[length(oldout)+1]] <- object@x12OldOutput[[i]]
oldpar[[length(oldpar)+1]] <- object@x12OldParameter[[i]]
}
}
object@x12OldOutput <- oldout
object@x12OldParameter <- oldpar
return(object)
})
setMethod(
f='prev',
signature=signature(object = "x12Batch"),definition=function(object,index=NULL,n=NULL) {
if(is.null(index)){##changing all
cat("All current parameters and outputs are replaced by the previous ones.\n")
for(i in 1:length(object@x12List)){
object@x12List[[i]] <- prev(object@x12List[[i]],n=n)
}
}else{
if(is.numeric(index)){
if(min(index)>0&max(index)<=length(object@x12List)){
for(i in index){
object@x12List[[i]] <- prev(object@x12List[[i]],n=n)
}
}else
stop("argument index is out of bounds!\n")
}else if(is.character(index)){
namTS <- vector()
for(i in 1:length(object@x12List)){
namTS <- c(namTS,object@x12List[[i]]@tsName)
}
if(all(index%in%namTS)){
for(nam in index){
ind <- which(nam==namTS)
object@x12List[[ind]] <- prev(object@x12List[[ind]],n=n)
}
}else
stop("argument index contained names not found in the series names!\n")
}else
stop("argument index must be either integer or character!\n")
}
return(object)
})
setGeneric("cleanArchive",
function(object, ...) { standardGeneric("cleanArchive")} )
setGeneric("cleanHistory",
function(object, ...) {
.Deprecated("cleanArchive")
cleanArchive(object,...)
} )
setMethod(
f='cleanArchive',
signature=signature(object = "x12Single"),definition=function(object) {
object@x12OldParameter <- object@x12OldOutput <- list()
return(object)
})
setMethod(
f='cleanArchive',
signature=signature(object = "x12Batch"),definition=function(object,index=NULL) {
if(is.null(index)){##changing all
cat("All previous parameters and outputs are deleted.\n")
for(i in 1:length(object@x12List)){
object@x12List[[i]] <- cleanArchive(object@x12List[[i]])
}
}else{
if(is.numeric(index)){
if(min(index)>0&max(index)<=length(object@x12List)){
for(i in index){
object@x12List[[i]] <- cleanArchive(object@x12List[[i]])
}
}else
stop("argument index is out of bounds!\n")
}else if(is.character(index)){
namTS <- vector()
for(i in 1:length(object@x12List)){
namTS <- c(namTS,object@x12List[[i]]@tsName)
}
if(all(index%in%namTS)){
for(nam in index){
ind <- which(nam==namTS)
object@x12List[[ind]] <- cleanArchive(object@x12List[[ind]])
}
}else
stop("argument index contained names not found in the series names!\n")
}else
stop("argument index must be either integer or character!\n")
}
return(object)
})
####SAVE
setGeneric("saveP",
function(object, file="x12Parameter.RData") { standardGeneric("saveP")} )
setGeneric("loadP",
function(object, file) { standardGeneric("loadP")} )
setMethod(
f='saveP',
signature=signature(object = "x12Parameter"),
definition=function(object,file) {
save(object,file=file)
}
)
setMethod(
f='saveP',
signature=signature(object = "x12Single"),
definition=function(object,file) {
out=object@x12Parameter
save(out,file=file)
}
)
setMethod(
f='saveP',
signature=signature(object = "x12Batch"),
definition=function(object,file) {
x12ParList <- list()
for(i in 1:length(object@x12List)){
x12ParList[[object@x12List[[i]]@tsName]] <- object@x12List[[i]]@x12Parameter
}
save(x12ParList,file=file)
}
)
setMethod(
  f = 'loadP',
  signature = signature(object = "x12Parameter"),
  ## Load a saved x12Parameter object from 'file' and return it.
  definition = function(object, file) {
    par <- get(load(file = file))
    ## inherits() instead of comparing class(par) with "!=": class() can
    ## return a vector of length > 1, which breaks scalar comparison.
    if (!inherits(par, "x12Parameter"))
      stop("no parameter settings found in the file!\n")
    return(par)
  }
)
setMethod(
  f = 'loadP',
  signature = signature(object = "x12Single"),
  ## Load a saved x12Parameter from 'file' and install it as this series'
  ## parameter slot.
  definition = function(object, file) {
    par <- get(load(file = file))
    ## inherits() instead of comparing class(par) with "!=": class() can
    ## return a vector of length > 1, which breaks scalar comparison.
    if (!inherits(par, "x12Parameter"))
      stop("no parameter settings found in the file!\n")
    object@x12Parameter <- par
    return(object)
  }
)
setMethod(
f='loadP',
signature=signature(object = "x12Batch"),
definition=function(object,file) {
parList <- get(load(file=file))
if(class(parList)=="x12Parameter"){
warning("All Parameters will be overwritten with one loaded parameter configuration")
for(i in 1:length(object@x12List)){
object@x12List[[i]]@x12Parameter <- parList
}
}else{
if(length(parList)!=length(object@x12List))
stop("loaded Parameter list does not fit to the x12Batch object \n")
for(i in 1:length(parList)){
if(class(parList[[i]])!="x12Parameter")
stop("The file does not contain a list of x12Parameter objects!")
object@x12List[[i]]@x12Parameter <- parList[[i]]
}
}
return(object)
}
) |
rm(list=ls())
#Si es necesario
#install.packages("googleVis")
library(googleVis)
#getWorldBankData <- function(id='SP.POP.TOTL', date='1960:2010',
getWorldBankData <- function(id='SP.POP.TOTL', date='1960:2015',
value="value", per.page=15000){ #per.page=14000
## Download one World Bank indicator via the public JSON API.
##
## Args:
##   id: World Bank indicator id (e.g. 'SP.POP.TOTL' = total population).
##   date: year range as a string "from:to".
##   value: column name to use for the indicator values in the result.
##   per.page: API page size; must be large enough to hold all
##     observations, since only the first page is read.
##
## Returns: a data.frame with columns year, <value>, country.name,
## country.id.
require(RJSONIO)
url <- paste("http://api.worldbank.org/countries/all/indicators/", id,
"?date=", date, "&format=json&per_page=", per.page,
sep="")
## The API response is a list of length 2: metadata, then the
## observation records; keep only the records.
wbData <- fromJSON(url)[[2]]
wbData = data.frame(
year = as.numeric(sapply(wbData, "[[", "date")),
## Missing indicator values arrive as NULL; map them to NA.
value = as.numeric(sapply(wbData, function(x)
ifelse(is.null(x[["value"]]),NA, x[["value"]]))),
country.name = sapply(wbData, function(x) x[["country"]]['value']),
country.id = sapply(wbData, function(x) x[["country"]]['id'])
)
## Rename the generic "value" column to the caller-supplied name.
names(wbData)[2] <- value
return(wbData)
}
## OK - that above is the function that calls DATA (JSON format) from the
## world bank APIs that have been exposed to make info available -
## when called, you'll see the variables populate
getWorldBankCountries <- function(){
## Download the World Bank country list (ids, region, income level,
## capital city, coordinates) as a data.frame.
require(RJSONIO)
wbCountries <-
fromJSON("http://api.worldbank.org/countries?per_page=15000&format=json") # per_page=14000&format=json
## Element [[2]] holds the country records; flatten each record with
## unlist() and bind them row-wise into a data.frame.
wbCountries <- data.frame(t(sapply(wbCountries[[2]], unlist)))
wbCountries$longitude <- as.numeric(wbCountries$longitude)
wbCountries$latitude <- as.numeric(wbCountries$latitude)
## Strip the " (all income levels)" suffix from region names.
## NOTE(review): this assumes region.value is a factor; since R 4.0
## data.frame() no longer converts strings to factors, so levels() may
## return NULL and this line becomes a no-op -- confirm on the target
## R version.
levels(wbCountries$region.value) <- gsub(" \\(all income levels\\)",
"", levels(wbCountries$region.value))
return(wbCountries)
}
### Obtener los paises, su clasificacion y su localizacion.
## Create a string 1960:this year, e.g. 1960:2011
#years <- paste("1960:", format(Sys.Date(), "%Y"), sep="")
#Ano hasta el que se desea analizar
years <- paste("1960:", "2015", sep="")
## this just makes a string that says "1960:2014" - that's it
years
## Fertility rate, Indice de fertilidad
fertility.rate <- getWorldBankData(id='SP.DYN.TFRT.IN',
date=years, value="Tasa de fertilidad")
## calls the function iwth instructions to pull fertility data
## Life Expectancy
life.exp <- getWorldBankData(id='SP.DYN.LE00.IN', date=years,
value="Esperanza de vida")
##calls function to get hte life expectancy (same function, different id to API)
## Population
population <- getWorldBankData(id='SP.POP.TOTL', date=years,
value="Poblacion")
### and population - again, same funciton, different ID, different query, different data returns (12k-15k obs)
## GDP per capita (current US$)
GDP.per.capita <- getWorldBankData(id='NY.GDP.PCAP.CD',
date=years,
value="PIB per capita en USD")
## and one more trip to the API to get the GDP data
## Merge data sets
wbData <- merge(life.exp, fertility.rate)
wbData <- merge(wbData, population)
wbData <- merge(wbData, GDP.per.capita)
## RA > I'm not sure if merge requres the left hand column to be same across all sets
## like a KEY - and if needs to be in same order, but suggest checking/testing/researching
## if you are hacking your own data in here
head(wbData)
dim(wbData)
## Get country mappings
wbCountries <- getWorldBankCountries()
head(wbCountries)
dim(wbCountries)
## returns a BEAUTIFUL key:
## header id iso2Code name region.id region.value adminregion.id adminregion.value incomeLevel.id incomeLevel.value lendingType.id lendingType.value capitalCity longitude latitude
## 1st row 1 ABW AW Aruba LCN Latin America & Caribbean NOC High income: nonOECD LNX Not classified Oranjestad 46 57
## Add regional information
wbData <- merge(wbData, wbCountries[c("iso2Code", "region.value", #region.value
"incomeLevel.value")], #incomeLevel.value
by.x="country.id", by.y="iso2Code")
## here is magic of merge
head(wbData)
dim(wbData)
## Filter out the aggregates and country id column
subData <- subset(wbData, !region.value %in% "Aggregates" , select=-country.id)
## SUBDATA is only 9.9k long, rather than 12k (filtered Aggregates) // 12k may not be enougn anymore
## Create a motion chart!!!!!!!!!!!!!! (make sure you save first!)
M <- gvisMotionChart(subData, idvar="country.name", timevar="year",options=list(width=700, height=600))
## using SubData that looks like this:
# id iso2Code name region.id region.value adminregion.id adminregion.value incomeLevel.id incomeLevel.value lendingType.id lendingType.value capitalCity longitude latitude
# 1 ABW AW Aruba LCN Latin America & Caribbean NOC High income: nonOECD LNX Not classified Oranjestad 46 57
# 2 AFG AF Afghanistan SAS South Asia SAS South Asia LIC Low income IDX IDA Kabul 193 120
# 3 AFR A9 Africa NA Aggregates NA Aggregates Aggregates 1 1
## Wont "do" anything except prepare the data in "M" for the plot
## Display the chart in your browser
plot(M)
## awesome!
## SOURCE: http://lamages.blogspot.co.uk/2011/09/accessing-and-plotting-world-bank-data.html
## ORIGiNAL SOURCE: Posted by Markus Gesmann
## comments and a few tweaks by Ryan Anderson www.dreamtolearn.com | /Google_Chart.R | no_license | LuisRodriguezIE/CompendioForecasting | R | false | false | 5,480 | r | rm(list=ls())
#Si es necesario
#install.packages("googleVis")
library(googleVis)
#getWorldBankData <- function(id='SP.POP.TOTL', date='1960:2010',
getWorldBankData <- function(id='SP.POP.TOTL', date='1960:2015',
value="value", per.page=15000){ #per.page=14000
require(RJSONIO)
url <- paste("http://api.worldbank.org/countries/all/indicators/", id,
"?date=", date, "&format=json&per_page=", per.page,
sep="")
wbData <- fromJSON(url)[[2]]
wbData = data.frame(
year = as.numeric(sapply(wbData, "[[", "date")),
value = as.numeric(sapply(wbData, function(x)
ifelse(is.null(x[["value"]]),NA, x[["value"]]))),
country.name = sapply(wbData, function(x) x[["country"]]['value']),
country.id = sapply(wbData, function(x) x[["country"]]['id'])
)
names(wbData)[2] <- value
return(wbData)
}
## OK - that above is the function that calls DATA (JSON format) from the
## world bank APIs that have been exposed to make info available -
## when called, you'll see the variables populate
getWorldBankCountries <- function(){
require(RJSONIO)
wbCountries <-
fromJSON("http://api.worldbank.org/countries?per_page=15000&format=json") # per_page=14000&format=json
wbCountries <- data.frame(t(sapply(wbCountries[[2]], unlist)))
wbCountries$longitude <- as.numeric(wbCountries$longitude)
wbCountries$latitude <- as.numeric(wbCountries$latitude)
levels(wbCountries$region.value) <- gsub(" \\(all income levels\\)",
"", levels(wbCountries$region.value))
return(wbCountries)
}
### Obtener los paises, su clasificacion y su localizacion.
## Create a string 1960:this year, e.g. 1960:2011
#years <- paste("1960:", format(Sys.Date(), "%Y"), sep="")
#Ano hasta el que se desea analizar
years <- paste("1960:", "2015", sep="")
## this just makes a string that says "1960:2014" - that's it
years
## Fertility rate, Indice de fertilidad
fertility.rate <- getWorldBankData(id='SP.DYN.TFRT.IN',
date=years, value="Tasa de fertilidad")
## calls the function iwth instructions to pull fertility data
## Life Expectancy
life.exp <- getWorldBankData(id='SP.DYN.LE00.IN', date=years,
value="Esperanza de vida")
##calls function to get hte life expectancy (same function, different id to API)
## Population
population <- getWorldBankData(id='SP.POP.TOTL', date=years,
value="Poblacion")
### and population - again, same funciton, different ID, different query, different data returns (12k-15k obs)
## GDP per capita (current US$)
GDP.per.capita <- getWorldBankData(id='NY.GDP.PCAP.CD',
date=years,
value="PIB per capita en USD")
## and one more trip to the API to get the GDP data
## Merge data sets
wbData <- merge(life.exp, fertility.rate)
wbData <- merge(wbData, population)
wbData <- merge(wbData, GDP.per.capita)
## RA > I'm not sure if merge requres the left hand column to be same across all sets
## like a KEY - and if needs to be in same order, but suggest checking/testing/researching
## if you are hacking your own data in here
head(wbData)
dim(wbData)
## Get country mappings
wbCountries <- getWorldBankCountries()
head(wbCountries)
dim(wbCountries)
## returns a BEAUTIFUL key:
## header id iso2Code name region.id region.value adminregion.id adminregion.value incomeLevel.id incomeLevel.value lendingType.id lendingType.value capitalCity longitude latitude
## 1st row 1 ABW AW Aruba LCN Latin America & Caribbean NOC High income: nonOECD LNX Not classified Oranjestad 46 57
## Add regional information
wbData <- merge(wbData, wbCountries[c("iso2Code", "region.value", #region.value
"incomeLevel.value")], #incomeLevel.value
by.x="country.id", by.y="iso2Code")
## here is magic of merge
head(wbData)
dim(wbData)
## Filter out the aggregates and country id column
subData <- subset(wbData, !region.value %in% "Aggregates" , select=-country.id)
## SUBDATA is only 9.9k long, rather than 12k (filtered Aggregates) // 12k may not be enougn anymore
## Create a motion chart!!!!!!!!!!!!!! (make sure you save first!)
M <- gvisMotionChart(subData, idvar="country.name", timevar="year",options=list(width=700, height=600))
## using SubData that looks like this:
# id iso2Code name region.id region.value adminregion.id adminregion.value incomeLevel.id incomeLevel.value lendingType.id lendingType.value capitalCity longitude latitude
# 1 ABW AW Aruba LCN Latin America & Caribbean NOC High income: nonOECD LNX Not classified Oranjestad 46 57
# 2 AFG AF Afghanistan SAS South Asia SAS South Asia LIC Low income IDX IDA Kabul 193 120
# 3 AFR A9 Africa NA Aggregates NA Aggregates Aggregates 1 1
## Wont "do" anything except prepare the data in "M" for the plot
## Display the chart in your browser
plot(M)
## awesome!
## SOURCE: http://lamages.blogspot.co.uk/2011/09/accessing-and-plotting-world-bank-data.html
## ORIGiNAL SOURCE: Posted by Markus Gesmann
## comments and a few tweaks by Ryan Anderson www.dreamtolearn.com |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.r
\name{read_burp}
\alias{read_burp}
\title{Read in a Burp proxy XML export file}
\usage{
read_burp(burp_file, convert_response = TRUE, convert_request = TRUE)
}
\arguments{
\item{burp_file}{path to a Burp proxy XML export file}
\item{convert_response}{if \code{TRUE}, turn the \code{response} record into
an \code{httr} \code{response} object. If the \code{response} record cannot
be turned into an \code{httr} \code{response} object a warning will
be issued and the raw \code{response} record will be returned.}
\item{convert_request}{if \code{TRUE}, turn the \code{request} record into
an \code{httr} \code{request} object. If the \code{request} record cannot
be turned into an \code{httr} \code{request} object a warning will
be issued and the raw \code{request} record will be returned.}
}
\value{
a \code{tibble}
}
\description{
For now, this function expects the \code{request} and \code{response}
elements to be base64 encoded.
}
\details{
Eventually there will likely be an \code{as_har()} function to turn the
entire structure into a \code{HARtools} object.
}
\examples{
library(dplyr)
system.file("extdata", "hottest_year.xml", package="burrp") \%>\%
read_burp() \%>\%
glimpse()
}
| /man/read_burp.Rd | permissive | mpadge/burrp | R | false | true | 1,286 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.r
\name{read_burp}
\alias{read_burp}
\title{Read in a Burp proxy XML export file}
\usage{
read_burp(burp_file, convert_response = TRUE, convert_request = TRUE)
}
\arguments{
\item{burp_file}{path to a Burp proxy XML export file}
\item{convert_response}{if \code{TRUE}, turn the \code{response} record into
an \code{httr} \code{response} object. If the \code{response} record cannot
be turned into an \code{httr} \code{response} object a warning will
be issued and the raw \code{response} record will be returned.}
\item{convert_request}{if \code{TRUE}, turn the \code{request} record into
an \code{httr} \code{request} object. If the \code{request} record cannot
be turned into an \code{httr} \code{request} object a warning will
be issued and the raw \code{request} record will be returned.}
}
\value{
a \code{tibble}
}
\description{
For now, this function expects the \code{request} and \code{response}
elements to be base64 encoded.
}
\details{
Eventually there will likely be an \code{as_har()} function to turn the
entire structure into a \code{HARtools} object.
}
\examples{
library(dplyr)
system.file("extdata", "hottest_year.xml", package="burrp") \%>\%
read_burp() \%>\%
glimpse()
}
|
################ ZIP sin 7s desde el inicio aaaaaaaahhh ####################
library(tidyverse)
library(corrplot)
library(polycor)
library(glm2)
library(pscl)
library(boot)
library(VGAM)
# Base sin 7s
CData_CDMX2_sin7 <- CData_CDMX2 %>% filter(Vic_Rob_As < 7)
################ Guardamos num de obs en n
N <- length(CData_CDMX2_sin7$Vic_Rob_As) ; N # 5417
################ Modelo Nulo
mod_null <- zeroinfl(Vic_Rob_As ~ 1, data= CData_CDMX2_sin7,
dist="poisson")
summary(mod_null)
#################### Modelo todas covariables ############################
mod_full <- zeroinfl(Vic_Rob_As ~ ., data= CData_CDMX2_sin7,
dist="poisson",link="logit")
summary(mod_full)
# loglik of zero-inflated model -3225.546
# BIC of zero-inflated model 6691.816
# AIC of zero-inflated model 6507.091
{
cat("loglik of zero-inflated model", logLik(mod_full), "\n")
cat("BIC of zero-inflated model", BIC(mod_full), "\n")
cat("AIC of zero-inflated model", AIC(mod_full))
}
################ Modelo sin Imp_Seg: ###############################
#### Edad + Seg_Mun + Mas_Pat_Vil + Region + Nivel_Edu + Sit_Lab
mod2 <- zeroinfl(Vic_Rob_As ~ Edad + Seg_Mun + Mas_Pat_Vil
+ Region + Nivel_Edu + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod2)
# loglik of zero-inflated model -3228.321
# BIC of zero-inflated model 6680.171
# AIC of zero-inflated model 6508.641
{
cat("loglik of zero-inflated model", logLik(mod2), "\n")
cat("BIC of zero-inflated model", BIC(mod2), "\n")
cat("AIC of zero-inflated model", AIC(mod2))
}
################ Modelo distinto 1 ###############################
#### Poisson: Seg_Mun + Region + Nivel_Edu + Sit_Lab
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_1 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region + Nivel_Edu + Sit_Lab |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_1)
# loglik of zero-inflated model -3229.856
# BIC of zero-inflated model 6623.061
# AIC of zero-inflated model 6497.712 <---------------------- MEJOR?
{
cat("loglik of zero-inflated model", logLik(mod_dist_1), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_1), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_1))
}
############ Pruebas simulaciones (media y mat de confusion)
sim_conf_mat_zeroinfl2(mod_dist_1,
res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = N) # 0.6827377
################ Modelo distinto 2 ###############################
#### Poisson: Seg_Mun + Region + Nivel_Edu
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_2 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region + Nivel_Edu |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_2)
# loglik of zero-inflated model -3231.696
# BIC of zero-inflated model 6609.546
# AIC of zero-inflated model 6497.392 <---------------------- MEJOR?
{
cat("loglik of zero-inflated model", logLik(mod_dist_2), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_2), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_2))
}
############ Pruebas simulaciones (media y mat de confusion)
sim_conf_mat_zeroinfl2(mod_dist_2,
res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = N) # 0.6833856
################ Modelo distinto 3 ###############################
#### Poisson: Seg_Mun + Region
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_3 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_3)
# loglik of zero-inflated model -3237.105
# BIC of zero-inflated model 6585.975 <---------------------- MEJOR
# AIC of zero-inflated model 6500.21 <----------------------- EL 2do MEJOR
{
cat("loglik of zero-inflated model", logLik(mod_dist_3), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_3), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_3))
}
############ Pruebas simulaciones (media y mat de confusion)
sim_conf_mat_zeroinfl2(mod_dist_3,
res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = N) # 0.6833856
################ Modelo distinto 4 ###############################
#### Poisson: Seg_Mun
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_4 <- zeroinfl(Vic_Rob_As ~ Seg_Mun |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_4)
# loglik of zero-inflated model -3278.8
# BIC of zero-inflated model 6643.572
# AIC of zero-inflated model 6577.599
{
cat("loglik of zero-inflated model", logLik(mod_dist_4), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_4), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_4))
}
| /ZIP/ZIP_sin7_desde_0.R | no_license | Luis-2199/BayesProject | R | false | false | 5,169 | r | ################ ZIP sin 7s desde el inicio aaaaaaaahhh ####################
library(tidyverse)
library(corrplot)
library(polycor)
library(glm2)
library(pscl)
library(boot)
library(VGAM)
# Base sin 7s
CData_CDMX2_sin7 <- CData_CDMX2 %>% filter(Vic_Rob_As < 7)
################ Guardamos num de obs en n
N <- length(CData_CDMX2_sin7$Vic_Rob_As) ; N # 5417
################ Modelo Nulo
mod_null <- zeroinfl(Vic_Rob_As ~ 1, data= CData_CDMX2_sin7,
dist="poisson")
summary(mod_null)
#################### Modelo todas covariables ############################
mod_full <- zeroinfl(Vic_Rob_As ~ ., data= CData_CDMX2_sin7,
dist="poisson",link="logit")
summary(mod_full)
# loglik of zero-inflated model -3225.546
# BIC of zero-inflated model 6691.816
# AIC of zero-inflated model 6507.091
{
cat("loglik of zero-inflated model", logLik(mod_full), "\n")
cat("BIC of zero-inflated model", BIC(mod_full), "\n")
cat("AIC of zero-inflated model", AIC(mod_full))
}
################ Modelo sin Imp_Seg: ###############################
#### Edad + Seg_Mun + Mas_Pat_Vil + Region + Nivel_Edu + Sit_Lab
mod2 <- zeroinfl(Vic_Rob_As ~ Edad + Seg_Mun + Mas_Pat_Vil
+ Region + Nivel_Edu + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod2)
# loglik of zero-inflated model -3228.321
# BIC of zero-inflated model 6680.171
# AIC of zero-inflated model 6508.641
{
cat("loglik of zero-inflated model", logLik(mod2), "\n")
cat("BIC of zero-inflated model", BIC(mod2), "\n")
cat("AIC of zero-inflated model", AIC(mod2))
}
################ Modelo distinto 1 ###############################
#### Poisson: Seg_Mun + Region + Nivel_Edu + Sit_Lab
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_1 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region + Nivel_Edu + Sit_Lab |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_1)
# loglik of zero-inflated model -3229.856
# BIC of zero-inflated model 6623.061
# AIC of zero-inflated model 6497.712 <---------------------- MEJOR?
{
cat("loglik of zero-inflated model", logLik(mod_dist_1), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_1), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_1))
}
############ Pruebas simulaciones (media y mat de confusion)
sim_conf_mat_zeroinfl2(mod_dist_1,
res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = N) # 0.6827377
################ Modelo distinto 2 ###############################
#### Poisson: Seg_Mun + Region + Nivel_Edu
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_2 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region + Nivel_Edu |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_2)
# loglik of zero-inflated model -3231.696
# BIC of zero-inflated model 6609.546
# AIC of zero-inflated model 6497.392 <---------------------- MEJOR?
{
cat("loglik of zero-inflated model", logLik(mod_dist_2), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_2), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_2))
}
############ Pruebas simulaciones (media y mat de confusion)
sim_conf_mat_zeroinfl2(mod_dist_2,
res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = N) # 0.6833856
################ Modelo distinto 3 ###############################
#### Poisson: Seg_Mun + Region
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_3 <- zeroinfl(Vic_Rob_As ~ Seg_Mun + Region |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_3)
# loglik of zero-inflated model -3237.105
# BIC of zero-inflated model 6585.975 <---------------------- MEJOR
# AIC of zero-inflated model 6500.21 <----------------------- EL 2do MEJOR
{
cat("loglik of zero-inflated model", logLik(mod_dist_3), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_3), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_3))
}
############ Pruebas simulaciones (media y mat de confusion)
sim_conf_mat_zeroinfl2(mod_dist_3,
res = CData_CDMX2_sin7$Vic_Rob_As,
muest.size = N) # 0.6833856
################ Modelo distinto 4 ###############################
#### Poisson: Seg_Mun
#### Bern: Edad + Mas_Pat_Vil + Region + Sit_Lab
mod_dist_4 <- zeroinfl(Vic_Rob_As ~ Seg_Mun |
Edad + Mas_Pat_Vil + Region + Sit_Lab,
data= CData_CDMX2_sin7, dist="poisson",link="logit")
summary(mod_dist_4)
# loglik of zero-inflated model -3278.8
# BIC of zero-inflated model 6643.572
# AIC of zero-inflated model 6577.599
{
cat("loglik of zero-inflated model", logLik(mod_dist_4), "\n")
cat("BIC of zero-inflated model", BIC(mod_dist_4), "\n")
cat("AIC of zero-inflated model", AIC(mod_dist_4))
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  ## Build a special "matrix" wrapper that can cache its inverse.
  ## Returns a list of four accessor closures; the matrix 'x' and the cached
  ## inverse live in this function's enclosing environment.
  cached <- NULL
  set <- function(value) {
    x <<- value
    cached <<- NULL  # a new matrix invalidates any stored inverse
  }
  get <- function() {
    x
  }
  setInverseMatrix <- function(inv) {
    cached <<- inv
  }
  getInverseMatrix <- function() {
    cached  # NULL until an inverse has been stored
  }
  list(set = set,
       get = get,
       setInverseMatrix = setInverseMatrix,
       getInverseMatrix = getInverseMatrix)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (an object created by
  ## makeCacheMatrix), retrieving it from the cache when available and
  ## computing + storing it otherwise.
  ##
  ## x:   list of accessor closures produced by makeCacheMatrix().
  ## ...: further arguments forwarded to solve() (e.g. 'tol').
  i <- x$getInverseMatrix()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  mat <- x$get()
  ## Bug fix: '...' was accepted by the signature but never forwarded, so
  ## extra arguments to solve() were silently ignored.
  i <- solve(mat, ...)
  x$setInverseMatrix(i)
  i
}
| /cachematrix.R | no_license | vadivelkarthick1989/ProgrammingAssignment2 | R | false | false | 850 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Constructor for a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures; the matrix 'x' and the cached
## inverse 'i' live in this function's enclosing environment.
makeCacheMatrix<-function(x=matrix()){ ##this function makes a cache for input
## 'i' holds the cached inverse; NULL means "not computed yet".
i<-NULL
## Replace the stored matrix and drop any stale cached inverse.
set<-function(y){
x<<-y
i<<-NULL
}
## Return the stored matrix.
get<-function() x
## Store a computed inverse in the cache.
setInverseMatrix<-function(inv) i<<-inv
## Return the cached inverse (NULL if none has been stored).
getInverseMatrix<-function() i
list(set=set,get=get,setInverseMatrix=setInverseMatrix,getInverseMatrix=getInverseMatrix)
}
## Write a short comment describing this function
## Compute (or retrieve) the inverse of the matrix wrapped by 'x', where 'x'
## is an object created by makeCacheMatrix(). The cached inverse is returned
## when available; otherwise it is computed with solve() and stored.
## NOTE(review): '...' is accepted by the signature but never forwarded to
## solve() — confirm whether extra arguments (e.g. 'tol') were meant to pass
## through.
cacheSolve <- function(x, ...) { ## checks the cache first; if present, retrieves the value from the cache
## Return a matrix that is the inverse of 'x'
i<-x$getInverseMatrix()
if(!is.null(i)){
message("getting cached data")
return(i)
}
matix<-x$get()
i<-solve(matix)
x$setInverseMatrix(i)
i
}
|
############################################################################
############################################################################
#From,
# Gokul Kaisaravalli Bhojraj
# Id: 80789
# Business Intelligence
############################################################################
############################################################################
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# 1)###################################################
library(ff)
library(ffbase)
# 1)a
system.time(ffx08<-
read.csv.ffdf(file="/Users/Gokul/Desktop/MS/Statistical R/leture/2008.csv.bz2", header=TRUE,
na.string=c("",NA),colClasses=
c(Month="factor",DayOfWeek="factor", Year="factor")))
system.time(ffx07<-
read.csv.ffdf(file="/Users/Gokul/Desktop/MS/Statistical R/leture/2008.csv.bz2", header=TRUE,
na.string=c("",NA),colClasses=
c(Month="factor",DayOfWeek="factor", Year="factor")))
# 1)b
save.ffdf(ffx07, dir = "/Users/Gokul/Desktop/MS/2007.csv.bz2",
clone = FALSE, relativepath = TRUE,overwrite = TRUE)
save.ffdf(ffx08, dir = "/Users/Gokul/Desktop/MS/2008.csv.bz2",
clone = FALSE, relativepath = TRUE,overwrite = TRUE)
load.ffdf(dir="/Users/Gokul/Desktop/MS/2008.csv.bz2")
load.ffdf(dir="/Users/Gokul/Desktop/MS/2007.csv.bz2")
summary(ffx07)
summary(ffx08)
# 1)c
sub_ffx08<-subset(ffx08,Month==12)
sub_ffx08$Month<-droplevels(sub_ffx08$Month)
summary(ffx08)
# 2 ################################################################
# 2)a
# 2)a 1
source("C:/Users/Gokul/Downloads/Chunk_lm.R")
# 2008
load.ffdf(dir="/Users/Gokul/Desktop/MS/2008.csv.bz2")
form<-ArrDelay~Origin+Distance
system.time(reg1<-Chunk_lm(form,data=ffx08,chunkSize=75000,sandwich=FALSE,beta_h=NULL,cores=4))
## system.time(reg <-biglm(form, data=ffx08,sandwich=FALSE))
# 2007
load.ffdf(dir="/Users/Gokul/Desktop/MS/2007.csv.bz2")
form2<-ArrDelay~Origin+Distance
system.time(reg2<-Chunk_lm(form2,data=ffx07,chunkSize=75000,sandwich=FALSE,beta_h=NULL,cores=4))
save(reg1,reg2,file="reg_7_8.RData")
source("C:/Users/Gokul/Downloads/Predicted_airports.R")
pred_7<-pred_airports(beta_h=reg2$coef,data=ffx07,fix_num=mean(ffx07$Distance[]))
pred_8<-pred_airports(beta_h=reg1$coef,data=ffx08,fix_num=mean(ffx08$Distance[]))
head(pred_7)
# 2)a 2
pred_data<-data.frame(Origin=c(names(pred_7),names(pred_8)),
rbind(data.frame(pred=pred_7,year=2007),data.frame(pred=pred_8,year=2008)))
names(pred_data)<-c("Origin","pred","year")
# 2)b
library(XLConnect)
airports<-read.table(file="C:/Users/Gokul/Desktop/MS/Statistical R/leture/airports.csv",
header=TRUE,sep= ",",dec=".",na.string="NA")
head(airports)
# 2)c
?merge
plot_data<-merge(airports,pred_data,by.x="iata",by.y = "Origin")
# 3) #############################################################
# 3)a
install.packages("ggplot2")
library(ggplot2)
library(maps)
# 3)b
map.us <- map_data(map = "state")
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us,
aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1
# 3)c
p_1<-p_1+geom_point (data = plot_data,
aes (x = long, y = lat),pch = 16)
p_1
#Spliting in to two plots based on year
p_1<-p_1 + facet_grid(. ~ year)
p_1
# 3)d
plot_sub<-subset(plot_data,lat<50&lat>25)
rm(p_1)
map.us <- map_data(map = "state")
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us,
aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1
p_1<-p_1+geom_point (data = plot_sub,
aes (x = long, y = lat),pch = 16)
p_1
#Spliting in to two plots based on year
p_1<-p_1 + facet_grid(. ~ year)
p_1
# 3)e
p_1<-p_1+geom_point (data = plot_sub,aes (x = long,y = lat,colour =pred
),pch = 16)+theme(legend.position=c(.5, .175))+labs(colour="Color")+
scale_colour_gradient(low = "#56B1F7", high = "#132B43")
p_1
# 3)f
p_1<-p_1+geom_text(data = plot_sub,aes (
x = long,
y = lat,label=round(pred,0)
),size=3.2,vjust=0.6)
p_1
rm(p_1)
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us, aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1<-p_1+geom_point (data = plot_sub,aes (x = long, y = lat,colour =pred
),pch = 16) +theme(legend.position=c(.5, .175))+labs(colour="Color")+
scale_colour_gradient(low = "#56B1F7", high = "#132B43")
p_1<-p_1 + facet_grid(. ~ year)
#taking Top 1% add delay
plot_sub2<-subset(plot_sub,pred>=quantile(pred, probs = 0.99))
p_1<-p_1+geom_text(data = plot_sub2,aes (
x = long,
y = lat,label=round(pred,0)
),size=3.2,vjust=0.6)
p_1
# 3)g
p_1<-p_1+geom_text(data = plot_sub2,aes (
x = long,
y = lat,
label=iata
),size=3.2,vjust=-0.5)
p_1
# 3)h
numb07<-table(ffx07$Origin[])
numb08<-table(ffx08$Origin[])
#Making a data.frame with the information from table
flights<-data.frame(c(dimnames(numb07)[[1]],dimnames(numb08)[[1]]),
rbind(cbind(as.numeric(numb07),2007),cbind(as.numeric(numb08),2008)))
names(flights)<-c("Origin","numb","year")
#Merging with plot_sub
plot_sub2<-merge(plot_sub,flights,by.x=c("iata","year"),by.y=c("Origin","year"))
# 3)i
rm(p_1)
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us,
aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1<-p_1+geom_point (
data = plot_sub2,
aes (x = long, y = lat,colour = pred,size =numb/1000),
pch = 16)+labs(size = "Flights (k)",colour="Delay")+
theme(legend.position=c(0.5, .25))+
scale_colour_gradient(low = "#56B1F7", high = "#132B43")
p_1<-p_1 + facet_grid(. ~ year)
p_1
# 4 ####################################################################
plot_sub3<-subset(plot_sub2,(numb>=quantile(numb, probs = 0.95)&year==2007)|(numb>=quantile(numb, probs = 0.95)&year==2008))
p_1<-p_1+geom_text(data = plot_sub3,aes (
x = long,
y = lat,label=round(pred,0)
),size=3.2,vjust=0.6)
p_1<-p_1+geom_text(data = plot_sub3,aes (
x = long,
y = lat,
label=iata
),size=3.2,vjust=1.7)
#Adding city names
p_1<-p_1+geom_text(data = plot_sub3,aes (
x = long,
y = lat,
label=city
),size=3.2,vjust=-0.5)
p_1
# saving the map
ggsave ("C:/Users/Gokul/Desktop/MS/Statistical R/leture/map.pdf", plot =p_1)
####################################################################
################################################################### | /Big Data Analysis.R | no_license | gokulrajkb/Data-Analysis-simple-distributed-Big-Data- | R | false | false | 6,745 | r | ############################################################################
############################################################################
#From,
# Gokul Kaisaravalli Bhojraj
# Id: 80789
# Business Intelligence
############################################################################
############################################################################
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# 1)###################################################
library(ff)
library(ffbase)
# 1)a
system.time(ffx08<-
read.csv.ffdf(file="/Users/Gokul/Desktop/MS/Statistical R/leture/2008.csv.bz2", header=TRUE,
na.string=c("",NA),colClasses=
c(Month="factor",DayOfWeek="factor", Year="factor")))
system.time(ffx07<-
read.csv.ffdf(file="/Users/Gokul/Desktop/MS/Statistical R/leture/2008.csv.bz2", header=TRUE,
na.string=c("",NA),colClasses=
c(Month="factor",DayOfWeek="factor", Year="factor")))
# 1)b
save.ffdf(ffx07, dir = "/Users/Gokul/Desktop/MS/2007.csv.bz2",
clone = FALSE, relativepath = TRUE,overwrite = TRUE)
save.ffdf(ffx08, dir = "/Users/Gokul/Desktop/MS/2008.csv.bz2",
clone = FALSE, relativepath = TRUE,overwrite = TRUE)
load.ffdf(dir="/Users/Gokul/Desktop/MS/2008.csv.bz2")
load.ffdf(dir="/Users/Gokul/Desktop/MS/2007.csv.bz2")
summary(ffx07)
summary(ffx08)
# 1)c
sub_ffx08<-subset(ffx08,Month==12)
sub_ffx08$Month<-droplevels(sub_ffx08$Month)
summary(ffx08)
# 2 ################################################################
# 2)a
# 2)a 1
source("C:/Users/Gokul/Downloads/Chunk_lm.R")
# 2008
load.ffdf(dir="/Users/Gokul/Desktop/MS/2008.csv.bz2")
form<-ArrDelay~Origin+Distance
system.time(reg1<-Chunk_lm(form,data=ffx08,chunkSize=75000,sandwich=FALSE,beta_h=NULL,cores=4))
## system.time(reg <-biglm(form, data=ffx08,sandwich=FALSE))
# 2007
load.ffdf(dir="/Users/Gokul/Desktop/MS/2007.csv.bz2")
form2<-ArrDelay~Origin+Distance
system.time(reg2<-Chunk_lm(form2,data=ffx07,chunkSize=75000,sandwich=FALSE,beta_h=NULL,cores=4))
save(reg1,reg2,file="reg_7_8.RData")
source("C:/Users/Gokul/Downloads/Predicted_airports.R")
pred_7<-pred_airports(beta_h=reg2$coef,data=ffx07,fix_num=mean(ffx07$Distance[]))
pred_8<-pred_airports(beta_h=reg1$coef,data=ffx08,fix_num=mean(ffx08$Distance[]))
head(pred_7)
# 2)a 2
pred_data<-data.frame(Origin=c(names(pred_7),names(pred_8)),
rbind(data.frame(pred=pred_7,year=2007),data.frame(pred=pred_8,year=2008)))
names(pred_data)<-c("Origin","pred","year")
# 2)b
library(XLConnect)
airports<-read.table(file="C:/Users/Gokul/Desktop/MS/Statistical R/leture/airports.csv",
header=TRUE,sep= ",",dec=".",na.string="NA")
head(airports)
# 2)c
?merge
plot_data<-merge(airports,pred_data,by.x="iata",by.y = "Origin")
# 3) #############################################################
# 3)a
install.packages("ggplot2")
library(ggplot2)
library(maps)
# 3)b
map.us <- map_data(map = "state")
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us,
aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1
# 3)c
p_1<-p_1+geom_point (data = plot_data,
aes (x = long, y = lat),pch = 16)
p_1
#Spliting in to two plots based on year
p_1<-p_1 + facet_grid(. ~ year)
p_1
# 3)d
plot_sub<-subset(plot_data,lat<50&lat>25)
rm(p_1)
map.us <- map_data(map = "state")
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us,
aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1
p_1<-p_1+geom_point (data = plot_sub,
aes (x = long, y = lat),pch = 16)
p_1
#Spliting in to two plots based on year
p_1<-p_1 + facet_grid(. ~ year)
p_1
# 3)e
p_1<-p_1+geom_point (data = plot_sub,aes (x = long,y = lat,colour =pred
),pch = 16)+theme(legend.position=c(.5, .175))+labs(colour="Color")+
scale_colour_gradient(low = "#56B1F7", high = "#132B43")
p_1
# 3)f -- label every plotted airport with its rounded predicted delay.
p_1<-p_1+geom_text(data = plot_sub,aes (
x = long,
y = lat,label=round(pred,0)
),size=3.2,vjust=0.6)
p_1
# Rebuild the map from scratch, this time faceted by year.
rm(p_1)
p_1 <- ggplot()
# Grey US base-map polygons (one group per state/region polygon).
p_1 <- p_1 + geom_polygon(data=map.us, aes(x = long, y = lat,group=group),fill = grey(0.5))
# Airports as points coloured by predicted delay (dark = high).
p_1<-p_1+geom_point (data = plot_sub,aes (x = long, y = lat,colour =pred
),pch = 16) +theme(legend.position=c(.5, .175))+labs(colour="Color")+
scale_colour_gradient(low = "#56B1F7", high = "#132B43")
# One panel per year (2007 vs 2008).
p_1<-p_1 + facet_grid(. ~ year)
# taking the top 1% of predicted delay
plot_sub2<-subset(plot_sub,pred>=quantile(pred, probs = 0.99))
# Label only those top-1% airports with their rounded predicted delay.
p_1<-p_1+geom_text(data = plot_sub2,aes (
x = long,
y = lat,label=round(pred,0)
),size=3.2,vjust=0.6)
p_1
# 3)g -- add the IATA airport code above each top-1% point.
p_1<-p_1+geom_text(data = plot_sub2,aes (
x = long,
y = lat,
label=iata
),size=3.2,vjust=-0.5)
p_1
# 3)h -- count flights per origin airport in each year's data set.
numb07<-table(ffx07$Origin[])
numb08<-table(ffx08$Origin[])
#Making a data.frame with the information from table
flights<-data.frame(c(dimnames(numb07)[[1]],dimnames(numb08)[[1]]),
rbind(cbind(as.numeric(numb07),2007),cbind(as.numeric(numb08),2008)))
names(flights)<-c("Origin","numb","year")
#Merging with plot_sub
plot_sub2<-merge(plot_sub,flights,by.x=c("iata","year"),by.y=c("Origin","year"))
# 3)i -- redraw the map sizing points by flight volume (thousands).
rm(p_1)
p_1 <- ggplot()
p_1 <- p_1 + geom_polygon(data=map.us,
aes(x = long, y = lat,group=group),fill = grey(0.5))
p_1<-p_1+geom_point (
data = plot_sub2,
aes (x = long, y = lat,colour = pred,size =numb/1000),
pch = 16)+labs(size = "Flights (k)",colour="Delay")+
theme(legend.position=c(0.5, .25))+
scale_colour_gradient(low = "#56B1F7", high = "#132B43")
p_1<-p_1 + facet_grid(. ~ year)
p_1
# 4 ####################################################################
# Keep the busiest 5% of airports within each year (top-5% by flight count).
plot_sub3<-subset(plot_sub2,(numb>=quantile(numb, probs = 0.95)&year==2007)|(numb>=quantile(numb, probs = 0.95)&year==2008))
# Predicted delay below each busy airport...
p_1<-p_1+geom_text(data = plot_sub3,aes (
x = long,
y = lat,label=round(pred,0)
),size=3.2,vjust=0.6)
# ...IATA code further below...
p_1<-p_1+geom_text(data = plot_sub3,aes (
x = long,
y = lat,
label=iata
),size=3.2,vjust=1.7)
# Adding city names above the point.
p_1<-p_1+geom_text(data = plot_sub3,aes (
x = long,
y = lat,
label=city
),size=3.2,vjust=-0.5)
p_1
# saving the map
# NOTE(review): absolute user-specific Windows path -- not portable.
ggsave ("C:/Users/Gokul/Desktop/MS/Statistical R/leture/map.pdf", plot =p_1)
####################################################################
################################################################### |
library(hBayesDM)
### Name: gng_m4
### Title: Orthogonalized Go/Nogo Task
### Aliases: gng_m4
### ** Examples
## Not run:
##D # Run the model and store results in "output"
##D output <- gng_m4("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)
##D
##D # Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
##D plot(output, type = "trace")
##D
##D # Check Rhat values (all Rhat values should be less than or equal to 1.1)
##D rhat(output)
##D
##D # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal)
##D plot(output)
##D
##D # Show the WAIC and LOOIC model fit estimates
##D printFit(output)
## End(Not run)
| /data/genthat_extracted_code/hBayesDM/examples/gng_m4.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 711 | r | library(hBayesDM)
### Name: gng_m4
### Title: Orthogonalized Go/Nogo Task
### Aliases: gng_m4
### ** Examples
## Not run:
##D # Run the model and store results in "output"
##D output <- gng_m4("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)
##D
##D # Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
##D plot(output, type = "trace")
##D
##D # Check Rhat values (all Rhat values should be less than or equal to 1.1)
##D rhat(output)
##D
##D # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal)
##D plot(output)
##D
##D # Show the WAIC and LOOIC model fit estimates
##D printFit(output)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SegFunctions.R
\name{RCEPoly}
\alias{RCEPoly}
\title{A function to compute Polycentric Relative Centralisation Index}
\usage{
RCEPoly(x, dc = NULL, center = 1, spatobj = NULL, folder = NULL, shape = NULL)
}
\arguments{
\item{x}{- an object of class matrix (or which can be coerced to that class),
where each column represents the distribution of a group within
spatial units. The number of columns should be greater than 1 (at least 2
groups are required). You should not include a column with total
population, because this will be interpreted as a group.}
\item{dc}{- a numeric matrix/vector containing the distances between spatial units
centroids and the central spatial unit(s).}
\item{center}{- a numeric vector giving the number of the spatial units that
represent the centers in the table}
\item{spatobj}{- a spatial object (SpatialPolygonsDataFrame) with
geographic information}
\item{folder}{- a character vector with the folder (directory)
name indicating where the shapefile is located on the drive}
\item{shape}{- a character vector with the name of the shapefile
(without the .shp extension).}
}
\value{
a matrix containing relative centralisation index values
}
\description{
The polycentric version of the relative centralisation index.
The function can be used in two ways: to provide a matrix containing
the distances between spatial/organizational unit centroids or a external
geographic information source (spatial object or shape file).
}
\examples{
x <- segdata@data[ ,1:2]
foldername <- system.file('extdata', package = 'OasisR')
shapename <- 'segdata'
RCEPoly(x, spatobj = segdata, center = c(28, 83) )
RCEPoly(x, folder = foldername, shape = shapename, center = c(28, 83))
center <- c(28, 83)
polydist <- matrix(data = NA, nrow = nrow(x), ncol = length(center))
for (i in 1:ncol(polydist))
polydist[,i] <- distcenter(spatobj = segdata, center = center[i])
RCEPoly(x, dc = polydist)
distmin <- vector(length = nrow(x))
for (i in 1:nrow(polydist)) distmin[i] <- min(polydist[i,])
RCE(x, dc = distmin)
}
\references{
Duncan O. D. and Duncan B. (1955) \emph{A
Methodological Analysis of Segregation Indexes}.
American Sociological Review 20, pp. 210-217
}
\seealso{
\code{\link{RCE}}, \code{\link{RCEPolyK}},
\code{\link{ACEDuncan}}, \code{\link{ACEDuncanPoly}},
\code{\link{ACEDuncanPolyK}}, \code{\link{ACE}}, \code{\link{ACEPoly}}
}
| /man/RCEPoly.Rd | no_license | cran/OasisR | R | false | true | 2,535 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SegFunctions.R
\name{RCEPoly}
\alias{RCEPoly}
\title{A function to compute Polycentric Relative Centralisation Index}
\usage{
RCEPoly(x, dc = NULL, center = 1, spatobj = NULL, folder = NULL, shape = NULL)
}
\arguments{
\item{x}{- an object of class matrix (or which can be coerced to that class),
where each column represents the distribution of a group within
spatial units. The number of columns should be greater than 1 (at least 2
groups are required). You should not include a column with total
population, because this will be interpreted as a group.}
\item{dc}{- a numeric matrix/vector containing the distances between spatial units
centroids and the central spatial unit(s).}
\item{center}{- a numeric vector giving the number of the spatial units that
represent the centers in the table}
\item{spatobj}{- a spatial object (SpatialPolygonsDataFrame) with
geographic information}
\item{folder}{- a character vector with the folder (directory)
name indicating where the shapefile is located on the drive}
\item{shape}{- a character vector with the name of the shapefile
(without the .shp extension).}
}
\value{
a matrix containing relative centralisation index values
}
\description{
The polycentric version of the relative centralisation index.
The function can be used in two ways: to provide a matrix containing
the distances between spatial/organizational unit centroids or a external
geographic information source (spatial object or shape file).
}
\examples{
x <- segdata@data[ ,1:2]
foldername <- system.file('extdata', package = 'OasisR')
shapename <- 'segdata'
RCEPoly(x, spatobj = segdata, center = c(28, 83) )
RCEPoly(x, folder = foldername, shape = shapename, center = c(28, 83))
center <- c(28, 83)
polydist <- matrix(data = NA, nrow = nrow(x), ncol = length(center))
for (i in 1:ncol(polydist))
polydist[,i] <- distcenter(spatobj = segdata, center = center[i])
RCEPoly(x, dc = polydist)
distmin <- vector(length = nrow(x))
for (i in 1:nrow(polydist)) distmin[i] <- min(polydist[i,])
RCE(x, dc = distmin)
}
\references{
Duncan O. D. and Duncan B. (1955) \emph{A
Methodological Analysis of Segregation Indexes}.
American Sociological Review 41, pp. 210-217
}
\seealso{
\code{\link{RCE}}, \code{\link{RCEPolyK}},
\code{\link{ACEDuncan}}, \code{\link{ACEDuncanPoly}},
\code{\link{ACEDuncanPolyK}}, \code{\link{ACE}}, \code{\link{ACEPoly}}
}
|
\name{ic.var}
\alias{ic.var}
\title{Calcola intervallo di confidenza per la varianza}
\description{
Questa funzione effettua il calcolo dell'intervallo di
confidenza per la varianza di campione gaussiano.
}
\usage{
ic.var(x, twosides = TRUE, conf.level = 0.95)
}
\arguments{
\item{x}{vettore di dati}
\item{twosides}{logico. Se \code{FALSE} l'estremo inferiore e' posto pari a 0}
\item{conf.level}{livello confidenza}
}
\examples{
x <- c(0.39, 0.68, 0.82, 1.35, 1.38, 1.62, 1.70,
1.71, 1.85, 2.14, 2.89, 3.69)
ic.var(x)
ic.var(x,FALSE)
}
\keyword{univar}
| /man/ic.var.Rd | no_license | cran/labstatR | R | false | false | 572 | rd | \name{ic.var}
\alias{ic.var}
\title{Calcola intervallo di confidenza per la varianza}
\description{
Questa funzione effettua il calcolo dell'intervallo di
confidenza per la varianza di campione gaussiano.
}
\usage{
ic.var(x, twosides = TRUE, conf.level = 0.95)
}
\arguments{
\item{x}{vettore di dati}
\item{twosides}{logico. Se \code{FALSE} l'estremo inferiore e' posto pari a 0}
\item{conf.level}{livello confidenza}
}
\examples{
x <- c(0.39, 0.68, 0.82, 1.35, 1.38, 1.62, 1.70,
1.71, 1.85, 2.14, 2.89, 3.69)
ic.var(x)
ic.var(x,FALSE)
}
\keyword{univar}
|
# Auto-generated test case (appears fuzzer-derived, per the "AFL" path) for the
# internal function CNull:::communities_individual_based_sampling_beta.
# The argument list deliberately probes edge cases: `m` is NULL, `repetitions`
# is 0, and `in_m` is a 5x1 matrix holding extreme double values.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 2.91445259343564e+234, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
# Call the target with the crafted arguments and print the structure of the
# result -- running without a crash is the point of the test.
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
library(shiny)
# Total repayment under simple (non-compounding) interest.
#
# Args:
#   principal: starting amount of money.
#   rate: interest rate per time unit, expressed as a percentage.
#   times: number of time units over which interest accrues.
# Returns: the principal plus the simple interest accrued over `times` periods.
simpleTotal <- function(principal, rate, times) {
  accrued <- principal * rate * times / 100
  principal + accrued
}
# Total repayment under compound interest.
#
# Args:
#   principal: starting amount of money.
#   rate: interest rate per time unit, expressed as a percentage.
#   times: number of time units over which the money is invested.
#   compoundTime: number of compounding periods per time unit.
# Returns: the principal grown at the periodic rate over every compounding
#   period, i.e. principal * (1 + rate/(100*compoundTime))^(times*compoundTime).
compoundTotal <- function(principal, rate, times, compoundTime) {
  periodic_rate <- rate / (100 * compoundTime)
  n_periods <- times * compoundTime
  principal * (1 + periodic_rate)^n_periods
}
# Shiny server: computes the interest result from the UI inputs and mirrors
# each input back as text so the UI can assemble a result sentence.
# Recomputation of the numeric outputs is gated on the "calculate" button:
# reading input$calculate makes it the only reactive trigger, while isolate()
# stops the other inputs from invalidating the expression on every keystroke.
shinyServer(function(input, output) {
# output only used once, therefore only needs to be named once:
# Interest earned = total repayment minus the principal.
output$prediction <- renderPrint({
input$calculate
if (input$choice == "Simple interest") {
isolate(simpleTotal(input$principal, input$rate, input$times) - input$principal)
} else {
isolate(compoundTotal(input$principal, input$rate, input$times, input$compoundTime) - input$principal)
}
})
# Total amount (principal + interest); same trigger pattern as above.
output$total <- renderPrint({
input$calculate
if (input$choice == "Simple interest") {
isolate(simpleTotal(input$principal, input$rate, input$times))
} else {
isolate(compoundTotal(input$principal, input$rate, input$times, input$compoundTime))
}
})
# Echo inputs used in the shared part of the result sentence.
# NOTE(review): strsplit() returns a list; paste0() relies on implicit
# coercion of that list to character -- verify the rendered text is as
# intended (it strips a trailing "s" to singularise the time unit).
output$currency2 <- renderText({input$currency})
output$compoundTime <- renderText({input$compoundTime})
output$timeMeasure2 <- renderText({paste0(strsplit(input$timeMeasure, "s"), ",")})
# I need to name everything twice, otherwise it would not work: https://github.com/rstudio/shiny/issues/743
# (a Shiny output object can only be bound to one UI element, hence the
# duplicated Simple/Compound variants below.)
# Output for simple interest text outcome:
output$principalSimple <- renderText({input$principal})
output$currencySimple <- renderText({input$currency})
output$choiceSimple <- renderText({tolower(input$choice)})
output$rateSimple <- renderText({paste0(input$rate, "%")})
output$timesSimple <- renderText({input$times})
output$timeMeasureSimple <- renderText({paste0(strsplit(input$timeMeasure, "s"), "(s)", ",")})
# Output for compound interest text outcome:
output$principalCompound <- renderText({input$principal})
output$currencyCompound <- renderText({input$currency})
output$choiceCompound <- renderText({tolower(input$choice)})
output$rateCompound <- renderText({paste0(input$rate, "%")})
output$timesCompound <- renderText({input$times})
output$timeMeasureCompound <- renderText({paste0(strsplit(input$timeMeasure, "s"), "(s)", ",")})
})
| /server.R | no_license | SebasJ23/Coursera-DevDataProd-Project | R | false | false | 2,257 | r | library(shiny)
simpleTotal <- function(principal, rate, times) {
principal * (1 + (rate * times)/100)
}
compoundTotal <- function(principal, rate, times, compoundTime) {
principal * ((1 + (rate / (100 * compoundTime))) ^ (times * compoundTime))
}
shinyServer(function(input, output) {
# output only used once, therefore only needs to be named once:
output$prediction <- renderPrint({
input$calculate
if (input$choice == "Simple interest") {
isolate(simpleTotal(input$principal, input$rate, input$times) - input$principal)
} else {
isolate(compoundTotal(input$principal, input$rate, input$times, input$compoundTime) - input$principal)
}
})
output$total <- renderPrint({
input$calculate
if (input$choice == "Simple interest") {
isolate(simpleTotal(input$principal, input$rate, input$times))
} else {
isolate(compoundTotal(input$principal, input$rate, input$times, input$compoundTime))
}
})
output$currency2 <- renderText({input$currency})
output$compoundTime <- renderText({input$compoundTime})
output$timeMeasure2 <- renderText({paste0(strsplit(input$timeMeasure, "s"), ",")})
# I need to name everything twice, otherwise it would not work: https://github.com/rstudio/shiny/issues/743
# Output for simple interest text outcome:
output$principalSimple <- renderText({input$principal})
output$currencySimple <- renderText({input$currency})
output$choiceSimple <- renderText({tolower(input$choice)})
output$rateSimple <- renderText({paste0(input$rate, "%")})
output$timesSimple <- renderText({input$times})
output$timeMeasureSimple <- renderText({paste0(strsplit(input$timeMeasure, "s"), "(s)", ",")})
# Output for compound interest text outcome:
output$principalCompound <- renderText({input$principal})
output$currencyCompound <- renderText({input$currency})
output$choiceCompound <- renderText({tolower(input$choice)})
output$rateCompound <- renderText({paste0(input$rate, "%")})
output$timesCompound <- renderText({input$times})
output$timeMeasureCompound <- renderText({paste0(strsplit(input$timeMeasure, "s"), "(s)", ",")})
})
|
library(data.table) # used for reading in csv files
library(tidyverse)
library(lubridate)
library(scales)
# https://rud.is/b/2019/06/30/make-refreshing-segmented-column-charts-with-ggchicklet/
library(ggplot2)
library(hrbrthemes) # used for scale_y_percent
# Summary stats
library(janitor)
# Latex output
library(knitr)
library(kableExtra)
# Load the anonymised lecture codings produced upstream, and convert the
# data.frame to a data.table in place.
all_codings_anon <- readRDS("all_codings_anon.rds")
setDT(all_codings_anon)
#
# Define colours for the plots
#
# One fill colour per FILL+ code, grouped by interactivity level (see the
# inline group labels); used with scale_fill_manual()/scale_colour_manual().
fillplus <- c(
# non-interactive
"AD" = "grey",
"LT" = "peachpuff",
# vicarious interactive
"LQ" = "#c7e9c0", # mid green
"LR" = "#edf8e9", # pale green
"SQ" = "#cbc9e2", # pale purple
"SR" = "#bdd7e7", # pale blue
# interactive
"CQ" = "#238b45", # dark green
"ST" = "#41b6c4", # mid blue / teal
"SD" = "#54278f", # dark purple
"FB" = "#fd8d3c" # orange
)
# One colour per aggregated interactivity level (NON / VIC / INT plus a
# catch-all "NA" level), matching the code_grp factor defined further down.
interactivity_levels <- c(
"NON" = "peachpuff",
"VIC" = "#a1d99b",
"INT" = "#225ea8",
"NA" = "white"
)
#
# Data summary table (Table 2 in the paper)
#
# Data summary (Table 2 in the paper).
#
# tab_number_of_lecturers: number of distinct lecturers per discipline,
# with a grand-total row appended by janitor::adorn_totals().
tab_number_of_lecturers <- all_codings_anon %>%
  group_by(discipline, lecturer_anonid) %>%
  tally() %>%
  group_by(discipline) %>%
  summarise(
    num_lecturers = n()
  ) %>%
  adorn_totals("row")

# tab_coding_summary: per discipline, the number of coded course/lecturer
# combinations ("count") and the total number of coded lectures, joined with
# the lecturer counts above.
# NOTE(review): BIO/CHE are excluded here but NOT from tab_number_of_lecturers,
# so the two "Total" rows cover different discipline sets -- confirm intended.
tab_coding_summary <- all_codings_anon %>%
  filter(!discipline %in% c("BIO", "CHE")) %>%
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  tally() %>%
  group_by(discipline) %>%
  summarise(
    count = n(),
    num_lectures = sum(n)
  ) %>%
  adorn_totals("row") %>%
  left_join(tab_number_of_lecturers) %>%
  select(discipline, num_lecturers, count, num_lectures)
tab_coding_summary

# Render the summary as a LaTeX table with booktabs rules.
tab_coding_summary %>%
  kable("latex", booktabs = TRUE)
# Produce a table where each row is an assigned code, with a given duration
# (one row per coded event, unnested from the per-lecture list column).
codes_with_durations = all_codings_anon %>%
unnest(codes_with_duration) %>%
mutate(
# Define groupings of codes by interactivity level (as in Wood et al.)
code_grp = as.factor(case_when(
code %in% c("AD", "LT") ~ "NON",
code %in% c("LQ", "SR", "SQ", "LR") ~ "VIC",
code %in% c("CQ", "ST", "SD", "FB") ~ "INT",
TRUE ~ "NA"
)),
# Put the factors into a sensible order
code = fct_relevel(code, "AD", "LT", "LQ", "LR", "SQ", "SR", "CQ", "ST", "SD", "FB"),
code_grp = fct_relevel(code_grp, "NON", "VIC", "INT"),
# Duration counted only for interactive (INT) codes; 0 otherwise.
duration_int = ifelse(code_grp %in% c("INT"), duration, 0)
# course = fct_reorder(course, course, .desc = TRUE),
# course = fct_reorder(course, discipline, .desc = TRUE)
) %>%
select(-raw_coding, -coding, -codes_by_second)
# Persist the flat per-event table for reuse/sharing alongside the paper.
write_csv(codes_with_durations, "anon_codes_with_durations.csv")
#
# Full detailed dataset
#
codes_with_durations %>%
filter(!is.na(duration)) %>% # remove rows corresponding to "END", which have null duration
mutate(
title = simple_title,
title = lecturer_and_session,
# order by date
title = fct_reorder(title, lecture_date, .desc = TRUE)
) %>%
ggplot(aes(title, duration, group = time, fill = code)) +
geom_col(position = position_stack(reverse = TRUE)) +
facet_grid(rows = vars(discipline, course_anonid),
scales = "free",
space = "free",
shrink = TRUE) +
scale_y_time(breaks = 60*5*(0:12),
labels = 5*(0:12),
limits = c(0, 60*60)) +
scale_fill_manual("FILL+ code", values = fillplus) +
coord_flip() +
labs(x = NULL, y = "Time") +
theme_minimal(base_size = 11.5) +
theme(strip.text.y = element_text(angle = 0),
panel.grid.minor.x=element_blank(),
panel.grid.major.y=element_blank(),
# panel.spacing=grid::unit(2, "lines"),
strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_timeline_allcourses.pdf",width=30,height=60,units="cm",dpi=300)
# Number each lecturer's lectures 1..n within a course, in date order, so
# later plots can use a stable "lecture number" axis instead of raw dates.
lecture_numbers = codes_with_durations %>%
select(discipline, course_anonid, lecturer_anonid, lecture_id, lecture_date) %>%
distinct() %>%
arrange(discipline, course_anonid, lecturer_anonid, lecture_date) %>%
group_by(discipline, course_anonid, lecturer_anonid) %>%
mutate(lecture_number = row_number()) %>%
ungroup()
codes_with_durations %>%
left_join(lecture_numbers %>% select(lecture_id, lecture_number)) %>%
mutate(
title = simple_title,
title = lecture_number
# order by date
#title = fct_reorder(title, title, .desc = TRUE)
) %>%
ggplot(aes(title, duration, group = time, fill = code)) +
geom_col(position = position_stack(reverse = TRUE)) +
facet_grid(rows = vars(discipline, course_anonid, lecturer_anonid),
scales = "free",
space = "free",
shrink = TRUE) +
scale_y_time(breaks = 60*5*(0:12),
labels = 5*(0:12),
limits = c(0, 60*60)) +
scale_fill_manual("FILL+ code", values = fillplus) +
coord_flip() +
labs(x = NULL, y = "Time") +
theme_minimal(base_size = 11.5) +
theme(strip.text.y = element_text(angle = 0),
panel.grid.minor.x=element_blank(),
panel.grid.major.y=element_blank(),
# panel.spacing=grid::unit(2, "lines"),
strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_timeline_allcourses_alt.pdf",width=30,height=60,units="cm",dpi=300)
# Draw the stacked FILL+ timeline for every lecture of one discipline and
# save it as Paper1_FILLplus_timeline_<discipline>.pdf.
#
# Args:
#   discipline_to_plot: discipline code to filter on (e.g. "MATH").
#   width_cm, height_cm: output size of the saved PDF, in centimetres.
# Side effect: writes the PDF via ggsave(); returns ggsave()'s value.
plot_all_timelines = function(discipline_to_plot, width_cm=30, height_cm=60) {
data_to_plot = codes_with_durations %>%
filter(!is.na(duration)) %>% # remove rows corresponding to "END", which have null duration
filter(discipline == discipline_to_plot) %>%
left_join(lecture_numbers %>% select(lecture_id, lecture_number)) %>%
mutate(
# NOTE(review): this assignment is immediately overwritten below -- dead code.
title = simple_title,
title = as.factor(lecture_number),
# order by date
title = fct_reorder(title, lecture_number, .desc = TRUE)
)
# One horizontal bar per lecture; segments are the coded events stacked in
# chronological order, coloured by FILL+ code, faceted by course/lecturer.
data_to_plot %>%
ggplot(aes(title, duration, group = time, fill = code)) +
geom_col(position = position_stack(reverse = TRUE)) +
facet_grid(rows = vars(course_anonid, lecturer_anonid),
scales = "free",
space = "free",
shrink = TRUE,
switch = "y") +
# x-axis (after coord_flip) in minutes, 5-minute ticks up to one hour
scale_y_time(breaks = 60*5*(0:12),
labels = 5*(0:12),
limits = c(0, 60*60)) +
scale_fill_manual("FILL+ code", values = fillplus) +
coord_flip() +
#scale_x_reverse() +
labs(x = NULL, y = "Time") +
theme_minimal(base_size = 11.5) +
theme(strip.text.y = element_text(angle = 180),
strip.placement = "outside",
panel.grid.minor.x=element_blank(),
panel.grid.major.y=element_blank(),
# panel.spacing=grid::unit(2, "lines"),
strip.text=element_text(hjust=0, size=12))
ggsave(paste0("Paper1_FILLplus_timeline_",discipline_to_plot,".pdf"),width=width_cm,height=height_cm,units="cm",dpi=300)
}
plot_all_timelines("MATH", height_cm = 40)
plot_all_timelines("PHYS", height_cm = 20)
plot_all_timelines("VET", height_cm = 17)
#
# Summaries for lecturer/course combinations
#
# Levels of interactivity
interactivity_proportions_by_lecturer = codes_with_durations %>%
group_by(discipline, course_anonid, lecturer_anonid, code_grp) %>%
summarise(
mins = sum(duration)
) %>%
group_by(discipline, course_anonid, lecturer_anonid) %>%
mutate(
sum = sum(mins, na.rm = TRUE),
prop = mins/sum,
# prepare to order by the proportio of NON
prop_NON = if_else(code_grp == "NON", prop, 0),
prop_NON = max(prop_NON)
) %>%
ungroup() %>%
filter(!code_grp == "NA")
interactivity_proportions_by_lecturer %>%
mutate(
course_lecturer = paste(course_anonid, lecturer_anonid),
course_lecturer = fct_reorder(course_lecturer, prop_NON, .desc = FALSE)
) %>%
ggplot(aes(course_lecturer, prop, fill = code_grp)) +
geom_col(position = position_stack(reverse = TRUE)) +
facet_grid(rows = vars(discipline),
scales = "free",
space = "free",
shrink = TRUE) +
scale_y_percent(breaks = 0.2*c(0:5)) +
scale_fill_manual("Interactivity", values = interactivity_levels) +
coord_flip() +
labs(x = NULL, y = "Proportion of time") +
# theme_ipsum_rc() +
theme_minimal(base_size = 11.5) +
theme(strip.text.y = element_text(angle = 0),
panel.grid.minor.x=element_blank(),
panel.grid.major.y=element_blank(),
# panel.spacing=grid::unit(2, "lines"),
strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_interactivity_proportions.pdf",width=20,height=14,units="cm",dpi=300)
# Use of each code
code_proportions_by_lecturer = codes_with_durations %>%
filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
group_by(discipline, course_anonid, lecturer_anonid, code) %>%
summarise(
mins = sum(duration)
) %>%
group_by(discipline, course_anonid, lecturer_anonid) %>%
mutate(
sum = sum(mins, na.rm = TRUE),
prop = mins/sum
) %>%
ungroup() %>%
# add on their NON proportion so we can sort by it (and therefore keep the same order as the previous plot)
left_join(interactivity_proportions_by_lecturer %>%
select(course_anonid, lecturer_anonid, prop_NON) %>%
distinct(),
by = c("course_anonid", "lecturer_anonid"))
code_proportions_by_lecturer %>%
mutate(
course_lecturer = paste(course_anonid, lecturer_anonid),
course_lecturer = fct_reorder(course_lecturer, prop_NON, .desc = FALSE)
) %>%
ggplot(aes(course_lecturer, prop, fill = code)) +
geom_col(position = position_stack(reverse = TRUE)) +
facet_grid(rows = vars(discipline),
scales = "free",
space = "free",
shrink = TRUE) +
scale_y_percent(breaks = 0.2*c(0:5)) +
scale_fill_manual("FILL+ code", values = fillplus) +
coord_flip() +
labs(x = NULL, y = "Proportion of time") +
# theme_ipsum_rc() +
theme_minimal(base_size = 11.5) +
theme(strip.text.y = element_text(angle = 0),
panel.grid.minor.x=element_blank(),
panel.grid.major.y=element_blank(),
# panel.spacing=grid::unit(2, "lines"),
strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_code_proportions.pdf",width=20,height=14,units="cm",dpi=300)
# Analysis of lecturer questions
# Distribution of the number of LQ (lecturer question) events per lecture
# session: min / mean / max / median across all coded sessions.
codes_with_durations %>%
filter(code == "LQ") %>%
group_by(course_anonid, lecturer_anonid, session_number) %>%
summarise(
num_qs = n()
) %>%
ungroup() %>%
# NOTE(review): sessions with zero LQ events vanish after the filter above,
# so these statistics ignore question-free lectures -- confirm intended
# (the later sum(code == "LQ") approach does count such sessions as 0).
summarise(
min_qs = min(num_qs),
mean_qs = mean(num_qs),
max_qs = max(num_qs),
median_qs = median(num_qs)
)
codes_with_durations %>%
group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
summarise(
num_qs = sum(code == "LQ", na.rm = TRUE)
) %>%
ungroup() %>%
# add the mean for each course-lecturer combo
group_by(discipline, course_anonid, lecturer_anonid) %>%
mutate(
mean_qs = mean(num_qs, na.rm = TRUE),
se_qs = sd(num_qs, na.rm = TRUE) / sqrt(n())
) %>%
ungroup() %>%
mutate(
#position = parse_number(course_anonid)*1 + 100*num_qs,
course_num = parse_number(course_anonid),
course_and_lecturer = paste(course_anonid, lecturer_anonid),
#course_and_lecturer = paste(course_acronym, course_anonid, lecturer),
course_and_lecturer = fct_reorder(course_and_lecturer, mean_qs, .desc = TRUE)
) %>%
ggplot(aes(x = course_and_lecturer, y = num_qs, fill = course_anonid)) +
facet_grid(cols = vars(discipline),
scales = "free",
space = "free_x",
shrink = TRUE) +
#geom_violin(fill = fillplus["LQ"]) +
#geom_violin() +
#ylim(-1, 50) +
#geom_boxplot(alpha = 0.5, width = 0.3, colour = "#999999") +
geom_point(aes(color = course_num),
position = position_jitter(width = 0.1),
alpha = 0.8) +
geom_errorbar(aes(ymin=(mean_qs-se_qs), ymax=(mean_qs+se_qs)), width=.2, colour = "black") +
geom_point(aes(x = course_and_lecturer, y = mean_qs), colour = "black", shape = 4) +
#coord_flip() +
labs(x = NULL, y = "Number of questions") +
theme_minimal(base_size = 16) +
scale_colour_viridis_c() +
scale_fill_viridis_d() +
theme(strip.text.y = element_text(angle = 0),
axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
legend.position = "none",
panel.grid.minor.y=element_blank(),
#panel.grid.major.x=element_blank(),
# panel.spacing=grid::unit(2, "lines"),
#strip.text=element_text(hjust=0, size=12)
)
#ggtitle("Distribution of number of questions asked per lecture")
ggsave("Paper1_FILLplus_LQ_distribution_old.pdf",width=20,height=10,units="cm",dpi=300)
# Per-session counts of lecturer questions (LQ) and student questions (SQ),
# plus the per course/lecturer mean and standard error used for the error
# bars in the two plots that follow.
summary_of_LQ_SQ = codes_with_durations %>%
group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
summarise(
num_LQ = sum(code == "LQ", na.rm = TRUE),
num_SQ = sum(code == "SQ", na.rm = TRUE)
) %>%
ungroup() %>%
# add the mean for each course-lecturer combo
group_by(discipline, course_anonid, lecturer_anonid) %>%
mutate(
mean_LQ = mean(num_LQ, na.rm = TRUE),
se_LQ = sd(num_LQ, na.rm = TRUE) / sqrt(n()),
mean_SQ = mean(num_SQ, na.rm = TRUE),
se_SQ = sd(num_SQ, na.rm = TRUE) / sqrt(n())
) %>%
ungroup() %>%
mutate(
# numeric course id (used as a continuous colour scale in the plots)
course_num = parse_number(course_anonid),
course_and_lecturer = paste(course_anonid, lecturer_anonid)
)
# LQs
summary_of_LQ_SQ %>%
mutate(
course_and_lecturer = fct_reorder(course_and_lecturer, mean_LQ, .desc = TRUE)
) %>%
ggplot(aes(x = course_and_lecturer, y = num_LQ, fill = course_anonid)) +
facet_grid(cols = vars(discipline),
scales = "free",
space = "free_x",
shrink = TRUE) +
geom_point(aes(color = course_num),
position = position_jitter(width = 0.1),
alpha = 0.8) +
geom_errorbar(aes(ymin=(mean_LQ-se_LQ), ymax=(mean_LQ+se_LQ)), width=.2, colour = "black") +
geom_point(aes(x = course_and_lecturer, y = mean_LQ), colour = "black", shape = 4) +
labs(x = NULL, y = "Number of questions") +
theme_minimal(base_size = 16) +
scale_colour_viridis_c() +
scale_fill_viridis_d() +
theme(strip.text.y = element_text(angle = 0),
axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
legend.position = "none",
panel.grid.minor.y=element_blank(),
)
ggsave("Paper1_FILLplus_LQ_distribution.pdf",width=20,height=10,units="cm",dpi=300)
# SQs
summary_of_LQ_SQ %>%
mutate(
course_and_lecturer = fct_reorder(course_and_lecturer, mean_SQ, .desc = TRUE)
) %>%
ggplot(aes(x = course_and_lecturer, y = num_SQ, fill = course_anonid)) +
facet_grid(cols = vars(discipline),
scales = "free",
space = "free_x",
shrink = TRUE) +
geom_point(aes(color = course_num),
position = position_jitter(width = 0.1),
alpha = 0.8) +
geom_errorbar(aes(ymin=(mean_SQ-se_SQ), ymax=(mean_SQ+se_SQ)), width=.2, colour = "black") +
geom_point(aes(x = course_and_lecturer, y = mean_SQ), colour = "black", shape = 4) +
labs(x = NULL, y = "Number of student questions") +
theme_minimal(base_size = 16) +
scale_colour_viridis_c() +
scale_fill_viridis_d() +
theme(strip.text.y = element_text(angle = 0),
axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
legend.position = "none",
panel.grid.minor.y=element_blank(),
)
ggsave("Paper1_FILLplus_SQ_distribution.pdf",width=20,height=10,units="cm",dpi=300)
# Table for avg durations
# For each session, take the END marker's timestamp as the session length and
# back-fill it (zoo::na.locf with fromLast = TRUE) onto all earlier rows,
# then compute each code's share of total session time; finally average
# those per-session shares for each code.
codes_with_durations %>%
group_by(lecturer_and_session) %>%
mutate(endtime = case_when(code == "END" ~ time),
endtime = zoo::na.locf(endtime, fromLast = TRUE)) %>%
ungroup() %>%
group_by(lecturer_and_session, code) %>%
mutate(session_total = sum (duration),
session_perc = session_total/endtime) %>%
ungroup() %>%
group_by(code) %>%
# mean/sd of each code's share of session time, across all sessions
summarise(mean = mean(session_perc),
sd = sd(session_perc))
code_summary_by_session = codes_with_durations %>%
filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
group_by(discipline, course_anonid, lecturer_anonid, session_number, code) %>%
summarise(
mins = sum(duration)
) %>%
group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
mutate(
sum = sum(mins, na.rm = TRUE),
prop = mins/sum
)
#This is an improved version that takes account of some codes not being observed
code_summary_by_session = codes_with_durations %>%
filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
group_by(discipline, course_anonid, lecturer_anonid, session_number, code) %>%
# add entries for any codes which are missing
complete(code,
nesting(discipline, course_anonid, lecturer_anonid, session_number),
fill = list(duration = 0)) %>%
summarise(
mins = sum(duration)
) %>%
group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
mutate(
sum = sum(mins, na.rm = TRUE),
prop = mins/sum
)
code_summary_by_session %>%
ggplot(aes(x = lecturer_anonid, y = prop, colour = code)) +
facet_grid(~code) +
geom_point() +
theme_minimal(base_size = 16)
#
# Investigation of LT
#
codes_with_durations %>%
filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
filter(code == "LT") %>%
group_by(course_anonid, lecturer_anonid) %>%
mutate(
duration_LT = case_when(code == "LT" ~ duration),
longest_LT = max(duration_LT, na.rm = TRUE),
mean_LT = mean(duration_LT, na.rm = TRUE)
) %>%
ungroup() %>%
mutate(
course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
course_and_lecturer = fct_reorder(course_and_lecturer, longest_LT, .desc = TRUE)
) %>%
ggplot(aes(x = course_and_lecturer, y = duration, colour = code)) +
facet_grid(~code) +
geom_point() +
geom_point(aes(x = course_and_lecturer, y = mean_LT), colour = "red", shape = 4) +
scale_colour_manual("FILL+ code", values = fillplus) +
theme_minimal(base_size = 16)
all_LT_durations = codes_with_durations %>%
filter(code == "LT") %>%
select(duration) %>%
mutate(duration = duration)
all_LT_durations %>% summary()
all_LT_durations %>%
ggplot(aes(x = duration)) + geom_density() + labs(x = "duration of LT in seconds")
all_LT_durations %>%
ggplot(aes(x = log(duration))) + geom_density() + labs(x = "log(duration of LT)")
# Per course/lecturer/code summary of event durations: max, mean, sd, total
# seconds, and the number of events for each FILL+ code.
code_use_summary = codes_with_durations %>%
filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
group_by(discipline, course_anonid, lecturer_anonid, code) %>%
summarise(
duration_max = max(duration, na.rm = TRUE),
duration_mean = mean(duration, na.rm = TRUE),
duration_sd = sd(duration, na.rm = TRUE),
duration_total = sum(duration, na.rm = TRUE),
duration_count = n()
)
# Long form: the "duration_<stat>" columns become a value column per "stat"
# label (the ".value"/names_sep spec splits the names on the underscore).
code_use_summary_long = code_use_summary %>%
pivot_longer(
cols = contains("duration_"),
names_to = c(".value", "stat"),
names_sep = "_"
)
code_use_summary %>%
mutate(
course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
#course_and_lecturer = fct_reorder(course_and_lecturer, longest_LT, .desc = TRUE)
) %>%
ggplot(aes(x = course_and_lecturer, y = duration_mean, colour = code)) +
facet_grid(~code) +
geom_point() +
#geom_point(aes(x = course_and_lecturer, y = mean_LT), colour = "red", shape = 4) +
scale_colour_manual("FILL+ code", values = fillplus) +
theme_minimal(base_size = 16)
code_use_summary %>%
group_by(discipline, code) %>%
summarise(
avg_duration_max = mean(duration_max, na.rm = TRUE)/60,
avg_duration_mean = mean(duration_mean, na.rm = TRUE)/60,
avg_duration_sd = mean(duration_sd, na.rm = TRUE)/60
) %>%
filter(code == "LT")
## Scatter of individual LT (lecturer talk) segment durations per
## course/lecturer combination (x axis ordered by decreasing mean LT),
## faceted by discipline.  The black cross marks the mean and the error bar
## its standard error; durations are plotted in minutes.
codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  filter(code == "LT") %>%
  group_by(course_anonid, lecturer_anonid) %>%
  mutate(
    # duration_LT equals duration here (already filtered to LT); kept for
    # symmetry with the variant below that computes it before filtering.
    duration_LT = case_when(code == "LT" ~ duration),
    longest_LT = max(duration_LT, na.rm = TRUE),
    mean_LT = mean(duration_LT, na.rm = TRUE),
    se_LT = sd(duration_LT, na.rm = TRUE) / sqrt(n())
  ) %>%
  ungroup() %>%
  mutate(
    course_num = parse_number(course_anonid),
    course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_LT, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = duration/60, colour = course_num)) +
  facet_grid(~discipline ,
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  geom_point() +
  geom_errorbar(aes(ymin=(mean_LT-se_LT)/60, ymax=(mean_LT+se_LT)/60), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_LT/60), colour = "black", shape = 4) +
  #scale_colour_manual("FILL+ code", values = fillplus) +
  scale_colour_viridis_c() +
  theme_minimal(base_size = 16) +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),) +
  labs(#x = "Course/lecturer combination",
    x = "",
    y = "Duration of LT (min)")
ggsave("Paper1_FILLplus_LT_distribution.pdf",width=20,height=10,units="cm",dpi=300)
## Variant of the plot above that also overlays (red cross) the mean
## proportion of each session spent in LT, rescaled by *50 so it shares the
## minutes axis; the x axis is ordered by that mean proportion instead.
codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  # add the proportion of time spent in LT for each session
  group_by(course_anonid, lecturer_anonid, session_number) %>%
  mutate(
    lecture_duration = sum(duration),
    duration_LT = case_when(code == "LT" ~ duration),
    prop_LT = sum(duration_LT, na.rm = TRUE) / lecture_duration,
  ) %>%
  #filter(code == "LT") %>%
  group_by(course_anonid, lecturer_anonid) %>%
  mutate(
    longest_LT = max(duration_LT, na.rm = TRUE),
    mean_LT = mean(duration_LT, na.rm = TRUE),
    se_LT = sd(duration_LT, na.rm = TRUE) / sqrt(n()),
    mean_prop_LT = mean(prop_LT),
  ) %>%
  ungroup() %>%
  mutate(
    course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
    # The second fct_reorder overrides the first: final order is by
    # decreasing mean proportion of time spent in LT.
    course_and_lecturer = fct_reorder(course_and_lecturer, longest_LT, .desc = TRUE),
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_prop_LT, .desc = TRUE)
  ) %>%
  # restrict the plot to just LT durations
  filter(code == "LT") %>%
  ggplot(aes(x = course_and_lecturer, y = duration/60, colour = code)) +
  facet_grid(~discipline ,
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  geom_point() +
  geom_errorbar(aes(ymin=(mean_LT-se_LT)/60, ymax=(mean_LT+se_LT)/60), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_LT/60), colour = "black", shape = 4) +
  # NOTE(review): *50 is an ad-hoc rescaling so the proportion fits on the
  # minutes axis — there is no secondary axis to read it against.
  geom_point(aes(x = course_and_lecturer, y = mean_prop_LT*50), colour = "red", shape = 4) +
  #scale_colour_manual("FILL+ code", values = fillplus) +
  scale_colour_viridis_c() +
  theme_minimal(base_size = 16) +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),) +
  labs(x = "Course/lecturer combination",
       y = "Duration of LT (min)")
ggsave("Paper1_FILLplus_LT_distribution_withprops.pdf",width=20,height=10,units="cm",dpi=300)
# proportions in each session sum to 1
code_summary_by_session %>%
  group_by(discipline, lecturer_anonid, session_number) %>%
  summarise(tot_prop = sum(prop))
## Mean and SD of the per-session time proportion of each code, by
## discipline, plus an "OVERALL" set of rows pooled across disciplines.
code_summary_table = code_summary_by_session %>%
  group_by(discipline, code) %>%
  summarise(
    mean_prop = mean(prop),
    sd_prop = sd(prop)
  ) %>%
  bind_rows(
    # produce a version for all the disciplines combined
    code_summary_by_session %>%
      group_by(code) %>%
      summarise(
        mean_prop = mean(prop),
        sd_prop = sd(prop)
      ) %>%
      mutate(discipline = "OVERALL")
  ) %>%
  filter(!is.na(code), !code == "END") # remove these non-interesting codes
# check: the proportions should sum to 1!
code_summary_table %>%
  group_by(discipline) %>%
  summarise(tot_prop = sum(mean_prop))
# Table with all the gory details
## LaTeX table: one row per discipline, one column per code, cells showing
## "mean% (+-sd%)".  escape = FALSE because the cells contain raw LaTeX.
code_summary_table %>%
  mutate(
    pretty_cell_value = glue::glue("{format(mean_prop*100, digits = 1)}\\% ($\\pm${format(sd_prop*100, digits = 1)}\\%)")
  ) %>%
  select(discipline, code, pretty_cell_value) %>%
  pivot_wider(
    names_from = code,
    values_from = pretty_cell_value,
    values_fill = list(pretty_cell_value = "-")
  ) %>%
  kable(format = "latex", escape = FALSE, booktabs = T)
# More compact version - Table 4 in the paper
code_summary_table %>%
  mutate(
    pretty_cell_value = glue::glue("{format(mean_prop*100, digits = 1)} ($\\pm${format(sd_prop*100, digits = 1)})"),
    # even more compact - remove the sd's
    #pretty_cell_value = glue::glue("{format(mean_prop*100, digits = 1)}")
  ) %>%
  select(discipline, code, pretty_cell_value) %>%
  pivot_wider(
    names_from = code,
    values_from = pretty_cell_value,
    values_fill = list(pretty_cell_value = "-")
  ) %>%
  # transpose the table
  # (bare `t` in a magrittr pipe coerces the tibble to a character matrix)
  t %>%
  kable(format = "latex", escape = FALSE, booktabs = T, linesep = c(""))
| /Paper1/Paper1_FILLplus_analysis.R | no_license | turtlesoul25/ClassroomPractices | R | false | false | 25,981 | r | library(data.table) # used for reading in csv files
library(tidyverse)
library(lubridate)
library(scales)
# https://rud.is/b/2019/06/30/make-refreshing-segmented-column-charts-with-ggchicklet/
library(ggplot2)
library(hrbrthemes) # used for scale_y_percent
# Summary stats
library(janitor)
# Latex output
library(knitr)
library(kableExtra)
## Load the anonymised codings and convert in place to a data.table.
all_codings_anon <- readRDS("all_codings_anon.rds")
setDT(all_codings_anon)
#
# Define colours for the plots
#
## Fill/colour palette keyed by FILL+ code, grouped by interactivity level.
fillplus <- c(
  # non-interactive
  "AD" = "grey",
  "LT" = "peachpuff",
  # vicarious interactive
  "LQ" = "#c7e9c0", # mid green
  "LR" = "#edf8e9", # pale green
  "SQ" = "#cbc9e2", # pale purple
  "SR" = "#bdd7e7", # pale blue
  # interactive
  "CQ" = "#238b45", # dark green
  "ST" = "#41b6c4", # mid blue / teal
  "SD" = "#54278f", # dark purple
  "FB" = "#fd8d3c" # orange
)
## Palette keyed by interactivity level (code_grp), used for stacked bars.
interactivity_levels <- c(
  "NON" = "peachpuff",
  "VIC" = "#a1d99b",
  "INT" = "#225ea8",
  "NA" = "white"
)
#
# Data summary table (Table 2 in the paper)
#
## Number of distinct lecturers observed in each discipline, with a
## grand-total row appended by janitor::adorn_totals().
tab_number_of_lecturers <- all_codings_anon %>%
  distinct(discipline, lecturer_anonid) %>%
  group_by(discipline) %>%
  summarise(num_lecturers = n()) %>%
  adorn_totals("row")
## Per-discipline data summary (Table 2): number of course/lecturer
## combinations (`count`) and coded lectures (`num_lectures`), joined with
## the lecturer counts above.  BIO and CHE are excluded here.
tab_coding_summary = all_codings_anon %>%
  filter(!discipline %in% c("BIO", "CHE")) %>%
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  tally() %>%
  group_by(discipline) %>%
  summarise(
    count = n(),
    num_lectures = sum(n)
  ) %>%
  adorn_totals("row") %>%
  # Implicit join key is "discipline" (the only shared column).
  left_join(tab_number_of_lecturers) %>%
  select(discipline, num_lecturers, count, num_lectures)
tab_coding_summary
tab_coding_summary %>%
  kable("latex", booktabs = T)
# Produce a table where each row is an assigned code, with a given duration
codes_with_durations = all_codings_anon %>%
  unnest(codes_with_duration) %>%
  mutate(
    # Define groupings of codes by interactivity level (as in Wood et al.)
    code_grp = as.factor(case_when(
      code %in% c("AD", "LT") ~ "NON",
      code %in% c("LQ", "SR", "SQ", "LR") ~ "VIC",
      code %in% c("CQ", "ST", "SD", "FB") ~ "INT",
      TRUE ~ "NA"
    )),
    # Put the factors into a sensible order
    code = fct_relevel(code, "AD", "LT", "LQ", "LR", "SQ", "SR", "CQ", "ST", "SD", "FB"),
    code_grp = fct_relevel(code_grp, "NON", "VIC", "INT"),
    # Duration counted only for interactive segments; 0 otherwise.
    duration_int = ifelse(code_grp %in% c("INT"), duration, 0)
    # course = fct_reorder(course, course, .desc = TRUE),
    # course = fct_reorder(course, discipline, .desc = TRUE)
  ) %>%
  select(-raw_coding, -coding, -codes_by_second)
write_csv(codes_with_durations, "anon_codes_with_durations.csv")
#
# Full detailed dataset
#
## Stacked timeline of every coded lecture (one horizontal bar each),
## segments coloured by FILL+ code, faceted by discipline and course.
codes_with_durations %>%
  filter(!is.na(duration)) %>% # remove rows corresponding to "END", which have null duration
  mutate(
    # NOTE(review): the first assignment is dead — `title` is immediately
    # overwritten by lecturer_and_session on the next line.
    title = simple_title,
    title = lecturer_and_session,
    # order by date
    title = fct_reorder(title, lecture_date, .desc = TRUE)
  ) %>%
  ggplot(aes(title, duration, group = time, fill = code)) +
  geom_col(position = position_stack(reverse = TRUE)) +
  facet_grid(rows = vars(discipline, course_anonid),
             scales = "free",
             space = "free",
             shrink = TRUE) +
  # Time axis in minutes: 5-minute ticks, capped at one hour.
  scale_y_time(breaks = 60*5*(0:12),
               labels = 5*(0:12),
               limits = c(0, 60*60)) +
  scale_fill_manual("FILL+ code", values = fillplus) +
  coord_flip() +
  labs(x = NULL, y = "Time") +
  theme_minimal(base_size = 11.5) +
  theme(strip.text.y = element_text(angle = 0),
        panel.grid.minor.x=element_blank(),
        panel.grid.major.y=element_blank(),
        # panel.spacing=grid::unit(2, "lines"),
        strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_timeline_allcourses.pdf",width=30,height=60,units="cm",dpi=300)
## One row per recorded lecture, numbered chronologically within each
## course/lecturer combination (lecture_number = 1 is the earliest date).
lecture_numbers <- codes_with_durations %>%
  distinct(discipline, course_anonid, lecturer_anonid, lecture_id, lecture_date) %>%
  arrange(discipline, course_anonid, lecturer_anonid, lecture_date) %>%
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  mutate(lecture_number = row_number()) %>%
  ungroup()
## Alternative timeline plot: lectures labelled by their chronological
## number within the course (via lecture_numbers) instead of date,
## faceted by discipline, course and lecturer.
codes_with_durations %>%
  # Implicit join key is "lecture_id".
  left_join(lecture_numbers %>% select(lecture_id, lecture_number)) %>%
  mutate(
    # NOTE(review): the first assignment is dead — `title` is immediately
    # overwritten by lecture_number on the next line.
    title = simple_title,
    title = lecture_number
    # order by date
    #title = fct_reorder(title, title, .desc = TRUE)
  ) %>%
  ggplot(aes(title, duration, group = time, fill = code)) +
  geom_col(position = position_stack(reverse = TRUE)) +
  facet_grid(rows = vars(discipline, course_anonid, lecturer_anonid),
             scales = "free",
             space = "free",
             shrink = TRUE) +
  # Time axis in minutes: 5-minute ticks, capped at one hour.
  scale_y_time(breaks = 60*5*(0:12),
               labels = 5*(0:12),
               limits = c(0, 60*60)) +
  scale_fill_manual("FILL+ code", values = fillplus) +
  coord_flip() +
  labs(x = NULL, y = "Time") +
  theme_minimal(base_size = 11.5) +
  theme(strip.text.y = element_text(angle = 0),
        panel.grid.minor.x=element_blank(),
        panel.grid.major.y=element_blank(),
        # panel.spacing=grid::unit(2, "lines"),
        strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_timeline_allcourses_alt.pdf",width=30,height=60,units="cm",dpi=300)
## Plot the FILL+ timeline for every lecture in one discipline, faceted by
## course and lecturer, and save it as
## "Paper1_FILLplus_timeline_<discipline>.pdf".
##
## Args:
##   discipline_to_plot: discipline code to filter on, e.g. "MATH".
##   width_cm, height_cm: size of the saved PDF in centimetres.
##
## Reads the global `codes_with_durations` and `lecture_numbers` tables.
## Returns the value of ggsave() (the output path, invisibly).
plot_all_timelines = function(discipline_to_plot, width_cm=30, height_cm=60) {
  data_to_plot = codes_with_durations %>%
    filter(!is.na(duration)) %>% # remove rows corresponding to "END", which have null duration
    filter(discipline == discipline_to_plot) %>%
    # Implicit join key is "lecture_id".
    left_join(lecture_numbers %>% select(lecture_id, lecture_number)) %>%
    mutate(
      # Label lectures by chronological number within each course/lecturer;
      # descending order so lecture 1 sits at the top after coord_flip().
      title = as.factor(lecture_number),
      title = fct_reorder(title, lecture_number, .desc = TRUE)
    )
  ## Build the plot object explicitly.  Inside a function the plot is never
  ## auto-printed, so relying on ggsave()'s default plot = last_plot() could
  ## silently save a stale plot; pass the object directly instead.
  p = data_to_plot %>%
    ggplot(aes(title, duration, group = time, fill = code)) +
    geom_col(position = position_stack(reverse = TRUE)) +
    facet_grid(rows = vars(course_anonid, lecturer_anonid),
               scales = "free",
               space = "free",
               shrink = TRUE,
               switch = "y") +
    # Time axis in minutes: 5-minute ticks, capped at one hour.
    scale_y_time(breaks = 60*5*(0:12),
                 labels = 5*(0:12),
                 limits = c(0, 60*60)) +
    scale_fill_manual("FILL+ code", values = fillplus) +
    coord_flip() +
    #scale_x_reverse() +
    labs(x = NULL, y = "Time") +
    theme_minimal(base_size = 11.5) +
    theme(strip.text.y = element_text(angle = 180),
          strip.placement = "outside",
          panel.grid.minor.x=element_blank(),
          panel.grid.major.y=element_blank(),
          # panel.spacing=grid::unit(2, "lines"),
          strip.text=element_text(hjust=0, size=12))
  ggsave(paste0("Paper1_FILLplus_timeline_",discipline_to_plot,".pdf"),plot=p,width=width_cm,height=height_cm,units="cm",dpi=300)
}
plot_all_timelines("MATH", height_cm = 40)
plot_all_timelines("PHYS", height_cm = 20)
plot_all_timelines("VET", height_cm = 17)
#
# Summaries for lecturer/course combinations
#
# Levels of interactivity
## Proportion of observed time each course/lecturer combination spent at
## each interactivity level (NON / VIC / INT).  prop_NON is broadcast to
## every row of the combination so plots can be ordered by it.
interactivity_proportions_by_lecturer = codes_with_durations %>%
  group_by(discipline, course_anonid, lecturer_anonid, code_grp) %>%
  summarise(
    mins = sum(duration)
  ) %>%
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  mutate(
    sum = sum(mins, na.rm = TRUE),
    prop = mins/sum,
    # prepare to order by the proportion of NON
    prop_NON = if_else(code_grp == "NON", prop, 0),
    prop_NON = max(prop_NON)
  ) %>%
  ungroup() %>%
  filter(!code_grp == "NA")
## Stacked bar chart of interactivity-level proportions, one bar per
## course/lecturer, ordered by increasing proportion of NON.
interactivity_proportions_by_lecturer %>%
  mutate(
    course_lecturer = paste(course_anonid, lecturer_anonid),
    course_lecturer = fct_reorder(course_lecturer, prop_NON, .desc = FALSE)
  ) %>%
  ggplot(aes(course_lecturer, prop, fill = code_grp)) +
  geom_col(position = position_stack(reverse = TRUE)) +
  facet_grid(rows = vars(discipline),
             scales = "free",
             space = "free",
             shrink = TRUE) +
  scale_y_percent(breaks = 0.2*c(0:5)) +
  scale_fill_manual("Interactivity", values = interactivity_levels) +
  coord_flip() +
  labs(x = NULL, y = "Proportion of time") +
  # theme_ipsum_rc() +
  theme_minimal(base_size = 11.5) +
  theme(strip.text.y = element_text(angle = 0),
        panel.grid.minor.x=element_blank(),
        panel.grid.major.y=element_blank(),
        # panel.spacing=grid::unit(2, "lines"),
        strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_interactivity_proportions.pdf",width=20,height=14,units="cm",dpi=300)
# Use of each code
## Same idea at individual-code granularity; prop_NON is joined back on so
## bars keep the same ordering as the interactivity plot above.
code_proportions_by_lecturer = codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  group_by(discipline, course_anonid, lecturer_anonid, code) %>%
  summarise(
    mins = sum(duration)
  ) %>%
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  mutate(
    sum = sum(mins, na.rm = TRUE),
    prop = mins/sum
  ) %>%
  ungroup() %>%
  # add on their NON proportion so we can sort by it (and therefore keep the same order as the previous plot)
  left_join(interactivity_proportions_by_lecturer %>%
              select(course_anonid, lecturer_anonid, prop_NON) %>%
              distinct(),
            by = c("course_anonid", "lecturer_anonid"))
## Stacked bar chart of per-code time proportions.
code_proportions_by_lecturer %>%
  mutate(
    course_lecturer = paste(course_anonid, lecturer_anonid),
    course_lecturer = fct_reorder(course_lecturer, prop_NON, .desc = FALSE)
  ) %>%
  ggplot(aes(course_lecturer, prop, fill = code)) +
  geom_col(position = position_stack(reverse = TRUE)) +
  facet_grid(rows = vars(discipline),
             scales = "free",
             space = "free",
             shrink = TRUE) +
  scale_y_percent(breaks = 0.2*c(0:5)) +
  scale_fill_manual("FILL+ code", values = fillplus) +
  coord_flip() +
  labs(x = NULL, y = "Proportion of time") +
  # theme_ipsum_rc() +
  theme_minimal(base_size = 11.5) +
  theme(strip.text.y = element_text(angle = 0),
        panel.grid.minor.x=element_blank(),
        panel.grid.major.y=element_blank(),
        # panel.spacing=grid::unit(2, "lines"),
        strip.text=element_text(hjust=0, size=12))
ggsave("Paper1_FILLplus_code_proportions.pdf",width=20,height=14,units="cm",dpi=300)
# Analysis of lecturer questions
## Overall spread of the number of lecturer-question (LQ) events per
## lecture: count LQ rows in each course/lecturer/session, then summarise
## the distribution of those per-lecture counts.
codes_with_durations %>%
  filter(code == "LQ") %>%
  count(course_anonid, lecturer_anonid, session_number, name = "num_qs") %>%
  summarise(
    min_qs = min(num_qs),
    mean_qs = mean(num_qs),
    max_qs = max(num_qs),
    median_qs = median(num_qs)
  )
## Jittered per-lecture LQ counts per course/lecturer combination,
## faceted by discipline, with the mean (black cross) and its standard
## error (error bar).  Superseded by the summary_of_LQ_SQ plots below.
codes_with_durations %>%
  group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
  summarise(
    num_qs = sum(code == "LQ", na.rm = TRUE)
  ) %>%
  ungroup() %>%
  # add the mean for each course-lecturer combo
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  mutate(
    mean_qs = mean(num_qs, na.rm = TRUE),
    se_qs = sd(num_qs, na.rm = TRUE) / sqrt(n())
  ) %>%
  ungroup() %>%
  mutate(
    #position = parse_number(course_anonid)*1 + 100*num_qs,
    course_num = parse_number(course_anonid),
    course_and_lecturer = paste(course_anonid, lecturer_anonid),
    #course_and_lecturer = paste(course_acronym, course_anonid, lecturer),
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_qs, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = num_qs, fill = course_anonid)) +
  facet_grid(cols = vars(discipline),
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  #geom_violin(fill = fillplus["LQ"]) +
  #geom_violin() +
  #ylim(-1, 50) +
  #geom_boxplot(alpha = 0.5, width = 0.3, colour = "#999999") +
  geom_point(aes(color = course_num),
             position = position_jitter(width = 0.1),
             alpha = 0.8) +
  geom_errorbar(aes(ymin=(mean_qs-se_qs), ymax=(mean_qs+se_qs)), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_qs), colour = "black", shape = 4) +
  #coord_flip() +
  labs(x = NULL, y = "Number of questions") +
  theme_minimal(base_size = 16) +
  scale_colour_viridis_c() +
  scale_fill_viridis_d() +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),
        #panel.grid.major.x=element_blank(),
        # panel.spacing=grid::unit(2, "lines"),
        #strip.text=element_text(hjust=0, size=12)
  )
  #ggtitle("Distribution of number of questions asked per lecture")
ggsave("Paper1_FILLplus_LQ_distribution_old.pdf",width=20,height=10,units="cm",dpi=300)
## Per-lecture counts of lecturer questions (LQ) and student questions
## (SQ), with per-course/lecturer means and standard errors attached.
summary_of_LQ_SQ = codes_with_durations %>%
  group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
  summarise(
    num_LQ = sum(code == "LQ", na.rm = TRUE),
    num_SQ = sum(code == "SQ", na.rm = TRUE)
  ) %>%
  ungroup() %>%
  # add the mean for each course-lecturer combo
  group_by(discipline, course_anonid, lecturer_anonid) %>%
  mutate(
    mean_LQ = mean(num_LQ, na.rm = TRUE),
    se_LQ = sd(num_LQ, na.rm = TRUE) / sqrt(n()),
    mean_SQ = mean(num_SQ, na.rm = TRUE),
    se_SQ = sd(num_SQ, na.rm = TRUE) / sqrt(n())
  ) %>%
  ungroup() %>%
  mutate(
    course_num = parse_number(course_anonid),
    course_and_lecturer = paste(course_anonid, lecturer_anonid)
  )
# LQs
## Distribution of LQ counts, x axis ordered by decreasing mean LQ.
summary_of_LQ_SQ %>%
  mutate(
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_LQ, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = num_LQ, fill = course_anonid)) +
  facet_grid(cols = vars(discipline),
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  geom_point(aes(color = course_num),
             position = position_jitter(width = 0.1),
             alpha = 0.8) +
  geom_errorbar(aes(ymin=(mean_LQ-se_LQ), ymax=(mean_LQ+se_LQ)), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_LQ), colour = "black", shape = 4) +
  labs(x = NULL, y = "Number of questions") +
  theme_minimal(base_size = 16) +
  scale_colour_viridis_c() +
  scale_fill_viridis_d() +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),
  )
ggsave("Paper1_FILLplus_LQ_distribution.pdf",width=20,height=10,units="cm",dpi=300)
# SQs
## Same plot for student questions, ordered by decreasing mean SQ.
summary_of_LQ_SQ %>%
  mutate(
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_SQ, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = num_SQ, fill = course_anonid)) +
  facet_grid(cols = vars(discipline),
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  geom_point(aes(color = course_num),
             position = position_jitter(width = 0.1),
             alpha = 0.8) +
  geom_errorbar(aes(ymin=(mean_SQ-se_SQ), ymax=(mean_SQ+se_SQ)), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_SQ), colour = "black", shape = 4) +
  labs(x = NULL, y = "Number of student questions") +
  theme_minimal(base_size = 16) +
  scale_colour_viridis_c() +
  scale_fill_viridis_d() +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),
  )
ggsave("Paper1_FILLplus_SQ_distribution.pdf",width=20,height=10,units="cm",dpi=300)
# Table for avg durations
## Mean and SD, over sessions, of the fraction of each session spent in
## each code.  The session length is taken from the time of its END marker
## (back-filled onto every row with zoo::na.locf).
codes_with_durations %>%
  group_by(lecturer_and_session) %>%
  mutate(endtime = case_when(code == "END" ~ time),
         endtime = zoo::na.locf(endtime, fromLast = TRUE)) %>%
  ungroup() %>%
  group_by(lecturer_and_session, code) %>%
  mutate(session_total = sum (duration),
         session_perc = session_total/endtime) %>%
  ungroup() %>%
  group_by(code) %>%
  summarise(mean = mean(session_perc),
            sd = sd(session_perc))
## Per-session proportion of time in each observed code.  NOTE: this
## version is immediately overwritten by the "improved version" defined
## just below, which also accounts for codes never observed in a session.
code_summary_by_session = codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  group_by(discipline, course_anonid, lecturer_anonid, session_number, code) %>%
  summarise(
    mins = sum(duration)
  ) %>%
  group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
  mutate(
    sum = sum(mins, na.rm = TRUE),
    prop = mins/sum
  )
#This is an improved version that takes account of some codes not being observed
code_summary_by_session = codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  group_by(discipline, course_anonid, lecturer_anonid, session_number, code) %>%
  # add entries for any codes which are missing
  # NOTE(review): complete() is called on data grouped by `code`; tidyr does
  # not expand grouping variables within groups, so it is unclear this adds
  # the intended zero-duration rows — TODO verify (ungrouping first, or
  # completing before group_by, may be what was meant).
  complete(code,
           nesting(discipline, course_anonid, lecturer_anonid, session_number),
           fill = list(duration = 0)) %>%
  summarise(
    mins = sum(duration)
  ) %>%
  group_by(discipline, course_anonid, lecturer_anonid, session_number) %>%
  mutate(
    sum = sum(mins, na.rm = TRUE),
    prop = mins/sum
  )
## Quick look: per-session code proportions by lecturer, faceted by code.
code_summary_by_session %>%
  ggplot(aes(x = lecturer_anonid, y = prop, colour = code)) +
  facet_grid(~code) +
  geom_point() +
  theme_minimal(base_size = 16)
#
# Investigation of LT
#
## Scatter of individual LT segment durations (seconds) per course/lecturer
## combination, ordered by the longest LT segment; red cross marks the mean.
codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  filter(code == "LT") %>%
  group_by(course_anonid, lecturer_anonid) %>%
  mutate(
    duration_LT = case_when(code == "LT" ~ duration),
    longest_LT = max(duration_LT, na.rm = TRUE),
    mean_LT = mean(duration_LT, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  mutate(
    course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
    course_and_lecturer = fct_reorder(course_and_lecturer, longest_LT, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = duration, colour = code)) +
  facet_grid(~code) +
  geom_point() +
  geom_point(aes(x = course_and_lecturer, y = mean_LT), colour = "red", shape = 4) +
  scale_colour_manual("FILL+ code", values = fillplus) +
  theme_minimal(base_size = 16)
## All LT (lecturer talk) segment durations, in seconds.  The original code
## ended with mutate(duration = duration), a no-op (likely a leftover from
## an abandoned unit conversion); it has been removed — the result is an
## identical one-column data frame.
all_LT_durations = codes_with_durations %>%
  filter(code == "LT") %>%
  select(duration)
all_LT_durations %>% summary()
## Density of LT durations on the raw and log scales (the raw distribution
## is heavily right-skewed).
all_LT_durations %>%
  ggplot(aes(x = duration)) + geom_density() + labs(x = "duration of LT in seconds")
all_LT_durations %>%
  ggplot(aes(x = log(duration))) + geom_density() + labs(x = "log(duration of LT)")
## Segment-duration statistics (max/mean/sd/total in seconds, plus count)
## for every code within every course/lecturer combination.
code_use_summary = codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  group_by(discipline, course_anonid, lecturer_anonid, code) %>%
  summarise(
    duration_max = max(duration, na.rm = TRUE),
    duration_mean = mean(duration, na.rm = TRUE),
    duration_sd = sd(duration, na.rm = TRUE),
    duration_total = sum(duration, na.rm = TRUE),
    duration_count = n()
  )
## Long-format version of the per-code duration summary: one row per
## (grouping, code, statistic).  The "duration" prefix is split off into a
## value column and the statistic name ("max", "mean", ...) is kept in `stat`.
code_use_summary_long <- code_use_summary %>%
  pivot_longer(
    cols = starts_with("duration_"),
    names_sep = "_",
    names_to = c(".value", "stat")
  )
## Quick diagnostic plot: mean segment duration (seconds) for each FILL+
## code, one point per course/lecturer combination, faceted by code.
code_use_summary %>%
  mutate(
    course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
    #course_and_lecturer = fct_reorder(course_and_lecturer, longest_LT, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = duration_mean, colour = code)) +
  facet_grid(~code) +
  geom_point() +
  #geom_point(aes(x = course_and_lecturer, y = mean_LT), colour = "red", shape = 4) +
  # Colour points with the project's FILL+ palette.
  scale_colour_manual("FILL+ code", values = fillplus) +
  theme_minimal(base_size = 16)
## Discipline-level averages (converted seconds -> minutes) of the
## per-lecturer LT duration statistics.  Filtering to LT up front avoids
## summarising the other nine codes only to discard their rows afterwards;
## the result is identical to summarising first and filtering last.
code_use_summary %>%
  filter(code == "LT") %>%
  group_by(discipline, code) %>%
  summarise(
    avg_duration_max = mean(duration_max, na.rm = TRUE)/60,
    avg_duration_mean = mean(duration_mean, na.rm = TRUE)/60,
    avg_duration_sd = mean(duration_sd, na.rm = TRUE)/60
  )
## Scatter of individual LT segment durations per course/lecturer
## combination (x axis ordered by decreasing mean LT), faceted by
## discipline.  The black cross marks the mean and the error bar its
## standard error; durations are plotted in minutes.
codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  filter(code == "LT") %>%
  group_by(course_anonid, lecturer_anonid) %>%
  mutate(
    # duration_LT equals duration here (already filtered to LT); kept for
    # symmetry with the variant below that computes it before filtering.
    duration_LT = case_when(code == "LT" ~ duration),
    longest_LT = max(duration_LT, na.rm = TRUE),
    mean_LT = mean(duration_LT, na.rm = TRUE),
    se_LT = sd(duration_LT, na.rm = TRUE) / sqrt(n())
  ) %>%
  ungroup() %>%
  mutate(
    course_num = parse_number(course_anonid),
    course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_LT, .desc = TRUE)
  ) %>%
  ggplot(aes(x = course_and_lecturer, y = duration/60, colour = course_num)) +
  facet_grid(~discipline ,
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  geom_point() +
  geom_errorbar(aes(ymin=(mean_LT-se_LT)/60, ymax=(mean_LT+se_LT)/60), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_LT/60), colour = "black", shape = 4) +
  #scale_colour_manual("FILL+ code", values = fillplus) +
  scale_colour_viridis_c() +
  theme_minimal(base_size = 16) +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),) +
  labs(#x = "Course/lecturer combination",
    x = "",
    y = "Duration of LT (min)")
ggsave("Paper1_FILLplus_LT_distribution.pdf",width=20,height=10,units="cm",dpi=300)
## Variant of the plot above that also overlays (red cross) the mean
## proportion of each session spent in LT, rescaled by *50 so it shares the
## minutes axis; the x axis is ordered by that mean proportion instead.
codes_with_durations %>%
  filter(!is.na(code), !code == "END") %>% # remove these non-interesting codes
  # add the proportion of time spent in LT for each session
  group_by(course_anonid, lecturer_anonid, session_number) %>%
  mutate(
    lecture_duration = sum(duration),
    duration_LT = case_when(code == "LT" ~ duration),
    prop_LT = sum(duration_LT, na.rm = TRUE) / lecture_duration,
  ) %>%
  #filter(code == "LT") %>%
  group_by(course_anonid, lecturer_anonid) %>%
  mutate(
    longest_LT = max(duration_LT, na.rm = TRUE),
    mean_LT = mean(duration_LT, na.rm = TRUE),
    se_LT = sd(duration_LT, na.rm = TRUE) / sqrt(n()),
    mean_prop_LT = mean(prop_LT),
  ) %>%
  ungroup() %>%
  mutate(
    course_and_lecturer = as.factor(paste(course_anonid, lecturer_anonid)),
    # The second fct_reorder overrides the first: final order is by
    # decreasing mean proportion of time spent in LT.
    course_and_lecturer = fct_reorder(course_and_lecturer, longest_LT, .desc = TRUE),
    course_and_lecturer = fct_reorder(course_and_lecturer, mean_prop_LT, .desc = TRUE)
  ) %>%
  # restrict the plot to just LT durations
  filter(code == "LT") %>%
  ggplot(aes(x = course_and_lecturer, y = duration/60, colour = code)) +
  facet_grid(~discipline ,
             scales = "free",
             space = "free_x",
             shrink = TRUE) +
  geom_point() +
  geom_errorbar(aes(ymin=(mean_LT-se_LT)/60, ymax=(mean_LT+se_LT)/60), width=.2, colour = "black") +
  geom_point(aes(x = course_and_lecturer, y = mean_LT/60), colour = "black", shape = 4) +
  # NOTE(review): *50 is an ad-hoc rescaling so the proportion fits on the
  # minutes axis — there is no secondary axis to read it against.
  geom_point(aes(x = course_and_lecturer, y = mean_prop_LT*50), colour = "red", shape = 4) +
  #scale_colour_manual("FILL+ code", values = fillplus) +
  scale_colour_viridis_c() +
  theme_minimal(base_size = 16) +
  theme(strip.text.y = element_text(angle = 0),
        axis.text.x=element_text(angle = 90, hjust = 1, vjust = 0.5, size = 8),
        legend.position = "none",
        panel.grid.minor.y=element_blank(),) +
  labs(x = "Course/lecturer combination",
       y = "Duration of LT (min)")
ggsave("Paper1_FILLplus_LT_distribution_withprops.pdf",width=20,height=10,units="cm",dpi=300)
# proportions in each session sum to 1
code_summary_by_session %>%
  group_by(discipline, lecturer_anonid, session_number) %>%
  summarise(tot_prop = sum(prop))
## Mean and SD of the per-session time proportion of each code, by
## discipline, plus an "OVERALL" set of rows pooled across disciplines.
code_summary_table = code_summary_by_session %>%
  group_by(discipline, code) %>%
  summarise(
    mean_prop = mean(prop),
    sd_prop = sd(prop)
  ) %>%
  bind_rows(
    # produce a version for all the disciplines combined
    code_summary_by_session %>%
      group_by(code) %>%
      summarise(
        mean_prop = mean(prop),
        sd_prop = sd(prop)
      ) %>%
      mutate(discipline = "OVERALL")
  ) %>%
  filter(!is.na(code), !code == "END") # remove these non-interesting codes
# check: the proportions should sum to 1!
code_summary_table %>%
  group_by(discipline) %>%
  summarise(tot_prop = sum(mean_prop))
# Table with all the gory details
## LaTeX table: one row per discipline, one column per code, cells showing
## "mean% (+-sd%)".  escape = FALSE because the cells contain raw LaTeX.
code_summary_table %>%
  mutate(
    pretty_cell_value = glue::glue("{format(mean_prop*100, digits = 1)}\\% ($\\pm${format(sd_prop*100, digits = 1)}\\%)")
  ) %>%
  select(discipline, code, pretty_cell_value) %>%
  pivot_wider(
    names_from = code,
    values_from = pretty_cell_value,
    values_fill = list(pretty_cell_value = "-")
  ) %>%
  kable(format = "latex", escape = FALSE, booktabs = T)
# More compact version - Table 4 in the paper
code_summary_table %>%
  mutate(
    pretty_cell_value = glue::glue("{format(mean_prop*100, digits = 1)} ($\\pm${format(sd_prop*100, digits = 1)})"),
    # even more compact - remove the sd's
    #pretty_cell_value = glue::glue("{format(mean_prop*100, digits = 1)}")
  ) %>%
  select(discipline, code, pretty_cell_value) %>%
  pivot_wider(
    names_from = code,
    values_from = pretty_cell_value,
    values_fill = list(pretty_cell_value = "-")
  ) %>%
  # transpose the table
  # (bare `t` in a magrittr pipe coerces the tibble to a character matrix)
  t %>%
  kable(format = "latex", escape = FALSE, booktabs = T, linesep = c(""))
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333288247711e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615836595-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333288247711e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
#' Remove Non-binary Sites
#'
#' This function will determine if a site is binary (TRUE) or not (FALSE) and remove those sites that are not binary. For example, c("A", "A", "T") will be kept; c("A", "A", "A") or c("A", "G", "T") will be removed. Ambiguity codes are taken into account to mean either heterozygotes or uncertainty.
#' @param SNPdataset SNP data in the class "matrix", "data.frame", or "snp"
#' @param chatty Optional print to screen messages
#' @export
#' @return Returns a subset dataset with only variable sites.
#' @seealso \link{ReadSNP} \link{WriteSNP} \link{RemoveInvariantSites} \link{IsBinary}
#' @examples
#' data(fakeData)
#' RemoveNonBinary(fakeData)
#' RemoveNonBinary(fakeData, chatty=TRUE)
# Drop every site that is not binary, returning the same class that came in.
# A site (column) is kept only when IsBinary() accepts it; see the roxygen
# block above for what counts as binary.
RemoveNonBinary <- function(SNPdataset, chatty=FALSE){
  # Unwrap "snp" objects to their raw data table; remember the class so the
  # result can be re-wrapped on the way out.
  came_as_snp <- inherits(SNPdataset, "snp")
  if (came_as_snp) {
    SNPdataset <- SNPdataset$data
  }

  # Total number of sites before filtering, counted from the first row.
  n_before <- sum(nchar(SNPdataset[1, ]))

  # Split into one column per site, test each column, keep the binary ones.
  per_site <- SplitSNP(SNPdataset)
  keep_site <- apply(per_site, 2, IsBinary)
  reduced <- cSNP(per_site, KeepVector = keep_site)
  n_after <- sum(nchar(reduced[1, ]))

  if (chatty) {
    message(paste("removed", n_before - n_after, "of", n_before, "sites"))
  }

  # Hand back the same kind of object the caller supplied.
  if (came_as_snp) {
    ReadSNP(reduced)
  } else {
    reduced
  }
}
| /R/RemoveNonBinary.R | no_license | bbanbury/phrynomics | R | false | false | 1,290 | r | #' Remove Non-binary Sites
#'
#' This function will determine if a site is binary (TRUE) or not (FALSE) and remove those sites that are not binary. For example, c("A", "A", "T") will be kept; c("A", "A", "A") or c("A", "G", "T") will be removed. Ambiguity codes are taken into account to mean either heterozygotes or uncertainty.
#' @param SNPdataset SNP data in the class "matrix", "data.frame", or "snp"
#' @param chatty Optional print to screen messages
#' @export
#' @return Returns a subset dataset with only variable sites.
#' @seealso \link{ReadSNP} \link{WriteSNP} \link{RemoveInvariantSites} \link{IsBinary}
#' @examples
#' data(fakeData)
#' RemoveNonBinary(fakeData)
#' RemoveNonBinary(fakeData, chatty=TRUE)
# Drop non-binary sites from a SNP dataset (see the roxygen block above).
RemoveNonBinary <- function(SNPdataset, chatty=FALSE){
  # Track the input's class so the result can be returned in kind.
  snpclass <- "table"
  if(inherits(SNPdataset, "snp")){
    snpclass <- "snp"
    # Work on the raw data table held inside the "snp" object.
    SNPdataset <- SNPdataset$data
  }
  # Number of sites before filtering, counted from the first row.
  snps <- sum(nchar(SNPdataset[1,]))
  # One column per site; keep only the columns IsBinary() accepts.
  splitdata <- SplitSNP(SNPdataset)
  binaryVector <- apply(splitdata, 2, IsBinary)
  newSNPdataset <- cSNP(splitdata, KeepVector=binaryVector)
  # Number of sites remaining after filtering.
  newsnps <- sum(nchar(newSNPdataset[1,]))
  if(chatty)
    message(paste("removed", snps-newsnps, "of", snps, "sites"))
  # Re-wrap as a "snp" object when that is what came in.
  if(snpclass == "snp")
    return(ReadSNP(newSNPdataset))
  else
    return(newSNPdataset)
}
|
## coxme: entry point for fitting a mixed-effects Cox model.
## NOTE(review): only the first three statements of the body are present in
## this extract (entry timestamp, `ties` resolution, call capture); the
## remainder of the fitting code is not visible here -- confirm against the
## full source before relying on this definition.
coxme <- function(formula, data,
                  weights, subset, na.action, init,
                  control, ties= c("efron", "breslow"),
                  varlist, vfixed, vinit, sparse=c(50,.02),
                  x=FALSE, y=TRUE,
                  refine.n=0, random, fixed, variance, ...) {
    time0 <- proc.time() #debugging line: wall/CPU time at entry
    ties <- match.arg(ties)  # resolve (possibly abbreviated) choice: "efron"/"breslow"
    Call <- match.call()     # record the matched call (unused in this visible fragment)
    }
| /data/genthat_extracted_code/noweb/vignettes/noweb.R | no_license | surayaaramli/typeRrh | R | false | false | 362 | r | coxme <- function(formula, data,
weights, subset, na.action, init,
control, ties= c("efron", "breslow"),
varlist, vfixed, vinit, sparse=c(50,.02),
x=FALSE, y=TRUE,
refine.n=0, random, fixed, variance, ...) {
time0 <- proc.time() #debugging line
ties <- match.arg(ties)
Call <- match.call()
}
|
\name{hasMEF}
\alias{hasMEF}
\title{hasMEF}
\arguments{
\item{bead.data}{\code{\link{BeadFlowFrame}}}
\item{parameter}{\code{\link{character}}}
}
\description{
Checks whether we have the MEF for a channel name.
}
| /man/hasMEF.Rd | no_license | pontikos/flowBeads | R | false | false | 221 | rd | \name{hasMEF}
\alias{hasMEF}
\title{hasMEF}
\arguments{
\item{bead.data}{\code{\link{BeadFlowFrame}}}
\item{parameter}{\code{\link{character}}}
}
\description{
Checks whether we have the MEF for a channel name.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/movies.R
\name{etl_extract.etl_imdb}
\alias{etl_extract.etl_imdb}
\alias{etl_load.etl_imdb}
\alias{etl_load_data}
\alias{etl_load_data.src_mysql}
\title{Set up local IMDB}
\source{
IMDB: \url{ftp://ftp.fu-berlin.de/pub/misc/movies/database/}
IMDbPy: \url{http://imdbpy.sourceforge.net/}
}
\usage{
\method{etl_extract}{etl_imdb}(obj, tables = c("movies", "actors",
"actresses", "directors"), all.tables = FALSE, ...)
\method{etl_load}{etl_imdb}(obj, path_to_imdbpy2sql = NULL, password = "",
...)
etl_load_data(obj, ...)
\method{etl_load_data}{src_mysql}(obj, ...)
}
\arguments{
\item{obj}{an \code{\link{etl}} object}
\item{tables}{a character vector of files from IMDB to download. The default is
movies, actors, actresses, and directors. These four files alone will occupy
more than 500 MB of disk space. There are 49 total files available
on IMDB. See \url{ftp://ftp.fu-berlin.de/pub/misc/movies/database/} for the
complete list.}
\item{all.tables}{a logical indicating whether you want to download all of
the tables. Default is \code{FALSE}.}
\item{...}{arguments passed to methods}
\item{path_to_imdbpy2sql}{a path to the IMDB2SQL Python script provided by
IMDBPy. If NULL -- the default -- will attempt to find it using
\code{\link{findimdbpy2sql}}.}
\item{password}{Must re-enter password unless your password is blank. The real
password will not be shown in messages.}
}
\description{
Download the raw data files from IMDB
}
\details{
For best performance, set the MySQL default collation to \code{utf8_unicode_ci}.
See the IMDbPy2sql documentation at
\url{http://imdbpy.sourceforge.net/docs/README.sqldb.txt} for more details.
Please be aware that IMDB contains information about *all* types of movies.
}
\examples{
# Connect using default RSQLite database
imdb <- etl("imdb")
# Connect using pre-configured PostgreSQL database
\dontrun{
if (require(RPostgreSQL)) {
# must have pre-existing database "imdb"
db <- src_postgres(host = "localhost", user="postgres",
password="postgres", dbname = "imdb")
}
imdb <- etl("imdb", db = db, dir = "~/dumps/imdb/")
imdb \%>\%
etl_extract(tables = "movies") \%>\%
etl_load()
}
\dontrun{
if (require(RMySQL)) {
# must have pre-existing database "imdb"
db <- src_mysql_cnf(dbname = "imdb")
}
imdb <- etl("imdb", db = db, dir = "~/dumps/imdb/")
imdb \%>\%
etl_extract(tables = "movies") \%>\%
etl_load()
movies <- imdb \%>\%
tbl("title")
movies \%>\%
filter(title == 'star wars')
people <- imdb \%>\%
tbl("name")
roles <- imdb \%>\%
tbl("cast_info")
movies \%>\%
inner_join(cast_info, by = c("id" = "movie_id")) \%>\%
inner_join(people, by = c("person_id" = "id")) \%>\%
filter(title == 'star wars') \%>\%
filter(production_year == 1977) \%>\%
arrange(nr_order)
}
}
| /man/etl_extract.etl_imdb.Rd | no_license | Yi5117/imdb | R | false | true | 2,937 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/movies.R
\name{etl_extract.etl_imdb}
\alias{etl_extract.etl_imdb}
\alias{etl_load.etl_imdb}
\alias{etl_load_data}
\alias{etl_load_data.src_mysql}
\title{Set up local IMDB}
\source{
IMDB: \url{ftp://ftp.fu-berlin.de/pub/misc/movies/database/}
IMDbPy: \url{http://imdbpy.sourceforge.net/}
}
\usage{
\method{etl_extract}{etl_imdb}(obj, tables = c("movies", "actors",
"actresses", "directors"), all.tables = FALSE, ...)
\method{etl_load}{etl_imdb}(obj, path_to_imdbpy2sql = NULL, password = "",
...)
etl_load_data(obj, ...)
\method{etl_load_data}{src_mysql}(obj, ...)
}
\arguments{
\item{obj}{an \code{\link{etl}} object}
\item{tables}{a character vector of files from IMDB to download. The default is
movies, actors, actresses, and directors. These four files alone will occupy
more than 500 MB of disk space. There are 49 total files available
on IMDB. See \url{ftp://ftp.fu-berlin.de/pub/misc/movies/database/} for the
complete list.}
\item{all.tables}{a logical indicating whether you want to download all of
the tables. Default is \code{FALSE}.}
\item{...}{arguments passed to methods}
\item{path_to_imdbpy2sql}{a path to the IMDB2SQL Python script provided by
IMDBPy. If NULL -- the default -- will attempt to find it using
\code{\link{findimdbpy2sql}}.}
\item{password}{Must re-enter password unless your password is blank. The real
password will not be shown in messages.}
}
\description{
Download the raw data files from IMDB
}
\details{
For best performance, set the MySQL default collation to \code{utf8_unicode_ci}.
See the IMDbPy2sql documentation at
\url{http://imdbpy.sourceforge.net/docs/README.sqldb.txt} for more details.
Please be aware that IMDB contains information about *all* types of movies.
}
\examples{
# Connect using default RSQLite database
imdb <- etl("imdb")
# Connect using pre-configured PostgreSQL database
\dontrun{
if (require(RPostgreSQL)) {
# must have pre-existing database "imdb"
db <- src_postgres(host = "localhost", user="postgres",
password="postgres", dbname = "imdb")
}
imdb <- etl("imdb", db = db, dir = "~/dumps/imdb/")
imdb \%>\%
etl_extract(tables = "movies") \%>\%
etl_load()
}
\dontrun{
if (require(RMySQL)) {
# must have pre-existing database "imdb"
db <- src_mysql_cnf(dbname = "imdb")
}
imdb <- etl("imdb", db = db, dir = "~/dumps/imdb/")
imdb \%>\%
etl_extract(tables = "movies") \%>\%
etl_load()
movies <- imdb \%>\%
tbl("title")
movies \%>\%
filter(title == 'star wars')
people <- imdb \%>\%
tbl("name")
roles <- imdb \%>\%
tbl("cast_info")
movies \%>\%
inner_join(cast_info, by = c("id" = "movie_id")) \%>\%
inner_join(people, by = c("person_id" = "id")) \%>\%
filter(title == 'star wars') \%>\%
filter(production_year == 1977) \%>\%
arrange(nr_order)
}
}
|
#' Launch the semantic widgets gallery app
#'
#' Runs the demo Shiny application shipped with the package under
#' \code{inst/semantic_widgets_gallery}.
#'
#' @return No return value; called for the side effect of launching the app.
#' @export
semantic_widgets_gallery <- function() {
  # system.file() returns "" when the package (or its app dir) is missing.
  appDir <- system.file("semantic_widgets_gallery", package = "semanticWidgets")
  if (appDir == "") {
    # Fix: the original message had empty backticks where the package name
    # belongs.
    stop("Could not find example directory. Try re-installing `semanticWidgets`.",
         call. = FALSE)
  }
  shiny::runApp(appDir, display.mode = "normal")
}
| /R/app.R | no_license | systats/semanticWidgets | R | false | false | 294 | r | #' @export
# Launch the demo Shiny app shipped in inst/semantic_widgets_gallery.
semantic_widgets_gallery <- function() {
  # Resolve the app directory from the installed package; "" means the
  # package (or the app directory inside it) is not installed.
  appDir <- system.file("semantic_widgets_gallery", package = "semanticWidgets")
  if (appDir == "") {
    # NOTE(review): the backticks in this message are empty; the package
    # name ("semanticWidgets", per system.file above) appears intended.
    stop("Could not find example directory. Try re-installing ``.", call. = FALSE)
  }
  shiny::runApp(appDir, display.mode = "normal")
}
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(swephR)
## ---- eval = FALSE------------------------------------------------------------
# swe_set_ephe_path("C:\\sweph\\ephe")
## -----------------------------------------------------------------------------
year <- 2000
month <- 1
day <- 1
hour <- 12
jdut <- swe_julday(year, month, day, hour, SE$GREG_CAL)
jdut
## -----------------------------------------------------------------------------
ipl <- SE$SUN
iflag <- SE$FLG_MOSEPH + SE$FLG_SPEED
result <- swe_calc_ut(jdut, ipl, iflag)
result
## -----------------------------------------------------------------------------
starname = "sirius"
result <- swe_fixstar2_ut(starname, jdut, iflag)
result
## -----------------------------------------------------------------------------
options(digits=15)
result <- swe_heliacal_ut(jdut,c(0,50,10),c(1013.25,15,50,0.25),c(25,1,1,1,5,0.8),starname,
SE$HELIACAL_RISING,SE$HELFLAG_HIGH_PRECISION+SE$FLG_MOSEPH)
result
## -----------------------------------------------------------------------------
options(digits=6)
swe_set_ephe_path(NULL)
iflag = SE$FLG_SPEED + SE$FLG_MOSEPH
{
#get year
jyear <- 2000
#get month
jmon <- 1
#get day
jday <- 1
#get time
jhour <- 12
#determine julian day number (at 12:00 GMT)
tjd_ut <- swe_julday(jyear, jmon, jday, jhour, SE$GREG_CAL)
cat("Julian day number (UT) :", tjd_ut, "(",jyear,",",jmon,",",jday,"; proleptic Gregorian calendar)\n")
cat("planet :",
c("longitude", "latitude", "distance", "long. speed", "lat. speed"),
"\n")
cat("===========================================================\n")
# loop over all planets
for (p in SE$SUN:SE$OSCU_APOG) {
# get the name of the planet p
objectname = swe_get_planet_name(p)
# do the coordinate calculation for this planet p
i = swe_calc_ut(tjd_ut, p, iflag)
if (i$return < 0) {
cat("Error :", i$err, "(", objectname, ")\n")
}
else
{
# print data
cat (objectname, ":", i$xx[0:5], "\n")
}
}
}
## -----------------------------------------------------------------------------
swe_close()
| /inst/doc/swephR.R | no_license | cran/swephR | R | false | false | 2,392 | r | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(swephR)
## ---- eval = FALSE------------------------------------------------------------
# swe_set_ephe_path("C:\\sweph\\ephe")
## -----------------------------------------------------------------------------
year <- 2000
month <- 1
day <- 1
hour <- 12
jdut <- swe_julday(year, month, day, hour, SE$GREG_CAL)
jdut
## -----------------------------------------------------------------------------
ipl <- SE$SUN
iflag <- SE$FLG_MOSEPH + SE$FLG_SPEED
result <- swe_calc_ut(jdut, ipl, iflag)
result
## -----------------------------------------------------------------------------
starname = "sirius"
result <- swe_fixstar2_ut(starname, jdut, iflag)
result
## -----------------------------------------------------------------------------
options(digits=15)
result <- swe_heliacal_ut(jdut,c(0,50,10),c(1013.25,15,50,0.25),c(25,1,1,1,5,0.8),starname,
SE$HELIACAL_RISING,SE$HELFLAG_HIGH_PRECISION+SE$FLG_MOSEPH)
result
## -----------------------------------------------------------------------------
options(digits=6)
swe_set_ephe_path(NULL)
iflag = SE$FLG_SPEED + SE$FLG_MOSEPH
{
#get year
jyear <- 2000
#get month
jmon <- 1
#get day
jday <- 1
#get time
jhour <- 12
#determine julian day number (at 12:00 GMT)
tjd_ut <- swe_julday(jyear, jmon, jday, jhour, SE$GREG_CAL)
cat("Julian day number (UT) :", tjd_ut, "(",jyear,",",jmon,",",jday,"; proleptic Gregorian calendar)\n")
cat("planet :",
c("longitude", "latitude", "distance", "long. speed", "lat. speed"),
"\n")
cat("===========================================================\n")
# loop over all planets
for (p in SE$SUN:SE$OSCU_APOG) {
# get the name of the planet p
objectname = swe_get_planet_name(p)
# do the coordinate calculation for this planet p
i = swe_calc_ut(tjd_ut, p, iflag)
if (i$return < 0) {
cat("Error :", i$err, "(", objectname, ")\n")
}
else
{
# print data
cat (objectname, ":", i$xx[0:5], "\n")
}
}
}
## -----------------------------------------------------------------------------
swe_close()
|
#!/usr/bin/env Rscript
# Creates a FASTA file of random DNA sequences (written by the
# writeXStringSet() call that follows).
suppressPackageStartupMessages(library(Biostrings))

# Sequence length (bases) and number of sequences to generate.
seqlength <- 100
nseq <- 1000

# Build each sequence by sampling bases uniformly with replacement.
# vapply() guarantees a character vector (sapply's return type can vary
# with its input), and the literal TRUE replaces the reassignable T.
mydict <- DNAStringSet(vapply(
  seq_len(nseq),
  function(i) paste(sample(c("A", "T", "G", "C"), seqlength, replace = TRUE), collapse = ""),
  character(1)
))
# Name each sequence by its index so FASTA headers are ">1", ">2", ...
names(mydict) <- seq_len(nseq)
writeXStringSet(mydict, file="random.fasta") | /rand.r | no_license | alexpenson/scripts | R | false | false | 323 | r | #!/usr/bin/env Rscript
# Creates random FASTA file
# (the writeXStringSet() call on the next line writes it to "random.fasta")
suppressPackageStartupMessages(library(Biostrings))
# Number of bases per sequence and number of sequences to generate.
seqlength <- 100
nseq <- 1000
# One random sequence per entry: bases sampled uniformly with replacement.
# NOTE(review): sapply() and the shorthand T (for TRUE) work here but are
# fragile idioms; prefer vapply() and the literal TRUE.
mydict <- DNAStringSet(sapply(1:nseq, function(x) paste(sample(c("A","T","G","C"), seqlength, replace=T), collapse="")))
# FASTA headers will be ">1" .. ">1000".
names(mydict) <- 1:nseq
writeXStringSet(mydict, file="random.fasta") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blatentSimulate.R
\name{blatentSimulate}
\alias{blatentSimulate}
\title{Simulates data using blatent syntax and simulated parameters input}
\usage{
blatentSimulate(modelText, nObs,
defaultSimulatedParameters = setDefaultSimulatedParameters(),
seed = NULL)
}
\arguments{
\item{modelText}{A character string that contains the specifications for the model to be run. See \code{\link{blatentSyntax}}
for more information about syntax formatting.}
\item{nObs}{The number of observations to be simulated.}
\item{defaultSimulatedParameters}{The specifications for the generation of the types of parameters in the simulation. Currently comprised
of a list of unevaluated expressions (encapsulated in quotation marks; not calls for ease of user input) that will be evaluated by
simulation function to generate parameters. Defaults to values generated by \code{\link{setDefaultSimulatedParameters}}.
The list of unevaluated expressions must include:
\itemize{
\item \code{observedIntercepts} The data generating function for all intercepts for observed variables.
\item \code{observedMainEffects} The data generating function for the main effects for observed variables.
\item \code{observedInteractions} The data generating function for all interactions for observed variables.
\item \code{latentIntercepts} The data generating function for all intercepts for latent variables.
\item \code{latentMainEffects} The data generating function for the main effects for latent variables.
\item \code{latentInteractions} The data generating function for all interactions for latent variables.
}}
}
\description{
Simulates data from a model specified by blatent syntax and using a set of default parameter specifications.
}
\examples{
# Generating data using Q-matrix structure from data example in Chapter 9 of Rupp, Templin, & Henson (2010).
RTHCh9ModelSyntax = "
item1 ~ A1
item2 ~ A2
item3 ~ A3
item4 ~ A1 + A2 + A1:A2
item5 ~ A1 + A3 + A1:A3
item6 ~ A2 + A3 + A2:A3
item7 ~ A1 + A2 + A3 + A1:A2 + A1:A3 + A2:A3 + A1:A2:A3
# Latent Variable Specifications:
A1 A2 A3 <- latent(unit = 'rows', distribution = 'bernoulli', structure = 'univariate', type = 'ordinal')
# Observed Variable Specifications:
item1-item7 <- observed(distribution = 'bernoulli', link = 'probit')
"
simSpecs = setDefaultSimulatedParameters(
observedIntercepts = "runif(n = 1, min = -1, max = -1)",
observedMainEffects = "runif(n = 1, min = 2, max = 2)",
observedInteractions = "runif(n = 1, min = 0, max = 0)",
latentIntercepts = "runif(n = 1, min = 0, max = 0)",
latentMainEffects = "runif(n = 1, min = 0, max = 0)",
latentInteractions = "runif(n = 1, min = 0, max = 0)"
)
simulatedData = blatentSimulate(modelText = RTHCh9ModelSyntax, nObs = 1000, defaultSimulatedParameters = simSpecs)
}
\references{
Rupp, A. A., Templin, J., & Henson, R. A. (2010). Diagnostic Measurement: Theory, Methods, and Applications. New York: Guilford.
}
| /man/blatentSimulate.Rd | no_license | sailendramishra/blatent | R | false | true | 3,046 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blatentSimulate.R
\name{blatentSimulate}
\alias{blatentSimulate}
\title{Simulates data using blatent syntax and simulated parameters input}
\usage{
blatentSimulate(modelText, nObs,
defaultSimulatedParameters = setDefaultSimulatedParameters(),
seed = NULL)
}
\arguments{
\item{modelText}{A character string that contains the specifications for the model to be run. See \code{\link{blatentSyntax}}
for more information about syntax formatting.}
\item{nObs}{The number of observations to be simulated.}
\item{defaultSimulatedParameters}{The specifications for the generation of the types of parameters in the simulation. Currently comprised
of a list of unevaluated expressions (encapsulated in quotation marks; not calls for ease of user input) that will be evaluated by
simulation function to generate parameters. Defaults to values generated by \code{\link{setDefaultSimulatedParameters}}.
The list of unevaluated expressions must include:
\itemize{
\item \code{observedIntercepts} The data generating function for all intercepts for observed variables.
\item \code{observedMainEffects} The data generating function for the main effects for observed variables.
\item \code{observedInteractions} The data generating function for all interactions for observed variables.
\item \code{latentIntercepts} The data generating function for all intercepts for latent variables.
\item \code{latentMainEffects} The data generating function for the main effects for latent variables.
\item \code{latentInteractions} The data generating function for all interactions for latent variables.
}}
}
\description{
Simulates data from a model specified by blatent syntax and using a set of default parameter specifications.
}
\examples{
# Generating data using Q-matrix structure from data example in Chapter 9 of Rupp, Templin, & Henson (2010).
RTHCh9ModelSyntax = "
item1 ~ A1
item2 ~ A2
item3 ~ A3
item4 ~ A1 + A2 + A1:A2
item5 ~ A1 + A3 + A1:A3
item6 ~ A2 + A3 + A2:A3
item7 ~ A1 + A2 + A3 + A1:A2 + A1:A3 + A2:A3 + A1:A2:A3
# Latent Variable Specifications:
A1 A2 A3 <- latent(unit = 'rows', distribution = 'bernoulli', structure = 'univariate', type = 'ordinal')
# Observed Variable Specifications:
item1-item7 <- observed(distribution = 'bernoulli', link = 'probit')
"
simSpecs = setDefaultSimulatedParameters(
observedIntercepts = "runif(n = 1, min = -1, max = -1)",
observedMainEffects = "runif(n = 1, min = 2, max = 2)",
observedInteractions = "runif(n = 1, min = 0, max = 0)",
latentIntercepts = "runif(n = 1, min = 0, max = 0)",
latentMainEffects = "runif(n = 1, min = 0, max = 0)",
latentInteractions = "runif(n = 1, min = 0, max = 0)"
)
simulatedData = blatentSimulate(modelText = RTHCh9ModelSyntax, nObs = 1000, defaultSimulatedParameters = simSpecs)
}
\references{
Rupp, A. A., Templin, J., & Henson, R. A. (2010). Diagnostic Measurement: Theory, Methods, and Applications. New York: Guilford.
}
|
#' For every state, find the hospital at the given 30-day mortality rank.
#'
#' @param outcome One of "heart attack", "heart failure", "pneumonia"
#'   (case-insensitive).
#' @param num Rank to report per state: "best", "worst", or a number.
#' @return A data frame with columns `hospital` and `state` (abbreviated),
#'   one row per state (state abbreviations as row names).
rankall <- function(outcome, num = "best") {
        ## Read outcome data: every column as character; the mortality-rate
        ## column is coerced to numeric below ("Not Available" becomes NA).
        table <- read.csv("outcome-of-care-measures.csv", colClasses = "character",
                          header = TRUE, as.is = TRUE)

        ## Check that outcome and num are valid.
        outcome <- tolower(outcome)
        rate_col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
        if (!outcome %in% names(rate_col)) stop("Invalid outcome.")
        if (!is.numeric(num) && !num %in% c("best", "worst")) stop("Invalid number.")

        ## Coerce the selected 30-day mortality-rate column to numeric;
        ## the suppressed warnings are the expected NA coercions.
        col <- rate_col[[outcome]]
        suppressWarnings(table[, col] <- as.numeric(table[, col]))

        ## Keep hospital name (col 2), state (col 7) and the rate, then sort
        ## by state, rate, and hospital name (alphabetical tie-break).
        tab1 <- table[, c(2, 7, col)]
        tab2 <- tab1[order(tab1[[2]], tab1[[3]], tab1[[1]]), ]
        tab2 <- na.omit(tab2)
        tab3 <- split(tab2, tab2$State)

        ## For each state, pick the hospital at the requested rank.
        state <- lapply(tab3, function(tab) {
                tab_temp <- as.data.frame(tab)
                tab_temp[1, ][, 2]
        })
        hospital <- lapply(tab3, function(tab) {
                tab_temp <- as.data.frame(tab)
                idx <- num
                if (idx == "best") idx <- 1
                if (idx == "worst") idx <- nrow(tab_temp)
                tab_temp[idx, ][, 1]
        })

        ## Return a data frame with the hospital names and the (abbreviated)
        ## state name. Fix: the original ended on an assignment, which returns
        ## its value invisibly; return it visibly instead.
        result <- as.data.frame(cbind(hospital, state))
        result
} | /rankall.R | no_license | Gonzalo66/R-Programming | R | false | false | 2,258 | r | rankall <- function(outcome, num = "best") {
## Read outcome data
table <- read.csv("outcome-of-care-measures.csv", colClasses = "character",header=TRUE,as.is=TRUE)
#Check that outcome and num are valid
outcome <- tolower(outcome)
out <- c("heart attack","heart failure","pneumonia")
if(match(outcome,out, nomatch = -1) == -1 ) stop("Invalid outcome.")
if (!is.numeric(num)){
if (match(num,c("best","worst"), nomatch = -1) == -1) stop("Invalid number.")
}
## For each state, find the hospital of the given rank
if (outcome == "heart attack"){
suppressWarnings(table[,11]<-as.numeric(table[,11]))
tab1 <- table[,c(2,7,11),]
tab2 <- tab1[order(tab1$State,tab1$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack, tab1$Hospital.Name),] #
}
else {
if (outcome == "heart failure"){
suppressWarnings(table[,17]<-as.numeric(table[,17]))
tab1 <- table[,c(2,7,17),]
tab2 <- tab1[order(tab1$State,tab1$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure, tab1$Hospital.Name),]
}
else {
suppressWarnings(table[,23]<-as.numeric(table[,23]))
tab1 <- table[,c(2,7,23),]
tab2 <- tab1[order(tab1$State,tab1$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia, tab1$Hospital.Name),]
}
}
tab2 <- na.omit(tab2)
tab3 <- split(tab2,tab2$State)
## Return a data frame with the hospital names and the (abbreviated) state name
state <- lapply(tab3, function(tab){
tab_temp <- as.data.frame(tab)
tab_temp[1,][,2]})
hospital <- lapply(tab3, function(tab){
tab_temp <- as.data.frame(tab)
if (num == "best") num <- 1
if (num == "worst") num <- nrow(tab_temp)
tab_temp[num,][,1]})
result<- as.data.frame(cbind(hospital,state))
} |
\name{write.impute}
\alias{write.impute}
\title{Write a snpStats object in IMPUTE format}
\usage{
write.impute(X, a1, a2, bp, pedfile, snp.id = NULL)
}
\arguments{
\item{pedfile}{Output file name.}
\item{snp.id}{vector of snp ids}
\item{X}{SnpMatrix object}
\item{a1}{vector of first allele at each SNP}
\item{a2}{vector of second allele at each SNP}
\item{bp}{vector of base pair positions for each SNP}
}
\value{
No return value, but has the side effect of writing
specified output files.
}
\description{
See \code{\link{write.simple}} for general information.
}
\examples{
data(testdata,package="snpStats")
A.small <- Autosomes[1:6,1:10]
pf <- tempfile()
## write in suitable format for IMPUTE
nsnps <- ncol(A.small)
write.impute(A.small, a1=rep("1",nsnps), a2=rep("2",nsnps), bp=1:nsnps, pedfile=pf)
unlink(pf)
}
\author{
Chris Wallace
}
\keyword{manip}
| /man/write.impute.Rd | no_license | cran/snpStatsWriter | R | false | false | 886 | rd | \name{write.impute}
\alias{write.impute}
\title{Write a snpStats object in IMPUTE format}
\usage{
write.impute(X, a1, a2, bp, pedfile, snp.id = NULL)
}
\arguments{
\item{pedfile}{Output file name.}
\item{snp.id}{vector of snp ids}
\item{X}{SnpMatrix object}
\item{a1}{vector of first allele at each SNP}
\item{a2}{vector of second allele at each SNP}
\item{bp}{vector of base pair positions for each SNP}
}
\value{
No return value, but has the side effect of writing
specified output files.
}
\description{
See \code{\link{write.simple}} for general information.
}
\examples{
data(testdata,package="snpStats")
A.small <- Autosomes[1:6,1:10]
pf <- tempfile()
## write in suitable format for IMPUTE
nsnps <- ncol(A.small)
write.impute(A.small, a1=rep("1",nsnps), a2=rep("2",nsnps), bp=1:nsnps, pedfile=pf)
unlink(pf)
}
\author{
Chris Wallace
}
\keyword{manip}
|
context("test output")
test_that("out is df, nrows, ncols", {
  # Each case: a directory of tsv files and the expected column count of
  # the resulting count data frame.
  cases <- list(
    list(dir = "../../../research/out_test/out_test_extSegs3/", ncols = 2),
    list(dir = "../../../research/out", ncols = 35)
  )
  for (case in cases) {
    tsv.paths <- getTsvFiles(case$dir)
    seg.info <- extSegs(tsv.paths)
    count.df <- createCountDf(segNames = seg.info[["segNames"]],
                              tsvFiles = tsv.paths,
                              readEnd = seg.info[["End"]])
    # One row per unique segment name across all files.
    expect_equal(nrow(count.df),
                 length(Reduce(union, seg.info[["segNames"]])))
    expect_equal(ncol(count.df), case$ncols)
  }
})
| /tests/testthat/test-createCountDf.R | no_license | NPSDC/segsinglecell | R | false | false | 743 | r | context("test output")
test_that("out is df, nrows, ncols", {
  # First fixture directory: expect one row per unique segment name and
  # two columns.
  files.a <- getTsvFiles("../../../research/out_test/out_test_extSegs3/")
  segs.a <- extSegs(files.a)
  counts.a <- createCountDf(segNames = segs.a[["segNames"]],
                            tsvFiles = files.a,
                            readEnd = segs.a[["End"]])
  expect_equal(nrow(counts.a), length(Reduce(union, segs.a[["segNames"]])))
  expect_equal(ncol(counts.a), 2)
  # Second fixture directory: 35 columns expected.
  files.b <- getTsvFiles("../../../research/out")
  segs.b <- extSegs(files.b)
  counts.b <- createCountDf(segNames = segs.b[["segNames"]],
                            tsvFiles = files.b,
                            readEnd = segs.b[["End"]])
  expect_equal(nrow(counts.b), length(Reduce(union, segs.b[["segNames"]])))
  expect_equal(ncol(counts.b), 35)
})
|
# ######################################################
# ### examining the relationship between the returns ###
# ### attempt to group the data into sectors ###########
# ######################################################
#
# ### from long to wide
#
# dt7 = dcast (dt6,Date~Ticker , value.var = 'Returns' )
# dim(dt7)
# #########################################################
# ### calculate the correlation matrix on complete data ###
# #########################################################
# dt8 = na.omit(dt7[,!'Date'])
# c1 = cor(dt8)
# dim(c1)
#
# ######################################################################################################
# ### reorder the correlation matrix using a clustering method, arbitrarily cut tree into 10 clusters ###
# ### method is slow due to printing the heatmap, set to false to run quickly #########################
# ######################################################################################################
# out =reorder_cor_mat (corM=c1, n_clusters = 10,method = 'ward.D2')
# setkey(dt6,Ticker)
# dt6a = out$sectors[dt6]
#
# ############################################
# ### use word frequency to guess a sector ###
# ############################################
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==1]$Company.Name)),14L) ### health
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==2]$Company.Name)),14L) #### banking
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==3]$Company.Name)),14L) #### ????
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==4]$Company.Name)),14L) #### technology
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==5]$Company.Name)),14L) #### industrial
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==6]$Company.Name)),14L) #### energy
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==7]$Company.Name)),14L) #### fintech
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==8]$Company.Name)),14L) #### properties
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==9]$Company.Name)),14L) #### energy2
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==10]$Company.Name)),14L) #### education
#
# ### does not work very well, could try different clustering algos and number of clusters
| /R/script_sector_analysis.R | no_license | andrewdeeley/lab49 | R | false | false | 2,280 | r | # ######################################################
# ### examining the relationship between the returns ###
# ### attempt to group the data into sectors ###########
# ######################################################
#
# ### from long to wide
#
# dt7 = dcast (dt6,Date~Ticker , value.var = 'Returns' )
# dim(dt7)
# #########################################################
# ### calculate the correlation matrix on complete data ###
# #########################################################
# dt8 = na.omit(dt7[,!'Date'])
# c1 = cor(dt8)
# dim(c1)
#
# ######################################################################################################
# ### reorder the correlation matrix using a clustering method, arbitrarily cut tree into 10 clusters ###
# ### method is slow due to printing the heatmap, set to false to run quickly #########################
# ######################################################################################################
# out =reorder_cor_mat (corM=c1, n_clusters = 10,method = 'ward.D2')
# setkey(dt6,Ticker)
# dt6a = out$sectors[dt6]
#
# ############################################
# ### use word frequency to guess a sector ###
# ############################################
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==1]$Company.Name)),14L) ### health
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==2]$Company.Name)),14L) #### banking
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==3]$Company.Name)),14L) #### ????
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==4]$Company.Name)),14L) #### technology
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==5]$Company.Name)),14L) #### industrial
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==6]$Company.Name)),14L) #### energy
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==7]$Company.Name)),14L) #### fintech
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==8]$Company.Name)),14L) #### properties
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==9]$Company.Name)),14L) #### energy2
# tm::findMostFreqTerms(tm::termFreq(unique(dt6a[sector==10]$Company.Name)),14L) #### education
#
# ### does not work very well, could try different clustering algos and number of clusters
|
# Function to get data for the dates '1/2/2007' and '2/2/2007'.
# The returned data frame has one DateTime column (stored as POSIXct by
# formatdata()) and observation columns 3 to 9 as numeric.
#
# Args:
#   file  - semicolon-separated power-consumption file to read.
#   dates - character dates (d/m/Y) whose observations are returned, in
#           the order given.
getdata <- function (file = "household_power_consumption.txt",
                     dates = c('1/2/2007', '2/2/2007')) {
        # Read the complete data set as character columns; the nrows hint
        # matches the known file length and speeds up read.table().
        compdata <- read.table(file = file, header = TRUE, sep = ';',
                               colClasses = 'character', nrows = 2075260)
        # Select the rows for each requested date.  Collecting the pieces
        # in a list and binding once avoids the quadratic cost of growing
        # a data frame with rbind() inside a loop.
        data <- do.call(rbind, lapply(dates, function(d) {
                subset(compdata, Date == d)
        }))
        return(formatdata(data))
}
# formatdata() is called by getdata(); it normalises column formats:
#  - observation columns 3 to 9 are converted to numeric,
#  - the character Date and Time columns (d/m/Y and H:M:S) are combined
#    into a single "DateTime" column, parsed in UTC.
# The timestamps are built with strptime() (POSIXlt) and stored as
# POSIXct when cbind.data.frame() assembles the result.
formatdata <- function (data) {
        # Vectorised numeric conversion of the observation columns
        # (replaces the per-column for-loop).
        data[3:9] <- lapply(data[3:9], as.numeric)
        # Combine Date and Time into one timestamp per row.  (The original
        # call accidentally named its first argument '...'; plain
        # positional arguments are equivalent.)
        DateTime <- paste(data[, 1], data[, 2])
        DateTime <- strptime(DateTime, format = "%d/%m/%Y %H:%M:%S",
                             tz = "UTC")
        data <- cbind(DateTime, data[, 3:9])
        return(data)
}
# plotenergysubmetering() draws the three energy sub-metering series on
# the current graphics device (black/red/blue) and adds a legend.
plotenergysubmetering <- function(data) {
    series.cols <- c(Sub_metering_1 = "black",
                     Sub_metering_2 = "red",
                     Sub_metering_3 = "blue")
    # First series establishes the axes; the others are overlaid.
    plot(data$DateTime, data$Sub_metering_1, type = "l",
         col = series.cols[["Sub_metering_1"]],
         xlab = "", ylab = "Energy sub metering")
    points(data$DateTime, data$Sub_metering_2, type = "l",
           col = series.cols[["Sub_metering_2"]])
    points(data$DateTime, data$Sub_metering_3, type = "l",
           col = series.cols[["Sub_metering_3"]])
    # Legend entries mirror the series order and colours above.
    legend("topright", lty = 1, pt.cex = 1, cex = .75,
           legend = names(series.cols), col = unname(series.cols))
}
# plot3() renders the energy sub-metering chart (via
# plotenergysubmetering()) into plot3.png, a 480x480 px file in the
# working directory.
plot3 <- function(data) {
        png(filename = 'plot3.png', width = 480, height = 480,
            units = 'px')
        # Close the device even if plotting fails, so an error can no
        # longer leave the png device open (and plot3.png half-written).
        on.exit(dev.off(), add = TRUE)
        plotenergysubmetering(data)
}
# Load data and make the chart: getdata() reads
# household_power_consumption.txt from the working directory, and plot3()
# writes plot3.png next to it.
pow.consumption.data <- getdata()
plot3(pow.consumption.data)
| /plot3.R | no_license | amitjha3385/ExData_Plotting1 | R | false | false | 2,785 | r | # Function to get data for the dates '1/2/2007' and '2/2/2007'
# Returns the observations for the requested dates from the household
# power-consumption file, with columns reformatted by formatdata():
# one DateTime column plus numeric observation columns 3 to 9.
getdata <- function (file = "household_power_consumption.txt",
                     dates = c('1/2/2007', '2/2/2007')) {
        # Load the full semicolon-separated file as character columns;
        # the nrows hint matches the known file size.
        full.data <- read.table(file = file, header = TRUE, sep = ';',
                                colClasses = 'character', nrows = 2075260)
        # Accumulate the rows matching each requested date, in the order
        # the dates were given.
        selected <- data.frame()
        for (d in dates) {
                selected <- rbind(selected, subset(full.data, Date == d))
        }
        formatdata(selected)
}
# Called by getdata(): converts observation columns 3-9 to numeric and
# collapses the character Date/Time pair into a single "DateTime"
# column (parsed as d/m/Y H:M:S in UTC; stored as POSIXct by
# cbind.data.frame()).
formatdata <- function (data) {
        for (j in 3:9) {
                data[, j] <- as.numeric(data[, j])
        }
        # Build one timestamp per row from the first two columns.
        stamp <- strptime(paste(data[, 1], data[, 2]),
                          format = "%d/%m/%Y %H:%M:%S", tz = "UTC")
        cbind(DateTime = stamp, data[, 3:9])
}
# Draws the three sub-metering series (black/red/blue) on the active
# graphics device, with an identifying legend in the top-right corner.
plotenergysubmetering <- function(data) {
    plot(data$DateTime, data$Sub_metering_1, type = "l", col = "black",
         xlab = "", ylab = "Energy sub metering")
    # Overlay the remaining two series on the same axes.
    lines(data$DateTime, data$Sub_metering_2, col = "red")
    lines(data$DateTime, data$Sub_metering_3, col = "blue")
    legend("topright", lty = 1, pt.cex = 1, cex = .75,
           legend = c("Sub_metering_1", "Sub_metering_2",
                      "Sub_metering_3"),
           col = c("black", "red", "blue"))
}
# Writes the energy sub-metering chart to plot3.png (480x480 px) using
# plotenergysubmetering() for the actual drawing.
plot3 <- function(data) {
    out.file <- "plot3.png"
    png(filename = out.file, width = 480, height = 480, units = "px")
    plotenergysubmetering(data)
    # Close the png device so the file is flushed to disk.
    dev.off()
}
# Load data and make the chart (reads household_power_consumption.txt
# from the working directory and writes plot3.png alongside it).
pow.consumption.data <- getdata()
plot3(pow.consumption.data)
|
## For a fitted 'pooledROC.dpm' object, compute -- for each requested
## false positive fraction (FPF) and each saved MCMC draw -- the test
## threshold attaining that FPF and the corresponding true positive
## fraction (TPF), then summarise across draws (mean and 2.5%/97.5%
## quantiles).
##
## Arguments:
##   object   - a 'pooledROC.dpm' fit (class-checked below).
##   FPF      - vector of target false positive fractions.
##   parallel - "no", "multicore" (forked workers; not on Windows) or
##              "snow" (socket cluster).
##   ncpus    - number of worker processes when parallel != "no".
##   cl       - optional pre-built 'cluster' object for "snow".
##
## Returns a list with components 'thresholds', 'FPF' and 'TPF'; the two
## matrices have one row per FPF value and columns est/ql/qh.
compute.threshold.FPF.pooledROC.dpm <-
function(object, FPF = 0.5, parallel = c("no", "multicore", "snow"), ncpus = 1, cl = NULL) {
## Per-draw worker: k indexes the MCMC draw; res0/res1 carry the draws
## for the two groups (called with res0 = object$fit$h, res1 =
## object$fit$d -- presumably healthy and diseased; confirm against the
## fitting code).  When a group's mixture weights (P) are NULL its
## marginal is a single normal; otherwise a normal mixture handled via
## norMix/qnorMix/pnorMix (presumably from the nor1mix package; not
## loaded here).
doMCMCTH <- function(k, res0, res1, FPF) {
p0 <- res0$P
p1 <- res1$P
if(is.null(p0) & is.null(p1)) {
## Normal / normal: the threshold is the (1 - FPF) quantile of group 0;
## the TPF is the upper tail of group 1 at that threshold.
thresholds.s <- qnorm(1 - FPF, mean = res0$Mu[k], sd= sqrt(res0$Sigma2[k]))
TPF.s <- 1 - pnorm(thresholds.s, mean = res1$Mu[k], sd = sqrt(res1$Sigma2[k]))
} else if(is.null(p0) & !is.null(p1)){
## Normal / mixture.
aux1 <- norMix(mu = res1$Mu[k,], sigma = sqrt(res1$Sigma2[k,]), w = p1[k,])
thresholds.s <- qnorm(1 - FPF, mean = res0$Mu[k], sd= sqrt(res0$Sigma2[k]))
TPF.s <- 1 - pnorMix(thresholds.s, aux1)
} else if (!is.null(p0) & is.null(p1)){
## Mixture / normal.
aux0 <- norMix(mu = res0$Mu[k,], sigma = sqrt(res0$Sigma2[k,]), w = p0[k,])
thresholds.s <- qnorMix(1 - FPF, aux0)
TPF.s <- 1 - pnorm(thresholds.s, mean = res1$Mu[k], sd = sqrt(res1$Sigma2[k]))
} else {
## Mixture / mixture.
aux0 <- norMix(mu = res0$Mu[k,], sigma = sqrt(res0$Sigma2[k,]), w = p0[k,])
aux1 <- norMix(mu = res1$Mu[k,], sigma = sqrt(res1$Sigma2[k,]), w = p1[k,])
thresholds.s <- qnorMix(1 - FPF, aux0)
TPF.s <- 1 - pnorMix(thresholds.s, aux1)
}
res <- list()
res$thresholds.s <- thresholds.s
res$TPF.s <- TPF.s
res
}
if(class(object)[1] != "pooledROC.dpm") {
stop(paste0("This function can not be used for this object class: ", class(object)[1]))
}
parallel <- match.arg(parallel)
if(object$mcmc$nsave > 0) {
## Choose the parallel backend: forked workers ("multicore", disabled on
## Windows) or a socket cluster ("snow"); fall back to serial otherwise.
do_mc <- do_snow <- FALSE
if (parallel != "no" && ncpus > 1L) {
if (parallel == "multicore") {
do_mc <- .Platform$OS.type != "windows"
} else if (parallel == "snow") {
do_snow <- TRUE
}
if (!do_mc && !do_snow) {
ncpus <- 1L
}
loadNamespace("parallel") # get this out of the way before recording seed
}
# Seed
#if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1)
#seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
# Apply function
## Run doMCMCTH over every saved draw, in parallel when requested.
resBoot <- if (ncpus > 1L && (do_mc || do_snow)) {
if (do_mc) {
parallel::mclapply(seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF, mc.cores = ncpus)
} else if (do_snow) {
if (is.null(cl)) {
## No cluster supplied: build a local PSOCK cluster and stop it again
## before returning.
cl <- parallel::makePSOCKcluster(rep("localhost", ncpus))
if(RNGkind()[1L] == "L'Ecuyer-CMRG") {
parallel::clusterSetRNGStream(cl)
}
res <- parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF)
parallel::stopCluster(cl)
res
} else {
if(!inherits(cl, "cluster")) {
stop("Class of object 'cl' is not correct")
} else {
parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF)
}
}
}
} else {
lapply(seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF)
}
## Collect the per-draw results into matrices with one row per FPF and
## one column per MCMC draw (forced explicitly when FPF is scalar).
resBoot <- simplify2array(resBoot)
thresholds.s <- simplify2array(resBoot["thresholds.s",])
TPF.s <- simplify2array(resBoot["TPF.s",])
if(length(FPF) == 1) {
thresholds.s <- matrix(thresholds.s, nrow = 1)
TPF.s <- matrix(TPF.s, nrow = 1)
}
} else {
stop("nsave should be larger than zero.")
}
## Posterior summaries across draws: mean plus a central 95% interval
## (2.5% and 97.5% quantiles), one row per FPF value.
np <- length(FPF)
thresholds <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh")))
rownames(thresholds) <- FPF
thresholds[,1] <- apply(thresholds.s, 1, mean)
thresholds[,2] <- apply(thresholds.s, 1, quantile, prob = 0.025)
thresholds[,3] <- apply(thresholds.s, 1, quantile, prob = 0.975)
TPF <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh")))
rownames(TPF) <- FPF
TPF[,1] <- apply(TPF.s, 1, mean)
TPF[,2] <- apply(TPF.s, 1, quantile, prob = 0.025)
TPF[,3] <- apply(TPF.s, 1, quantile, prob = 0.975)
res <- list()
res$thresholds <- thresholds
res$FPF <- FPF
res$TPF <- TPF
res
}
| /ROCnReg/R/compute.threshold.FPF.pooledROC.dpm.R | no_license | albrizre/spatstat.revdep | R | false | false | 4,641 | r | compute.threshold.FPF.pooledROC.dpm <-
function(object, FPF = 0.5, parallel = c("no", "multicore", "snow"), ncpus = 1, cl = NULL) {
doMCMCTH <- function(k, res0, res1, FPF) {
p0 <- res0$P
p1 <- res1$P
if(is.null(p0) & is.null(p1)) {
thresholds.s <- qnorm(1 - FPF, mean = res0$Mu[k], sd= sqrt(res0$Sigma2[k]))
TPF.s <- 1 - pnorm(thresholds.s, mean = res1$Mu[k], sd = sqrt(res1$Sigma2[k]))
} else if(is.null(p0) & !is.null(p1)){
aux1 <- norMix(mu = res1$Mu[k,], sigma = sqrt(res1$Sigma2[k,]), w = p1[k,])
thresholds.s <- qnorm(1 - FPF, mean = res0$Mu[k], sd= sqrt(res0$Sigma2[k]))
TPF.s <- 1 - pnorMix(thresholds.s, aux1)
} else if (!is.null(p0) & is.null(p1)){
aux0 <- norMix(mu = res0$Mu[k,], sigma = sqrt(res0$Sigma2[k,]), w = p0[k,])
thresholds.s <- qnorMix(1 - FPF, aux0)
TPF.s <- 1 - pnorm(thresholds.s, mean = res1$Mu[k], sd = sqrt(res1$Sigma2[k]))
} else {
aux0 <- norMix(mu = res0$Mu[k,], sigma = sqrt(res0$Sigma2[k,]), w = p0[k,])
aux1 <- norMix(mu = res1$Mu[k,], sigma = sqrt(res1$Sigma2[k,]), w = p1[k,])
thresholds.s <- qnorMix(1 - FPF, aux0)
TPF.s <- 1 - pnorMix(thresholds.s, aux1)
}
res <- list()
res$thresholds.s <- thresholds.s
res$TPF.s <- TPF.s
res
}
if(class(object)[1] != "pooledROC.dpm") {
stop(paste0("This function can not be used for this object class: ", class(object)[1]))
}
parallel <- match.arg(parallel)
if(object$mcmc$nsave > 0) {
do_mc <- do_snow <- FALSE
if (parallel != "no" && ncpus > 1L) {
if (parallel == "multicore") {
do_mc <- .Platform$OS.type != "windows"
} else if (parallel == "snow") {
do_snow <- TRUE
}
if (!do_mc && !do_snow) {
ncpus <- 1L
}
loadNamespace("parallel") # get this out of the way before recording seed
}
# Seed
#if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1)
#seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
# Apply function
resBoot <- if (ncpus > 1L && (do_mc || do_snow)) {
if (do_mc) {
parallel::mclapply(seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF, mc.cores = ncpus)
} else if (do_snow) {
if (is.null(cl)) {
cl <- parallel::makePSOCKcluster(rep("localhost", ncpus))
if(RNGkind()[1L] == "L'Ecuyer-CMRG") {
parallel::clusterSetRNGStream(cl)
}
res <- parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF)
parallel::stopCluster(cl)
res
} else {
if(!inherits(cl, "cluster")) {
stop("Class of object 'cl' is not correct")
} else {
parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF)
}
}
}
} else {
lapply(seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, FPF = FPF)
}
resBoot <- simplify2array(resBoot)
thresholds.s <- simplify2array(resBoot["thresholds.s",])
TPF.s <- simplify2array(resBoot["TPF.s",])
if(length(FPF) == 1) {
thresholds.s <- matrix(thresholds.s, nrow = 1)
TPF.s <- matrix(TPF.s, nrow = 1)
}
} else {
stop("nsave should be larger than zero.")
}
np <- length(FPF)
thresholds <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh")))
rownames(thresholds) <- FPF
thresholds[,1] <- apply(thresholds.s, 1, mean)
thresholds[,2] <- apply(thresholds.s, 1, quantile, prob = 0.025)
thresholds[,3] <- apply(thresholds.s, 1, quantile, prob = 0.975)
TPF <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh")))
rownames(TPF) <- FPF
TPF[,1] <- apply(TPF.s, 1, mean)
TPF[,2] <- apply(TPF.s, 1, quantile, prob = 0.025)
TPF[,3] <- apply(TPF.s, 1, quantile, prob = 0.975)
res <- list()
res$thresholds <- thresholds
res$FPF <- FPF
res$TPF <- TPF
res
}
|
/提取显著差异基因并做超几何检验.R | no_license | zhangyupisa/Analysis-of-Expression-profiling-in-R | R | false | false | 2,500 | r | ||
# Load the ape package for phylogenetic tree manipulation.
library(ape)
# Read the Newick-format tree from disk.
testtree <- read.tree("10373_0.txt")
# Remove the root node (the file path suggests a PAML/codeml workflow,
# which expects unrooted trees -- confirm).
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10373_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/10373_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read the Newick tree, unroot it, and write the result back out.
testtree <- read.tree("10373_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10373_0_unrooted.txt")
#############################################
# IntegralModel_out.R
#
# Output results of integral model.
#############################################
# Ryan Hastings, 5 May 2020
#############################################
Etot<-rep(0.0,maxt)
Itot<-rep(0.0,maxt)
Htot<-rep(0.0,maxt)
Ctot<-rep(0.0,maxt)
Rtot<-rep(0.0,maxt)
Dtot<-rep(0.0,maxt)
for (t in 1:maxt) {
t0<-max(t,t-Tinc)
Etot[t]<-sum(Enew[t0:t])
t0<-max(t,t-Tinf)
Itot[t]<-sum(Inew[t0:t])
t0<-max(t,t-Trecov)
t1<-max(t,t-Tdeath)
Htot[t]<-sum(Hnew[t0:t])+sum(Cnew[t0:t])+sum(Qnew[t1:t])
Ctot[t]<-sum(Cnew[t0:t])+sum(Qnew[t1:t])
Rtot[t]<-sum(Rnew[1:t])
Dtot[t]<-sum(Dnew[1:t])
}
df.SEIR<-data.frame(day=seq(1,maxt),Susceptible=S,NewExposed=Enew,TotalExposed=Etot,
NewInfectious=Inew,TotalInfectious=Itot,
NewHospitalized=Hnew+Cnew+Qnew,TotalHospitalized=Htot,
NewCritical=Cnew+Qnew,TotalCritical=Ctot,
NewRecovered=Rnew,TotalRecovered=Rtot,
NewDeceased=Dnew,TotalDeceased=Dtot)
print( ggplot(df.SEIR,aes(x=day))+
# geom_line(aes(y=Susceptible,color="S"))+
# geom_line(aes(y=TotalExposed,color="E"))+
# geom_line(aes(y=TotalInfectious,color="I"))+
# geom_line(aes(y=TotalRecovered,color="R"))+
geom_line(aes(y=TotalDeceased,color="D"))+
geom_line(aes(y=TotalHospitalized,color="H"))+
geom_line(aes(y=TotalCritical,color="C"))
) | /model_v2.0/IntegralModel_out.R | no_license | RyanHastings/COVID19 | R | false | false | 1,486 | r | #############################################
# IntegralModel_out.R
#
# Output results of integral model.
#############################################
# Ryan Hastings, 5 May 2020
#############################################
Etot<-rep(0.0,maxt)
Itot<-rep(0.0,maxt)
Htot<-rep(0.0,maxt)
Ctot<-rep(0.0,maxt)
Rtot<-rep(0.0,maxt)
Dtot<-rep(0.0,maxt)
for (t in 1:maxt) {
t0<-max(t,t-Tinc)
Etot[t]<-sum(Enew[t0:t])
t0<-max(t,t-Tinf)
Itot[t]<-sum(Inew[t0:t])
t0<-max(t,t-Trecov)
t1<-max(t,t-Tdeath)
Htot[t]<-sum(Hnew[t0:t])+sum(Cnew[t0:t])+sum(Qnew[t1:t])
Ctot[t]<-sum(Cnew[t0:t])+sum(Qnew[t1:t])
Rtot[t]<-sum(Rnew[1:t])
Dtot[t]<-sum(Dnew[1:t])
}
df.SEIR<-data.frame(day=seq(1,maxt),Susceptible=S,NewExposed=Enew,TotalExposed=Etot,
NewInfectious=Inew,TotalInfectious=Itot,
NewHospitalized=Hnew+Cnew+Qnew,TotalHospitalized=Htot,
NewCritical=Cnew+Qnew,TotalCritical=Ctot,
NewRecovered=Rnew,TotalRecovered=Rtot,
NewDeceased=Dnew,TotalDeceased=Dtot)
print( ggplot(df.SEIR,aes(x=day))+
# geom_line(aes(y=Susceptible,color="S"))+
# geom_line(aes(y=TotalExposed,color="E"))+
# geom_line(aes(y=TotalInfectious,color="I"))+
# geom_line(aes(y=TotalRecovered,color="R"))+
geom_line(aes(y=TotalDeceased,color="D"))+
geom_line(aes(y=TotalHospitalized,color="H"))+
geom_line(aes(y=TotalCritical,color="C"))
) |
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 7.69715866152871e-168, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Auto-generated fuzz-harness step: invoke DLMtool::LBSPRgen with the
# extreme-valued argument list built above and print the structure of the
# result (run under valgrind, per the containing file layout -- presumably).
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
-8.48833146280612e+43, 2.71954993859316e-126 ))
# Call the fuzz target with the generated inputs and inspect the result.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReconstructedOutline.R
\name{transform.image.reconstructedOutline}
\alias{transform.image.reconstructedOutline}
\title{Transform an image into the reconstructed space}
\usage{
\method{transform}{image.reconstructedOutline}(r)
}
\arguments{
\item{r}{\code{reconstructedOutline} object}
}
\value{
\code{reconstructedOutline} object with extra elements
\item{\code{ims}}{Coordinates of corners of pixels in spherical coordinates}
\item{\code{immask}}{Mask matrix with same dimensions as image \code{im}}
}
\description{
Transform an image into the reconstructed space. The four corner
coordinates of each pixel are transformed into spherical
coordinates and a mask matrix with the same dimensions as
\code{im} is created. This has \code{TRUE} for pixels that should
be displayed and \code{FALSE} for ones that should not.
}
\author{
David Sterratt
}
| /pkg/retistruct/man/transform.image.reconstructedOutline.Rd | no_license | bala5411/retistruct | R | false | true | 924 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReconstructedOutline.R
\name{transform.image.reconstructedOutline}
\alias{transform.image.reconstructedOutline}
\title{Transform an image into the reconstructed space}
\usage{
\method{transform}{image.reconstructedOutline}(r)
}
\arguments{
\item{r}{\code{reconstructedOutline} object}
}
\value{
\code{reconstructedOutline} object with extra elements
\item{\code{ims}}{Coordinates of corners of pixels in spherical coordinates}
\item{\code{immask}}{Mask matrix with same dimensions as image \code{im}}
}
\description{
Transform an image into the reconstructed space. The four corner
coordinates of each pixel are transformed into spherical
coordinates and a mask matrix with the same dimensions as
\code{im} is created. This has \code{TRUE} for pixels that should
be displayed and \code{FALSE} for ones that should not.
}
\author{
David Sterratt
}
|
## Header
## ---------------------------------------------------
# Title: REC Registry data extraction script
# Purpose: To extract all required fields from the RecRegistry database.
# Status: <in development>
# (Input/Components):
# (Output):
# (How to execute code):
#
# Author: Rodrigo Lopez
# rodrigo.lopez@cer.gov.au
# CER5049
# Data Science team / Data and Innovation
#
# # Date created: 5/10/2021
# (Date of last major modification):
# Copyright (c) Clean Energy Regulator
## ---------------------------------------------------
###############
## Libraries ##
###############
library(httr) #needed for content()
library(odbc) # needed for open database connectivity
library(magrittr)
library(readr)
library(stringr) #needed for manipulating strings
library(dplyr)
library(tidyr)
# library(tidyverse)
library(lubridate) #needed for manipulation of date and times
library(RODBC)
library(gtools) # Needed for permutations
library(odbc) # needed for Odbc
####################
## DB connections ##
####################
# Open the ODBC connection used by the extracts below; "RecRegistry" is a
# DSN assumed to be configured on the host (NOTE(review): verify), with a
# 10 second timeout.
conRECREG <- dbConnect(odbc::odbc(), "RecRegistry", timeout = 10)
###############
## Variables ##
###############
# Disable scientific notation in printed numbers; build data frames with
# character columns rather than factors.
options(scipen=999, stringsAsFactors = FALSE)
DevOpsNum <- "Work Item 35079" # Used in log file, and relates to the Feature/PBI number in DevOps
# Range for "installation dates" whose records are extracted
DateStart <- as.Date("2021-10-01") # example 2019-10-21
# gsub() coerces the Date to character, yielding the yyyymmdd form that
# is interpolated into the SQL below.
SQLDateStart <- gsub("-","",DateStart) # Used in SQL script
DateEnd <- as.Date("2021-10-08") # example 2019-10-21
SQLDateEnd <- gsub("-","",DateEnd) # Used in SQL script
###############
## Functions ##
###############
#None
##########
## Main ##
##########
#######################
## Data input/ingest ##
#######################
# SQL script as input string to an R function, which extracts the required data.
# Un/comment the required fields, as needed.
# Ensure the last uncommented data field before FROM has no comma at end of line.
# The query joins the SGU registry row to its certificate registration,
# address/technical-person dimensions (installer, designer, electrician,
# owner), account views and the small-unit registration fact/dimension
# tables, filtered to installations between SQLDateStart and SQLDateEnd.
# NOTE(review): the only interpolated values are SQLDateStart / SQLDateEnd,
# which are derived from validated as.Date() calls above, so injection
# exposure is limited -- never interpolate free-form user input here.
# NOTE(review): unqualified joins (CERREGISTRY.*) assume the DSN's default
# database is RECREG_PROD -- TODO confirm on deployment.
RecReg_Raw_SQL <- paste0("SELECT
sgu.ACCREDITATION_CODE as Small_Unit_Accreditation_Code,
sgu.ID, -- Needed for Serial Numbers matching below
--Installation Address
sgu_addr.POSTCODE as Small_Unit_Installation_Postcode,
sgu_addr.STATE as Small_Unit_Installation_State,
sgu_addr.SUBURB as Small_Unit_Installation_City,
CONCAT_WS(', ',CONCAT_WS(' ',sgu_addr.UNIT_TYPE,sgu_addr.UNIT_NUMBER,sgu_addr.STREET_NUMBER,sgu_addr.STREET_NAME,sgu_addr.STREET_TYPE),sgu_addr.SUBURB,sgu_addr.STATE,sgu_addr.POSTCODE) AS 'Small_Unit_Installation_Street_Address_Full',
sgu_addr.SITE_NAME as Small_Unit_Installation_Property_Name,
sgu_addr.UNIT_TYPE as Small_Unit_Installation_Address_Type,
sgu_addr.UNIT_NUMBER as Small_Unit_Installation_Address_Type_Number,
sgu_addr.STREET_NUMBER as Small_Unit_Installation_Street_Number,
sgu_addr.STREET_NAME as Small_Unit_Installation_Street_Name,
sgu_addr.STREET_TYPE as Small_Unit_Installation_Street_Type,
sgu_addr.SPECIAL_ADDRESS as Small_Unit_Installation_Additional_Address_Information,
--Dates
sgu.INSTALLATION_DATE as Small_Unit_Installed_Date,
convert(DATE, cert_reg.CREATED_DATE AT TIME ZONE 'UTC' AT TIME ZONE 'AUS Eastern Standard Time') as REC_Creation_Date,
--Account
acct.[Name] as Account_Name,
vacct.[RPE_ID] as Registered_Person_ID,
sgu.FUEL_SOURCE,
-- --Installer details
installer.INSTALLER_ACCREDITED_NUMBER as Small_Unit_Installer_CEC_Accreditation_Code,
installer.SURNAME as Small_Unit_Installer_Surname,
installer.FIRST_NAME as Small_Unit_Installer_Firstname,
installer.MOBILE as Small_Unit_Installer_Mobile_Number,
installer.PHONE as Small_Unit_Installer_Phone_Number,
installer.FAX as Small_Unit_Installer_Fax_Number,
installer.EMAIL as Small_Unit_Installer_Email_Address,
installer_address.POSTCODE as Small_Unit_Installer_Postcode,
installer_address.STATE as Small_Unit_Installer_State,
installer_address.SUBURB as Small_Unit_Installer_City,
CONCAT_WS(', ',CONCAT_WS(' ',installer_address.UNIT_TYPE,installer_address.UNIT_NUMBER,installer_address.STREET_NUMBER,installer_address.STREET_NAME,installer_address.STREET_TYPE),installer_address.SUBURB,installer_address.STATE,installer_address.POSTCODE) AS 'Small_Unit_Installer_Street_Address_Full',
installer_address.SITE_NAME as Small_Unit_Installer_Property_Name,
installer_address.UNIT_TYPE as Small_Unit_Installer_Address_Type,
installer_address.UNIT_NUMBER as Small_Unit_Installer_Address_Type_Number,
installer_address.STREET_NUMBER as Small_Unit_Installer_Street_Number,
installer_address.STREET_NAME as Small_Unit_Installer_Street_Name,
installer_address.STREET_TYPE as Small_Unit_Installer_Street_Type,
installer_address.SPECIAL_ADDRESS as Small_Unit_Installer_Additional_Address_Information,
-- --Designer details
designer.INSTALLER_ACCREDITED_NUMBER as Small_Unit_Designer_CEC_Accreditation_Code,
designer.SURNAME as Small_Unit_Designer_Surname,
designer.FIRST_NAME as Small_Unit_Designer_Firstname,
designer.MOBILE as Small_Unit_Designer_Mobile_Number,
designer.PHONE as Small_Unit_Designer_Phone_Number,
designer.FAX as Small_Unit_Designer_Fax_Number,
designer.EMAIL as Small_Unit_Designer_Email_Address,
designer_address.POSTCODE as SGU_Designer_Postcode,
designer_address.STATE as SGU_Designer_State,
designer_address.SUBURB as SGU_Designer_City,
CONCAT_WS(', ',CONCAT_WS(' ',designer_address.UNIT_TYPE,designer_address.UNIT_NUMBER,designer_address.STREET_NUMBER,designer_address.STREET_NAME,designer_address.STREET_TYPE),designer_address.SUBURB,designer_address.STATE,designer_address.POSTCODE) AS 'Small_Unit_Designer_Street_Address_Full',
designer_address.SITE_NAME as SGU_Designer_Property_Name,
designer_address.UNIT_TYPE as SGU_Designer_Address_Type,
designer_address.UNIT_NUMBER as SGU_Designer_Address_Type_Number,
designer_address.STREET_NUMBER as SGU_Designer_Address_Street_Number,
designer_address.STREET_NAME as SGU_Designer_Address_Street_Name,
designer_address.STREET_TYPE as SGU_Designer_Address_Street_Type,
designer_address.SPECIAL_ADDRESS as SGU_Designer_Additional_Address_Information,
-- --Electrician
electrician.[ELECTRICIAN_NUMBER] as SGU_Electrician_License_Number,
electrician.[SURNAME] as SGU_Electrician_Surname,
electrician.[FIRST_NAME] as SGU_Electrician_Firstname,
electrician.[MOBILE] as SGU_Electrician_Mobile_Number,
electrician.[PHONE] as SGU_Electrician_Phone_Number,
electrician.[FAX] as SGU_Electrician_Fax_Number,
electrician.[EMAIL] as SGU_Electrician_Email_Address,
electrician_address.[POSTCODE] as SGU_Electrician_Postcode,
electrician_address.[STATE] as SGU_Electrician_State,
electrician_address.[SUBURB] as SGU_Electrician_City,
CONCAT_WS(', ',CONCAT_WS(' ',electrician_address.UNIT_TYPE,electrician_address.UNIT_NUMBER,electrician_address.STREET_NUMBER,electrician_address.STREET_NAME,electrician_address.STREET_TYPE),electrician_address.SUBURB,electrician_address.STATE,electrician_address.POSTCODE) AS 'SGU_Electrician_Street_Address_Full',
electrician_address.SITE_NAME as SGU_Electrician_Property_Name,
electrician_address.[UNIT_TYPE] as SGU_Electrician_Address_Type,
electrician_address.[UNIT_NUMBER] as SGU_Electrician_Address_Type_Number,
electrician_address.[STREET_NUMBER] as SGU_Electrician_Street_Number,
electrician_address.[STREET_NAME] as SGU_Electrician_Street_Name,
electrician_address.[STREET_TYPE] as SGU_Electrician_Street_Type,
electrician_address.[SPECIAL_ADDRESS] as SGU_Electrician_Additional_Address_Information,
--Small Unit Owner
owner.[Small Unit Owner Surname] as Small_Unit_Owner_Surname,
owner.[Small Unit Owner Firstname] as Small_Unit_Owner_Firstname,
--owner.[Small Unit Owner Initials] as Small_Unit_Owner_Initials,
--owner.[Small Unit Owner Title] as Small_Unit_Owner_Title,
owner.[Small Unit Owner Mobile Number] as Small_Unit_Owner_Mobile_Number,
owner.[Small Unit Owner Phone Number] as Small_Unit_Owner_Phone_Number,
owner.[Small Unit Owner Fax Number] as Small_Unit_Owner_Fax_Number,
owner.[Small Unit Owner Email Address] as Small_Unit_Owner_Email_Address,
owner.[Small Unit Owner Postcode] as Small_Unit_Owner_Postcode,
owner.[Small Unit Owner State] as Small_Unit_Owner_State,
owner.[Small Unit Owner City] as Small_Unit_Owner_City,
owner.[Small Unit Owner Street Address Full],
CONCAT_WS(', ',owner.[Small Unit Owner Street Address Full],owner.[Small Unit Owner City],owner.[Small Unit Owner State],owner.[Small Unit Owner Postcode]) AS 'Small_Unit_Owner_Street_Address_Full',
owner.[Small Unit Owner Address Type] as Small_Unit_Owner_Address_Type,
owner.[Small Unit Owner Address Type Number] as Small_Unit_Owner_Address_Type_Number,
owner.[Small Unit Owner Street Number] as Small_Unit_Owner_Street_Number,
owner.[Small Unit Owner Street Name] as Small_Unit_Owner_Street_Name,
owner.[Small Unit Owner Street Type] as Small_Unit_Owner_Street_Type,
----- Facts -----
--validation audit status
vas.Any_RECs_Passed_Validation_Audit_Flag as Any_RECs_Passed_Validation_Audit_Flag,
--RECs
fsur.[RECs Created Quantity] as RECs_Created_Quantity,
fsur.[RECs Pending Audit Quantity] as RECs_Pending_Audit_Quantity,
fsur.[RECs Passed Audit Quantity] as RECs_Passed_Audit_Quantity,
fsur.[RECs Failed Audit Quantity] as RECs_Failed_Audit_Quantity,
fsur.[RECs Registered Quantity] as RECs_Registered_Quantity,
--common
fsur.[Deeming Period in Years] as Deeming_Period_in_Years,
dsur.[Small Unit Zone] as Small_Unit_Zone,
dsur.[Small Unit Customer Reference] as Small_Unit_Customer_Reference,
--SGU facts
dsur.[RECs Multiplier Used] as RECs_Multiplier_Used,
fsur.[SGU Rated Output in kW] as SGU_Rated_Output_in_kW,
dsur.[SGU Brand] as SGU_Brand,
dsur.[SGU Model] as SGU_Model,
--SGU statements etc
--dsur.[SGU Installation Type] as SGU_Installation_Type,
dsur.[SGU Inverter Manufacturer] as SGU_Inverter_Manufacturer,
dsur.[SGU Inverter Series] as SGU_Inverter_Series,
dsur.[SGU Inverter Model Number] as SGU_Inverter_Model_Number,
dsur.[RECs Multiplier Used Previously Flag] as RECs_Multiplier_Used_Previously_Flag,
dsur.[SGU Premises Eligible for Solar Credits Flag] as SGU_Premises_Eligible_for_Solar_Credits_Flag,
dsur.[SGU Complete Unit Flag] as SGU_Complete_Unit_Flag,
dsur.[SGU Transitional Multiplier Flag] as SGU_Transitional_Multiplier_Flag,
dsur.[Site Specific Audit Report Available Flag] as Site_Specific_Audit_Report_Available_Flag,
dsur.[Received Statement for Installer and Designer CEC Accreditation Flag] as Received_Statement_for_Installer_and_Designer_CEC_Accreditation_Flag,
dsur.[Received Statement for Adherence to State Requirements Flag] as Received_Statement_for_Adherence_to_State_Requirements_Flag,
dsur.[Received Certificate of Electrical Safety or Compliance Flag] as Received_Certificate_of_Electrical_Safety_or_Compliance_Flag,
dsur.[Received Statement that System is Off Grid Flag] as Received_Statement_that_System_is_Off_Grid_Flag,
dsur.[All Electrical Work Undertaken by Electrician Flag] as All_Electrical_Work_Undertaken_by_Electrician_Flag,
dsur.[Received Statement Confirming Liability Insurance Flag] as Received_Statement_Confirming_Liability_Insurance_Flag,
dsur.[Received Statement for Adherence to CEC Code of Conduct Flag] as Received_Statement_for_Adherence_to_CEC_Code_of_Conduct_Flag,
dsur.[Received Statement for Adherence to ANZ Standards Flag] as Received_Statement_for_Adherence_to_ANZ_Standards_Flag,
dsur.[SGU Number of Panels] as SGU_Number_of_Panels,
dsur.[SGU Default Availability Used Flag] as SGU_Default_Availability_Used_Flag,
dsur.[SGU Availability] as SGU_Availability,
dsur.[More than One SGU at Address Flag] as More_than_One_SGU_at_Address_Flag,
sgu.MORE_THAN_ONE_SGU_SAME_ADDRESS,
--sgu.HAS_FAILED_PREVIOUSLY,
sgu.INSTALLATION_TYPE,
sgu.RECREATION_EXPLANATION_NOTE,
sgu.ADDITIONAL_CAPACITY_DETAILS,
sgu.VERSION,
sgu.ADDITIONAL_SYSTEM_INFORMATION,
--sgu.PREVIOUS_RECS_MULTIPLIER_FLAG,
dsur.[SGU Rebate Approved Flag] as SGU_Rebate_Approved_Flag,
fsur.[SGU Out of Pocket Expense] as SGU_Out_of_Pocket_Expense,
dsur.[SWH Brand] as SWH_Brand,
dsur.[SWH Model] as SWH_Model,
dsur.[SWH Installation Type] as SWH_Installation_Type,
dsur.[SWH Technology Type] as SWH_Technology_Type,
dsur.[SWH Number of Panels] as SWH_Number_of_Panels,
dsur.[SWH Capacity over 700L Flag] as SWH_Capacity_over_700L_Flag,
dsur.[Stat Declaration for SWH Capacity Supplied Flag] as Stat_Declaration_for_SWH_Capacity_Supplied_Flag,
dsur.[SWH Second Hand Flag] as SWH_Second_Hand_Flag,
dsur.[More than One SWH at Address Flag] as More_than_One_SWH_at_Address_Flag,
dsur.[RETAILER NAME] as RETAILER_NAME,
dsur.[RETAILER ABN] as RETAILER_ABN
--dsur.[NATIONAL METERING NUMBER] as NATIONAL_METERING_NUMBER,
--dsur.[BATTERY MANUFACTURER] as BATTERY_MANUFACTURER,
--dsur.[BATTERY MODEL] as BATTERY_MODEL,
--dsur.[BATTERY PART OF AGG CONTROL] as BATTERY_PART_OF_AGG_CONTROL,
--dsur.[BATTERY SETTINGS CHANGED] as BATTERY_SETTINGS_CHANGED,
--dsur.[SGU ELECTRICITY GRID CONNECTIVITY] as SGU_ELECTRICITY_GRID_CONNECTIVITY
FROM [RECREG_PROD].CERREGISTRY.SGU sgu
INNER JOIN CERREGISTRY.CERTIFICATE_REGISTRATION cert_reg
ON sgu.ACCREDITATION_CODE = cert_reg.ACCREDITATION_CODE
AND sgu.INSTALLATION_DATE >= convert(date,'",SQLDateStart,"',112)
AND sgu.INSTALLATION_DATE <= convert(date,'",SQLDateEnd,"',112)
--Installation Address
LEFT JOIN CERREGISTRY.ADDRESS sgu_addr
ON sgu.INSTALLATION_ADDRESS_ID = sgu_addr.ID
--Installer
LEFT JOIN CERREGISTRY.SGU_TECHNICAL_PERSON installer
ON installer.ID = sgu.INSTALLER_ID
LEFT JOIN CERREGISTRY.ADDRESS installer_address
ON installer_address.ID = installer.ADDRESS_ID
--Designer
LEFT JOIN CERREGISTRY.SGU_TECHNICAL_PERSON designer
ON designer.ID = sgu.DESIGNER_ID
LEFT JOIN CERREGISTRY.ADDRESS designer_address
ON designer_address.ID = designer.ADDRESS_ID
--Electrician
LEFT JOIN CERREGISTRY.SGU_TECHNICAL_PERSON electrician
ON electrician.ID = sgu.ELECTRICIAN_ID
LEFT JOIN CERREGISTRY.ADDRESS electrician_address
ON electrician_address.ID = electrician.ADDRESS_ID
--Agent
LEFT JOIN CERREGISTRY.ACCOUNT acct
ON cert_reg.REGISTERED_PERSON_ACCOUNT_ID = acct.ID
--registered person - vacct
inner join [RECREG_PROD].RETDim.vAccount vacct
on acct.ID = vacct.ACC_ID
--Owner
inner join [RECREG_PROD].RETDim.vw_Small_Unit_Owner owner
on cert_reg.OWNER_ID = owner.[Small Unit Owner ID]
--Small Unit Registrations - dsur
inner join [RECREG_PROD].RETDim.SmallUnitRegistrations dsur
on sgu.ACCREDITATION_CODE = dsur.[Small Unit Accreditation Code]
--Fact Small Unit Registrations - fsur
inner join [RECREG_PROD].RETFact.SmallUnitRegistration fsur
on fsur.[Dim_Small_Unit_Registration_ID] = dsur.[Dim_Small_Unit_Registration_ID]
--Validation Audit Status - vas
inner join [RECREG_PROD].RETDim.Dim_Validation_Audit_Status vas
on vas.[Dim_Validation_Audit_Status_ID] = fsur.[Dim_Validation_Audit_Status_ID]
")
start_time <- Sys.time()
# Run the extract once and drop exact duplicate rows (the dimension/fact joins
# can fan out). Free-text columns may still contain embedded line breaks; they
# are cleaned in the "Cleaning" section below before any CSV is written.
RecReg_data_YTD <- dbGetQuery(conRECREG, RecReg_Raw_SQL) %>% unique()
# NOTE(review): a second, identical dbGetQuery() call (assigned to
# RecReg_data_test2210) was removed here -- it doubled the load on the
# database and its result was never used anywhere in this script.
## Cleaning
## ---------------------------------------------------
# Free-text fields (e.g. ADDITIONAL_SYSTEM_INFORMATION and the
# "additional address information" columns) can contain embedded carriage
# returns / line feeds, which corrupt rows in the output CSV files.

# Replace CR/LF with a single space in every character column of a data
# frame, preserving each column's class. apply() is deliberately avoided:
# it coerces the whole data frame to a character matrix, silently turning
# numeric/date columns into text.
clean_linebreaks <- function(df) {
  df[] <- lapply(df, function(col) {
    if (is.character(col)) gsub("[\r\n]", " ", col) else col
  })
  df
}

# Clean in place so every later output of RecReg_data_YTD (including the
# "_check" CSV) is free of embedded line breaks; previously only
# ADDITIONAL_SYSTEM_INFORMATION was cleaned in place.
RecReg_data_YTD <- clean_linebreaks(RecReg_data_YTD)
clean_data <- RecReg_data_YTD

# Sanity check: no character column should still contain CR/LF.
stopifnot(!any(vapply(
  clean_data,
  function(col) is.character(col) && any(grepl("[\r\n]", col)),
  logical(1)
)))

str(clean_data)
write_csv2(clean_data, "RecReg_data_FYTD_clean.csv")
##############
## Analysis ##
##############
# After all the exciting analysis then remember to record the
# execution time of main:
end_time <- Sys.time()
run_time <- end_time - start_time # difftime; logged in seconds below
run_time
#######################
#### Write outputs ####
#######################
# write_rds(SRES_Raw2, "top100_rows.rds")
#
# write_rds(RecReg_data, "RR_2021.rds")
# Other text files/tables
## ------------------------------------------------------
write_csv2(RecReg_data_YTD, "RecReg_data_FYTD_check.csv")
## Log file
## ------------------------------------------------------
# One log row per run: the DevOps work item (DevOpsNum, declared above as
# "used in log file" but previously never written), the wall-clock run time
# pinned to seconds (a bare difftime's units vary with magnitude, making
# appended rows incomparable), and the run timestamp. row.names = FALSE keeps
# appended rows aligned with the header written on the first run.
log_df <- data.frame(
  dev_ops_item  = DevOpsNum,
  run_time_secs = as.numeric(run_time, units = "secs"),
  run_date      = date(),
  stringsAsFactors = FALSE
)
write.table(log_df, "log_file.csv", sep = ",", row.names = FALSE,
            col.names = !file.exists("log_file.csv"), append = TRUE)
| /RecRegistry_data_extraction_script_v0_2.R | no_license | CER5049/RecReg-data-extract | R | false | false | 18,142 | r | ## Header
## ---------------------------------------------------
# Title: REC Registry data extraction script
# Purpose: To extract all required fields from the RecRegistry database.
# Status: <in development>
# (Input/Components):
# (Output):
# (How to execute code):
#
# Author: Rodrigo Lopez
# rodrigo.lopez@cer.gov.au
# CER5049
# Data Science team / Data and Innovation
#
# # Date created: 5/10/2021
# (Date of last major modification):git
# Copyright (c) Clean Energy Regulator
## ---------------------------------------------------
###############
## Libraries ##
###############
library(httr) #needed for content()
library(odbc) # needed for open database connectivity
library(magrittr)
library(readr)
library(stringr) #needed for manipulating strings
library(dplyr)
library(tidyr)
# library(tidyverse)
library(lubridate) #needed for manipulation of date and times
library(RODBC)
library(gtools) # Needed for permutations
library(odbc) # needed for Odbc
####################
## DB connections ##
####################
conRECREG <- dbConnect(odbc::odbc(), "RecRegistry", timeout = 10)
###############
## Variables ##
###############
options(scipen=999, stringsAsFactors = FALSE)
DevOpsNum <- "Work Item 35079" # Used in log file, and relates to the Feature/PBI number in DevOps
# Range for "installation dates" whose records are extracted
DateStart <- as.Date("2021-10-01") # example 2019-10-21
SQLDateStart <- gsub("-","",DateStart) # Used in SQL script
DateEnd <- as.Date("2021-10-08") # example 2019-10-21
SQLDateEnd <- gsub("-","",DateEnd) # Used in SQL script
###############
## Functions ##
###############
#None
##########
## Main ##
##########
#######################
## Data input/ingest ##
#######################
# SQL script as input string to an R function, which extracts the required data.
# Un/comment the required fields, as needed.
# Ensure the last uncommented data field before FROM has no comma at end of line.
RecReg_Raw_SQL <- paste0("SELECT
sgu.ACCREDITATION_CODE as Small_Unit_Accreditation_Code,
sgu.ID, -- Needed for Serial Numbers matching below
--Installation Address
sgu_addr.POSTCODE as Small_Unit_Installation_Postcode,
sgu_addr.STATE as Small_Unit_Installation_State,
sgu_addr.SUBURB as Small_Unit_Installation_City,
CONCAT_WS(', ',CONCAT_WS(' ',sgu_addr.UNIT_TYPE,sgu_addr.UNIT_NUMBER,sgu_addr.STREET_NUMBER,sgu_addr.STREET_NAME,sgu_addr.STREET_TYPE),sgu_addr.SUBURB,sgu_addr.STATE,sgu_addr.POSTCODE) AS 'Small_Unit_Installation_Street_Address_Full',
sgu_addr.SITE_NAME as Small_Unit_Installation_Property_Name,
sgu_addr.UNIT_TYPE as Small_Unit_Installation_Address_Type,
sgu_addr.UNIT_NUMBER as Small_Unit_Installation_Address_Type_Number,
sgu_addr.STREET_NUMBER as Small_Unit_Installation_Street_Number,
sgu_addr.STREET_NAME as Small_Unit_Installation_Street_Name,
sgu_addr.STREET_TYPE as Small_Unit_Installation_Street_Type,
sgu_addr.SPECIAL_ADDRESS as Small_Unit_Installation_Additional_Address_Information,
--Dates
sgu.INSTALLATION_DATE as Small_Unit_Installed_Date,
convert(DATE, cert_reg.CREATED_DATE AT TIME ZONE 'UTC' AT TIME ZONE 'AUS Eastern Standard Time') as REC_Creation_Date,
--Account
acct.[Name] as Account_Name,
vacct.[RPE_ID] as Registered_Person_ID,
sgu.FUEL_SOURCE,
-- --Installer details
installer.INSTALLER_ACCREDITED_NUMBER as Small_Unit_Installer_CEC_Accreditation_Code,
installer.SURNAME as Small_Unit_Installer_Surname,
installer.FIRST_NAME as Small_Unit_Installer_Firstname,
installer.MOBILE as Small_Unit_Installer_Mobile_Number,
installer.PHONE as Small_Unit_Installer_Phone_Number,
installer.FAX as Small_Unit_Installer_Fax_Number,
installer.EMAIL as Small_Unit_Installer_Email_Address,
installer_address.POSTCODE as Small_Unit_Installer_Postcode,
installer_address.STATE as Small_Unit_Installer_State,
installer_address.SUBURB as Small_Unit_Installer_City,
CONCAT_WS(', ',CONCAT_WS(' ',installer_address.UNIT_TYPE,installer_address.UNIT_NUMBER,installer_address.STREET_NUMBER,installer_address.STREET_NAME,installer_address.STREET_TYPE),installer_address.SUBURB,installer_address.STATE,installer_address.POSTCODE) AS 'Small_Unit_Installer_Street_Address_Full',
installer_address.SITE_NAME as Small_Unit_Installer_Property_Name,
installer_address.UNIT_TYPE as Small_Unit_Installer_Address_Type,
installer_address.UNIT_NUMBER as Small_Unit_Installer_Address_Type_Number,
installer_address.STREET_NUMBER as Small_Unit_Installer_Street_Number,
installer_address.STREET_NAME as Small_Unit_Installer_Street_Name,
installer_address.STREET_TYPE as Small_Unit_Installer_Street_Type,
installer_address.SPECIAL_ADDRESS as Small_Unit_Installer_Additional_Address_Information,
-- --Designer details
designer.INSTALLER_ACCREDITED_NUMBER as Small_Unit_Designer_CEC_Accreditation_Code,
designer.SURNAME as Small_Unit_Designer_Surname,
designer.FIRST_NAME as Small_Unit_Designer_Firstname,
designer.MOBILE as Small_Unit_Designer_Mobile_Number,
designer.PHONE as Small_Unit_Designer_Phone_Number,
designer.FAX as Small_Unit_Designer_Fax_Number,
designer.EMAIL as Small_Unit_Designer_Email_Address,
designer_address.POSTCODE as SGU_Designer_Postcode,
designer_address.STATE as SGU_Designer_State,
designer_address.SUBURB as SGU_Designer_City,
CONCAT_WS(', ',CONCAT_WS(' ',designer_address.UNIT_TYPE,designer_address.UNIT_NUMBER,designer_address.STREET_NUMBER,designer_address.STREET_NAME,designer_address.STREET_TYPE),designer_address.SUBURB,designer_address.STATE,designer_address.POSTCODE) AS 'Small_Unit_Designer_Street_Address_Full',
designer_address.SITE_NAME as SGU_Designer_Property_Name,
designer_address.UNIT_TYPE as SGU_Designer_Address_Type,
designer_address.UNIT_NUMBER as SGU_Designer_Address_Type_Number,
designer_address.STREET_NUMBER as SGU_Designer_Address_Street_Number,
designer_address.STREET_NAME as SGU_Designer_Address_Street_Name,
designer_address.STREET_TYPE as SGU_Designer_Address_Street_Type,
designer_address.SPECIAL_ADDRESS as SGU_Designer_Additional_Address_Information,
-- --Electrician
electrician.[ELECTRICIAN_NUMBER] as SGU_Electrician_License_Number,
electrician.[SURNAME] as SGU_Electrician_Surname,
electrician.[FIRST_NAME] as SGU_Electrician_Firstname,
electrician.[MOBILE] as SGU_Electrician_Mobile_Number,
electrician.[PHONE] as SGU_Electrician_Phone_Number,
electrician.[FAX] as SGU_Electrician_Fax_Number,
electrician.[EMAIL] as SGU_Electrician_Email_Address,
electrician_address.[POSTCODE] as SGU_Electrician_Postcode,
electrician_address.[STATE] as SGU_Electrician_State,
electrician_address.[SUBURB] as SGU_Electrician_City,
CONCAT_WS(', ',CONCAT_WS(' ',electrician_address.UNIT_TYPE,electrician_address.UNIT_NUMBER,electrician_address.STREET_NUMBER,electrician_address.STREET_NAME,electrician_address.STREET_TYPE),electrician_address.SUBURB,electrician_address.STATE,electrician_address.POSTCODE) AS 'SGU_Electrician_Street_Address_Full',
electrician_address.SITE_NAME as SGU_Electrician_Property_Name,
electrician_address.[UNIT_TYPE] as SGU_Electrician_Address_Type,
electrician_address.[UNIT_NUMBER] as SGU_Electrician_Address_Type_Number,
electrician_address.[STREET_NUMBER] as SGU_Electrician_Street_Number,
electrician_address.[STREET_NAME] as SGU_Electrician_Street_Name,
electrician_address.[STREET_TYPE] as SGU_Electrician_Street_Type,
electrician_address.[SPECIAL_ADDRESS] as SGU_Electrician_Additional_Address_Information,
--Small Unit Owner
owner.[Small Unit Owner Surname] as Small_Unit_Owner_Surname,
owner.[Small Unit Owner Firstname] as Small_Unit_Owner_Firstname,
--owner.[Small Unit Owner Initials] as Small_Unit_Owner_Initials,
--owner.[Small Unit Owner Title] as Small_Unit_Owner_Title,
owner.[Small Unit Owner Mobile Number] as Small_Unit_Owner_Mobile_Number,
owner.[Small Unit Owner Phone Number] as Small_Unit_Owner_Phone_Number,
owner.[Small Unit Owner Fax Number] as Small_Unit_Owner_Fax_Number,
owner.[Small Unit Owner Email Address] as Small_Unit_Owner_Email_Address,
owner.[Small Unit Owner Postcode] as Small_Unit_Owner_Postcode,
owner.[Small Unit Owner State] as Small_Unit_Owner_State,
owner.[Small Unit Owner City] as Small_Unit_Owner_City,
owner.[Small Unit Owner Street Address Full],
CONCAT_WS(', ',owner.[Small Unit Owner Street Address Full],owner.[Small Unit Owner City],owner.[Small Unit Owner State],owner.[Small Unit Owner Postcode]) AS 'Small_Unit_Owner_Street_Address_Full',
owner.[Small Unit Owner Address Type] as Small_Unit_Owner_Address_Type,
owner.[Small Unit Owner Address Type Number] as Small_Unit_Owner_Address_Type_Number,
owner.[Small Unit Owner Street Number] as Small_Unit_Owner_Street_Number,
owner.[Small Unit Owner Street Name] as Small_Unit_Owner_Street_Name,
owner.[Small Unit Owner Street Type] as Small_Unit_Owner_Street_Type,
----- Facts -----
--validation audit status
vas.Any_RECs_Passed_Validation_Audit_Flag as Any_RECs_Passed_Validation_Audit_Flag,
--RECs
fsur.[RECs Created Quantity] as RECs_Created_Quantity,
fsur.[RECs Pending Audit Quantity] as RECs_Pending_Audit_Quantity,
fsur.[RECs Passed Audit Quantity] as RECs_Passed_Audit_Quantity,
fsur.[RECs Failed Audit Quantity] as RECs_Failed_Audit_Quantity,
fsur.[RECs Registered Quantity] as RECs_Registered_Quantity,
--common
fsur.[Deeming Period in Years] as Deeming_Period_in_Years,
dsur.[Small Unit Zone] as Small_Unit_Zone,
dsur.[Small Unit Customer Reference] as Small_Unit_Customer_Reference,
--SGU facts
dsur.[RECs Multiplier Used] as RECs_Multiplier_Used,
fsur.[SGU Rated Output in kW] as SGU_Rated_Output_in_kW,
dsur.[SGU Brand] as SGU_Brand,
dsur.[SGU Model] as SGU_Model,
--SGU statements etc
--dsur.[SGU Installation Type] as SGU_Installation_Type,
dsur.[SGU Inverter Manufacturer] as SGU_Inverter_Manufacturer,
dsur.[SGU Inverter Series] as SGU_Inverter_Series,
dsur.[SGU Inverter Model Number] as SGU_Inverter_Model_Number,
dsur.[RECs Multiplier Used Previously Flag] as RECs_Multiplier_Used_Previously_Flag,
dsur.[SGU Premises Eligible for Solar Credits Flag] as SGU_Premises_Eligible_for_Solar_Credits_Flag,
dsur.[SGU Complete Unit Flag] as SGU_Complete_Unit_Flag,
dsur.[SGU Transitional Multiplier Flag] as SGU_Transitional_Multiplier_Flag,
dsur.[Site Specific Audit Report Available Flag] as Site_Specific_Audit_Report_Available_Flag,
dsur.[Received Statement for Installer and Designer CEC Accreditation Flag] as Received_Statement_for_Installer_and_Designer_CEC_Accreditation_Flag,
dsur.[Received Statement for Adherence to State Requirements Flag] as Received_Statement_for_Adherence_to_State_Requirements_Flag,
dsur.[Received Certificate of Electrical Safety or Compliance Flag] as Received_Certificate_of_Electrical_Safety_or_Compliance_Flag,
dsur.[Received Statement that System is Off Grid Flag] as Received_Statement_that_System_is_Off_Grid_Flag,
dsur.[All Electrical Work Undertaken by Electrician Flag] as All_Electrical_Work_Undertaken_by_Electrician_Flag,
dsur.[Received Statement Confirming Liability Insurance Flag] as Received_Statement_Confirming_Liability_Insurance_Flag,
dsur.[Received Statement for Adherence to CEC Code of Conduct Flag] as Received_Statement_for_Adherence_to_CEC_Code_of_Conduct_Flag,
dsur.[Received Statement for Adherence to ANZ Standards Flag] as Received_Statement_for_Adherence_to_ANZ_Standards_Flag,
dsur.[SGU Number of Panels] as SGU_Number_of_Panels,
dsur.[SGU Default Availability Used Flag] as SGU_Default_Availability_Used_Flag,
dsur.[SGU Availability] as SGU_Availability,
dsur.[More than One SGU at Address Flag] as More_than_One_SGU_at_Address_Flag,
sgu.MORE_THAN_ONE_SGU_SAME_ADDRESS,
--sgu.HAS_FAILED_PREVIOUSLY,
sgu.INSTALLATION_TYPE,
sgu.RECREATION_EXPLANATION_NOTE,
sgu.ADDITIONAL_CAPACITY_DETAILS,
sgu.VERSION,
sgu.ADDITIONAL_SYSTEM_INFORMATION,
--sgu.PREVIOUS_RECS_MULTIPLIER_FLAG,
dsur.[SGU Rebate Approved Flag] as SGU_Rebate_Approved_Flag,
fsur.[SGU Out of Pocket Expense] as SGU_Out_of_Pocket_Expense,
dsur.[SWH Brand] as SWH_Brand,
dsur.[SWH Model] as SWH_Model,
dsur.[SWH Installation Type] as SWH_Installation_Type,
dsur.[SWH Technology Type] as SWH_Technology_Type,
dsur.[SWH Number of Panels] as SWH_Number_of_Panels,
dsur.[SWH Capacity over 700L Flag] as SWH_Capacity_over_700L_Flag,
dsur.[Stat Declaration for SWH Capacity Supplied Flag] as Stat_Declaration_for_SWH_Capacity_Supplied_Flag,
dsur.[SWH Second Hand Flag] as SWH_Second_Hand_Flag,
dsur.[More than One SWH at Address Flag] as More_than_One_SWH_at_Address_Flag,
dsur.[RETAILER NAME] as RETAILER_NAME,
dsur.[RETAILER ABN] as RETAILER_ABN
--dsur.[NATIONAL METERING NUMBER] as NATIONAL_METERING_NUMBER,
--dsur.[BATTERY MANUFACTURER] as BATTERY_MANUFACTURER,
--dsur.[BATTERY MODEL] as BATTERY_MODEL,
--dsur.[BATTERY PART OF AGG CONTROL] as BATTERY_PART_OF_AGG_CONTROL,
--dsur.[BATTERY SETTINGS CHANGED] as BATTERY_SETTINGS_CHANGED,
--dsur.[SGU ELECTRICITY GRID CONNECTIVITY] as SGU_ELECTRICITY_GRID_CONNECTIVITY
FROM [RECREG_PROD].CERREGISTRY.SGU sgu
INNER JOIN CERREGISTRY.CERTIFICATE_REGISTRATION cert_reg
ON sgu.ACCREDITATION_CODE = cert_reg.ACCREDITATION_CODE
AND sgu.INSTALLATION_DATE >= convert(date,'",SQLDateStart,"',112)
AND sgu.INSTALLATION_DATE <= convert(date,'",SQLDateEnd,"',112)
--Installation Address
LEFT JOIN CERREGISTRY.ADDRESS sgu_addr
ON sgu.INSTALLATION_ADDRESS_ID = sgu_addr.ID
--Installer
LEFT JOIN CERREGISTRY.SGU_TECHNICAL_PERSON installer
ON installer.ID = sgu.INSTALLER_ID
LEFT JOIN CERREGISTRY.ADDRESS installer_address
ON installer_address.ID = installer.ADDRESS_ID
--Designer
LEFT JOIN CERREGISTRY.SGU_TECHNICAL_PERSON designer
ON designer.ID = sgu.DESIGNER_ID
LEFT JOIN CERREGISTRY.ADDRESS designer_address
ON designer_address.ID = designer.ADDRESS_ID
--Electrician
LEFT JOIN CERREGISTRY.SGU_TECHNICAL_PERSON electrician
ON electrician.ID = sgu.ELECTRICIAN_ID
LEFT JOIN CERREGISTRY.ADDRESS electrician_address
ON electrician_address.ID = electrician.ADDRESS_ID
--Agent
LEFT JOIN CERREGISTRY.ACCOUNT acct
ON cert_reg.REGISTERED_PERSON_ACCOUNT_ID = acct.ID
--registered person - vacct
inner join [RECREG_PROD].RETDim.vAccount vacct
on acct.ID = vacct.ACC_ID
--Owner
inner join [RECREG_PROD].RETDim.vw_Small_Unit_Owner owner
on cert_reg.OWNER_ID = owner.[Small Unit Owner ID]
--Small Unit Registrations - dsur
inner join [RECREG_PROD].RETDim.SmallUnitRegistrations dsur
on sgu.ACCREDITATION_CODE = dsur.[Small Unit Accreditation Code]
--Fact Small Unit Registrations - fsur
inner join [RECREG_PROD].RETFact.SmallUnitRegistration fsur
on fsur.[Dim_Small_Unit_Registration_ID] = dsur.[Dim_Small_Unit_Registration_ID]
--Validation Audit Status - vas
inner join [RECREG_PROD].RETDim.Dim_Validation_Audit_Status vas
on vas.[Dim_Validation_Audit_Status_ID] = fsur.[Dim_Validation_Audit_Status_ID]
")
## Run the registry query and time the whole extract-clean-write pipeline.
## `conRECREG` and `RecReg_Raw_SQL` are defined earlier in this script.
start_time <- Sys.time()
RecReg_data_YTD <- dbGetQuery(conRECREG, RecReg_Raw_SQL) %>% unique() # all columns in the right order; however free-text columns contain line breaks (cleaned below)
# NOTE(review): second, identical query run -- it doubles the database load
# and its result is unused below; looks like a leftover debug check.
RecReg_data_test2210 <- dbGetQuery(conRECREG, RecReg_Raw_SQL) %>% unique()
## Cleaning
## ---------------------------------------------------
# Found issues in ADDITIONAL_SYSTEM_INFORMATION (likely a free-text field) that
# contains line breaks, hence messing up the output CSV file.
# Isolating the issue for one known-bad record (result is printed, not stored):
RecReg_data_YTD %>% filter(Small_Unit_Accreditation_Code == "PVD4268573") %>%
select(ADDITIONAL_SYSTEM_INFORMATION) %>%
# substituting any CR/LF character with a space
gsub("[\r\n]", " ", .)
# Clean the column in place:
RecReg_data_YTD$ADDITIONAL_SYSTEM_INFORMATION <- RecReg_data_YTD$ADDITIONAL_SYSTEM_INFORMATION %>% gsub("[\r\n]", " ", .)
# Count embedded newlines in another free-text column (printed only).
sum(grepl('[\n]', RecReg_data_YTD$Small_Unit_Installation_Additional_Address_Information)) # works; sums all occurrences
# apply(RecReg_data_YTD, 2, sum(grepl('[\n]', RecReg_data_YTD$Small_Unit_Installation_Additional_Address_Information)))
#
# Strip CR/LF from every column at once. Note apply() coerces the data frame
# to a character matrix, so every column of `clean_data` becomes character.
clean_data <- as.data.frame(apply(RecReg_data_YTD, 2, function(x) gsub("[\r\n]", " ", x)))
str(clean_data)
write_csv2(clean_data, "RecReg_data_FYTD_clean.csv")
# str_replace_all(x, "[\r\n]" , " ") - also works
# grepl('[^[:punct:]]', val)
##############
## Analysis ##
##############
# After all the exciting analysis then remember to record the
# execution time of main:
end_time <- Sys.time()
run_time <- end_time - start_time
run_time
#######################
#### Write outputs ####
#######################
# write_rds(SRES_Raw2, "top100_rows.rds")
#
# write_rds(RecReg_data, "RR_2021.rds")
# Other text files/tables
## ------------------------------------------------------
# Uncleaned extract, kept for cross-checking against the cleaned file.
write_csv2(RecReg_data_YTD, "RecReg_data_FYTD_check.csv")
## Log file
## ------------------------------------------------------
# Append one row per run; write the header only when the file does not exist yet.
log_df <- cbind(run_time, date()) %>% as.data.frame()
write.table(log_df, "log_file.csv", sep = ",", col.names = !file.exists("log_file.csv"), append = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/F022.run.imput.R
\name{run.impute}
\alias{run.impute}
\title{Run MAGIC on Main Data.}
\usage{
run.impute(x = NULL, genes = "all_genes", k = 10, alpha = 15,
t = "auto", npca = 100, init = NULL, t.max = 20,
knn.dist.method = "euclidean", verbose = 1, n.jobs = 1,
seed = NULL)
}
\arguments{
\item{x}{An object of class iCellR.}
\item{genes}{character or integer vector, default: NULL vector of column names or column indices for which to return smoothed data If 'all_genes' or NULL, the entire smoothed matrix is returned}
\item{k}{int, optional, default: 10 number of nearest neighbors on which to build kernel}
\item{alpha}{int, optional, default: 15 sets decay rate of kernel tails. If NULL, alpha decaying kernel is not used}
\item{t}{int, optional, default: 'auto' power to which the diffusion operator is powered sets the level of diffusion. If 'auto', t is selected according to the Procrustes disparity of the diffused data.'}
\item{npca}{number of PCA components that should be used; default: 100.}
\item{init}{magic object, optional object to use for initialization. Avoids recomputing intermediate steps if parameters are the same.}
\item{t.max}{int, optional, default: 20 Maximum value of t to test for automatic t selection.}
\item{knn.dist.method}{string, optional, default: 'euclidean'. recommended values: 'euclidean', 'cosine' Any metric from 'scipy.spatial.distance' can be used distance metric for building kNN graph.}
\item{verbose}{'int' or 'boolean', optional (default : 1) If 'TRUE' or '> 0', print verbose updates.}
\item{n.jobs}{'int', optional (default: 1) The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n.cpus + 1 + n.jobs) are used. Thus for n_jobs = -2, all CPUs but one are used}
\item{seed}{int or 'NULL', random state (default: 'NULL')}
}
\value{
An object of class iCellR.
}
\description{
This function takes an object of class iCellR and runs MAGIC on main data. Markov Affinity-based Graph Imputation of Cells (MAGIC) is an algorithm for denoising and transcript recovery of single cells applied to single-cell RNA sequencing data, as described in van Dijk et al., 2018.
}
\examples{
\dontrun{
my.obj <- run.impute(my.obj)
}
}
| /man/run.impute.Rd | no_license | weiliuyuan/iCellR | R | false | true | 2,388 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/F022.run.imput.R
\name{run.impute}
\alias{run.impute}
\title{Run MAGIC on Main Data.}
\usage{
run.impute(x = NULL, genes = "all_genes", k = 10, alpha = 15,
t = "auto", npca = 100, init = NULL, t.max = 20,
knn.dist.method = "euclidean", verbose = 1, n.jobs = 1,
seed = NULL)
}
\arguments{
\item{x}{An object of class iCellR.}
\item{genes}{character or integer vector, default: NULL vector of column names or column indices for which to return smoothed data If 'all_genes' or NULL, the entire smoothed matrix is returned}
\item{k}{int, optional, default: 10 number of nearest neighbors on which to build kernel}
\item{alpha}{int, optional, default: 15 sets decay rate of kernel tails. If NULL, alpha decaying kernel is not used}
\item{t}{int, optional, default: 'auto' power to which the diffusion operator is powered sets the level of diffusion. If 'auto', t is selected according to the Procrustes disparity of the diffused data.'}
\item{npca}{number of PCA components that should be used; default: 100.}
\item{init}{magic object, optional object to use for initialization. Avoids recomputing intermediate steps if parameters are the same.}
\item{t.max}{int, optional, default: 20 Maximum value of t to test for automatic t selection.}
\item{knn.dist.method}{string, optional, default: 'euclidean'. recommended values: 'euclidean', 'cosine' Any metric from 'scipy.spatial.distance' can be used distance metric for building kNN graph.}
\item{verbose}{'int' or 'boolean', optional (default : 1) If 'TRUE' or '> 0', print verbose updates.}
\item{n.jobs}{'int', optional (default: 1) The number of jobs to use for the computation. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n.cpus + 1 + n.jobs) are used. Thus for n_jobs = -2, all CPUs but one are used}
\item{seed}{int or 'NULL', random state (default: 'NULL')}
}
\value{
An object of class iCellR.
}
\description{
This function takes an object of class iCellR and runs MAGIC on main data. Markov Affinity-based Graph Imputation of Cells (MAGIC) is an algorithm for denoising and transcript recovery of single cells applied to single-cell RNA sequencing data, as described in van Dijk et al., 2018.
}
\examples{
\dontrun{
my.obj <- run.impute(my.obj)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggspatial-package.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{ggplot}
\alias{aes}
\alias{coord_sf}
\alias{geom_sf}
\alias{waiver}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{ggplot2}{\code{\link[ggplot2]{aes}}, \code{\link[ggplot2:ggsf]{coord_sf}}, \code{\link[ggplot2:ggsf]{geom_sf}}, \code{\link[ggplot2]{ggplot}}, \code{\link[ggplot2]{waiver}}}
}}
| /man/reexports.Rd | no_license | paleolimbot/ggspatial | R | false | true | 600 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggspatial-package.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{ggplot}
\alias{aes}
\alias{coord_sf}
\alias{geom_sf}
\alias{waiver}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{ggplot2}{\code{\link[ggplot2]{aes}}, \code{\link[ggplot2:ggsf]{coord_sf}}, \code{\link[ggplot2:ggsf]{geom_sf}}, \code{\link[ggplot2]{ggplot}}, \code{\link[ggplot2]{waiver}}}
}}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dmdims}
\alias{dmdims}
\title{Fetch dimensions of a DataMarket dataset.}
\usage{
dmdims(ds, .params = list())
}
\arguments{
\item{ds}{a dataset ID, DS string, URL query-string, or whole URL. The DS
string to send is extracted from the URL as needed, and short URLs
at data.is, bit.ly, is.gd, t.co and url.is are expanded.
If the DS string contains dimension filter specifications (the
stuff after the ! character, so it's not just a dataset ID), these
are preserved in the request to the API, but for normal DataMarket
datasets they do not affect the response.}
\item{.params}{extra GET parameters to pass along in the API request.}
}
\value{
named list of dataset dimension information. Each name is a dataset
ID and each element is a named list of dimensions of that dataset.
Each dimension is named for its dimension ID in that list, and is
itself a named list of the four properties \code{id, title, type,
values}. The first three of these properties are character strings,
while \code{values} is a named list of dimension values. Each of
these is a list of two properties \code{id, title}, and the \code{id}
is also the name of the dimension value
}
\description{
Get a list of dataset dimension objects for the given dataset.
}
\examples{
dmdims("17tm")
dmdims("17tm!kqc=a")
dmdims("ds=17tm")
dmdims("ds=17tm!kqc=a")
dmdims("foo=bar&ds=17tm&baz=xyzzy")
dmdims("http://datamarket.com/api/v1/series.json?foo=bar&ds=17tm&baz=xyzzy")
dmdims("http://datamarket.com/data/set/17tm/#ds=17tm")
}
| /man/dmdims.Rd | no_license | cran/rdatamarket | R | false | false | 1,610 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{dmdims}
\alias{dmdims}
\title{Fetch dimensions of a DataMarket dataset.}
\usage{
dmdims(ds, .params = list())
}
\arguments{
\item{ds}{a dataset ID, DS string, URL query-string, or whole URL. The DS
string to send is extracted from the URL as needed, and short URLs
at data.is, bit.ly, is.gd, t.co and url.is are expanded.
If the DS string contains dimension filter specifications (the
stuff after the ! character, so it's not just a dataset ID), these
are preserved in the request to the API, but for normal DataMarket
datasets they do not affect the response.}
\item{.params}{extra GET parameters to pass along in the API request.}
}
\value{
named list of dataset dimension information. Each name is a dataset
ID and each element is a named list of dimensions of that dataset.
Each dimension is named for its dimension ID in that list, and is
itself a named list of the four properties \code{id, title, type,
values}. The first three of these properties are character strings,
while \code{values} is a named list of dimension values. Each of
these is a list of two properties \code{id, title}, and the \code{id}
is also the name of the dimension value
}
\description{
Get a list of dataset dimension objects for the given dataset.
}
\examples{
dmdims("17tm")
dmdims("17tm!kqc=a")
dmdims("ds=17tm")
dmdims("ds=17tm!kqc=a")
dmdims("foo=bar&ds=17tm&baz=xyzzy")
dmdims("http://datamarket.com/api/v1/series.json?foo=bar&ds=17tm&baz=xyzzy")
dmdims("http://datamarket.com/data/set/17tm/#ds=17tm")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setup.R
\name{create.new.issues}
\alias{create.new.issues}
\title{Automatically create issues on the repo}
\usage{
create.new.issues(repo.name, issue.json = "inst/extdata/issuetemplates.json",
org = "USGS-R", ctx = get.github.context())
}
\arguments{
\item{org}{string, GitHub organization to create repository. Defaults to "USGS-R"}
\item{ctx}{GitHub context for authentication, see \link[grithub]{get.github.context}}
\item{repo.name}{string, name for the new repository}
\item{issue.json}{file path indicating the JSON file to be used to define what
issues to create. Defaults to the `issuetemplates.json` file in this package.}
}
\description{
This function should be run by instructors to setup the issues
that will be created for students in each class. The idea is that they fix and
close out the issues in each instance of the course, and we reset the code to
have errors before the next course. We also need to reinstate the issues
associated with the errors. This function should automate that.
}
| /man/create.new.issues.Rd | permissive | wdwatkins/trainR | R | false | true | 1,092 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setup.R
\name{create.new.issues}
\alias{create.new.issues}
\title{Automatically create issues on the repo}
\usage{
create.new.issues(repo.name, issue.json = "inst/extdata/issuetemplates.json",
org = "USGS-R", ctx = get.github.context())
}
\arguments{
\item{org}{string, GitHub organization to create repository. Defaults to "USGS-R"}
\item{ctx}{GitHub context for authentication, see \link[grithub]{get.github.context}}
\item{repo.name}{string, name for the new repository}
\item{issue.json}{file path indicating the JSON file to be used to define what
issues to create. Defaults to the `issuetemplates.json` file in this package.}
}
\description{
This function should be run by instructors to setup the issues
that will be created for students in each class. The idea is that they fix and
close out the issues in each instance of the course, and we reset the code to
have errors before the next course. We also need to reinstate the issues
associated with the errors. This function should automate that.
}
|
\name{RPA}
\alias{RPA}
\docType{data}
\title{
Recombinase polymerase amplification (RPA) by Lutz et al.(2009)
}
\description{
Real-time amplification plot of a Recombinase Polymerase Amplification (RPA)
by Lutz et al.(2009) in a centrifugal microfluidic foil cartridge.
}
\usage{data("RPA")}
\format{
A data frame with 184 observations on the following 2 variables.
\describe{
\item{\code{Reaction.Time}}{Reaction Time [min]}
\item{\code{RFU}}{Relative fluorescence units [RFU]}
}
}
\details{
The data were digitized as described by Poisot (2011). The image for data
extraction was taken from Figure 3b by Lutz et al.(2009). The amplification
curve presents the results of a cross-contamination test of a foil disk for a
sample containing 2000 copies of the mecA gene. For further experimental and
technical details refer to Lutz et al.(2009).
}
\source{
The original data (open access under CC BY-NC-ND license) were taken from Figure
3b by Lutz et al.(2009).
}
\references{
Poisot, T. (2011). The digitize Package: Extracting Numerical Data from
Scatterplots. \emph{The R Journal} 3, 25--26.
Lutz, S., Weber, P., Focke, M., Faltin, B., Roth, G., Piepenburg, O., Armes,
N., Mark, D., Zengerle, R., and von Stetten, F. (2009). Isothermal Polymerase
Amplification in a Centrifugal Microfluidic Foil Cartridge. \emph{Procedia
Chemistry} 1, 529--531.}
\examples{
data(RPA)
plot(RPA, main = "RPA by Lutz et al.(2009)", xlab = "Reaction Time [min]",
ylab = "Relative fluorescence units [RFU]", type = "l")
abline(h = 5, lty = 2, col = "grey")
}
\keyword{datasets}
\keyword{RPA}
\keyword{isothermal}
| /man/RPA.Rd | no_license | PCRuniversum/chipPCR | R | false | false | 1,629 | rd | \name{RPA}
\alias{RPA}
\docType{data}
\title{
Recombinase polymerase amplification (RPA) by Lutz et al.(2009)
}
\description{
Real-time amplification plot of a Recombinase Polymerase Amplification (RPA)
by Lutz et al.(2009) in a centrifugal microfluidic foil cartridge.
}
\usage{data("RPA")}
\format{
A data frame with 184 observations on the following 2 variables.
\describe{
\item{\code{Reaction.Time}}{Reaction Time [min]}
\item{\code{RFU}}{Relative fluorescence units [RFU]}
}
}
\details{
The data were digitized as described by Poisot (2011). The image for data
extraction was taken from Figure 3b by Lutz et al.(2009). The amplification
curve presents the results of a cross-contamination test of a foil disk for a
sample containing 2000 copies of the mecA gene. For further experimental and
technical details refer to Lutz et al.(2009).
}
\source{
The original data (open access under CC BY-NC-ND license) were taken from Figure
3b by Lutz et al.(2009).
}
\references{
Poisot, T. (2011). The digitize Package: Extracting Numerical Data from
Scatterplots. \emph{The R Journal} 3, 25--26.
Lutz, S., Weber, P., Focke, M., Faltin, B., Roth, G., Piepenburg, O., Armes,
N., Mark, D., Zengerle, R., and von Stetten, F. (2009). Isothermal Polymerase
Amplification in a Centrifugal Microfluidic Foil Cartridge. \emph{Procedia
Chemistry} 1, 529--531.}
\examples{
data(RPA)
plot(RPA, main = "RPA by Lutz et al.(2009)", xlab = "Reaction Time [min]",
ylab = "Relative fluorescence units [RFU]", type = "l")
abline(h = 5, lty = 2, col = "grey")
}
\keyword{datasets}
\keyword{RPA}
\keyword{isothermal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef}
\alias{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef}
\title{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef Object}
\usage{
GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef(
displayName = NULL,
id = NULL
)
}
\arguments{
\item{displayName}{Display name of the AnnotationSpec}
\item{id}{ID of the AnnotationSpec}
}
\value{
GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef object
}
\description{
GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\concept{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef functions}
| /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 1,025 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef}
\alias{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef}
\title{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef Object}
\usage{
GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef(
displayName = NULL,
id = NULL
)
}
\arguments{
\item{displayName}{Display name of the AnnotationSpec}
\item{id}{ID of the AnnotationSpec}
}
\value{
GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef object
}
\description{
GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\concept{GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef functions}
|
## Night-light comparison of Hanover: NASA Black Marble scenes, 2012 vs 2016.
## Crops both scenes to a 1-degree box around Hanover, plots each year plus
## their pixel-wise difference into a single PDF, marking the city centre.
library(raster)

# Bounding box (xmin, xmax, ymin, ymax in degrees) and city-centre marker
# coordinates, defined once instead of being repeated for every plot.
hanover_extent <- extent(c(9, 10, 52, 53))
hanover_lon <- 9.71730046412
hanover_lat <- 52.375993496

# Draw a raster with a red diamond marker on the Hanover city centre.
plot_with_marker <- function(x, title) {
  plot(x, main = title)
  points(hanover_lon, hanover_lat, pch = 23, bg = "red", col = "red", cex = 2)
}

# Load each Black Marble scene and crop it to the Hanover bounding box.
r_2012 <- crop(raster("images/BlackMarble_2012_C1_geo.tif"), hanover_extent)
r_2016 <- crop(raster("images/BlackMarble_2016_C1_geo.tif"), hanover_extent)

pdf("plots.pdf")  # remove this in an interactive session
plot_with_marker(r_2012, "Hanover at night in 2012")
plot_with_marker(r_2016, "Hanover at night in 2016")
# Pixel-wise difference (2012 - 2016) highlights where brightness changed.
plot_with_marker(r_2012 - r_2016, "Difference of Hanover at night")
dev.off()
| /01_night_light_images/solution.R | no_license | giserh/big_geospatial_data_lecture | R | false | false | 687 | r | library(raster)
# Load each NASA Black Marble night-light scene and crop it to a 1-degree
# box around Hanover (lon 9-10 E, lat 52-53 N).
r = raster("images/BlackMarble_2012_C1_geo.tif")
r.hanover = crop(r,extent(c(9,10,52,53)))
r2 = raster("images/BlackMarble_2016_C1_geo.tif")
r2.hanover = crop(r2,extent(c(9,10,52,53)))
# Send all following plots into a single PDF file.
pdf("plots.pdf") # remove this in an interactive session
# Each plot marks the Hanover city centre with a red diamond (pch = 23).
plot(r.hanover, main="Hanover at night in 2012")
points(9.71730046412,52.375993496,pch=23,bg="red",col="red",cex=2)
plot(r2.hanover, main="Hanover at night in 2016")
points(9.71730046412,52.375993496,pch=23,bg="red",col="red",cex=2)
# Pixel-wise difference (2012 - 2016) highlights where brightness changed.
plot(r.hanover - r2.hanover, main="Difference of Hanover at night")
points(9.71730046412,52.375993496,pch=23,bg="red",col="red",cex=2)
dev.off();
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfl_players.R
\name{mfl_players}
\alias{mfl_players}
\title{MFL players library}
\usage{
mfl_players(conn = NULL)
}
\arguments{
\item{conn}{optionally, pass in a conn object generated by ff_connect to receive league-specific custom players}
}
\value{
a dataframe containing all ~2000+ players in the MFL database
}
\description{
A cached table of MFL players. Will store in memory for each session!
(via memoise in zzz.R)
}
\examples{
\donttest{
try({ # try only shown here because sometimes CRAN checks are weird
player_list <- mfl_players()
dplyr::sample_n(player_list, 5)
}) # end try
}
}
| /man/mfl_players.Rd | permissive | mgirlich/ffscrapr | R | false | true | 671 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfl_players.R
\name{mfl_players}
\alias{mfl_players}
\title{MFL players library}
\usage{
mfl_players(conn = NULL)
}
\arguments{
\item{conn}{optionally, pass in a conn object generated by ff_connect to receive league-specific custom players}
}
\value{
a dataframe containing all ~2000+ players in the MFL database
}
\description{
A cached table of MFL players. Will store in memory for each session!
(via memoise in zzz.R)
}
\examples{
\donttest{
try({ # try only shown here because sometimes CRAN checks are weird
player_list <- mfl_players()
dplyr::sample_n(player_list, 5)
}) # end try
}
}
|
# Assigning a value to a variable (stored in memory).
# Three equivalent ways to do this ('<-' is the conventional one):
x=5
x<-5
5->x
# Creating vectors of data with c() (combine):
Name<-c("elon","abraham","jon","sena")
marks<-c(80,92,77,76)
passing_status<-c(TRUE,TRUE,FALSE,FALSE)
# Accessing the 3rd element of the marks vector (R indices start at 1):
marks[3]
# Accessing elements 2 to 4 of the marks vector:
marks[2:4]
# Accessing all elements of the Name vector except the 2nd (a negative index drops):
z=Name[-2]
z
# Creating a 3x3 matrix (values are filled column by column):
M=matrix(c(2,4,6,8,1,2,3,4,5),nrow=3,ncol=3)
M
# YOU CAN ALSO ADD, SUBTRACT AND MULTIPLY MATRICES THE SAME WAY (ELEMENT-WISE).
# Taking one more matrix:
N=matrix(c(3,3,4,6,7,8,2,3,1),nrow=3,ncol=3)
N
# Adding two matrices (element-wise):
add=M+N
add
# Subtraction (-) and multiplication (*) work the same way, element-wise.
# Working with the built-in dataset AirPassengers:
AirPassengers
# Open the help page: description, usage, format and source of AirPassengers.
?AirPassengers
# For convenience, assign this dataset to the variable 'n':
n=AirPassengers
n
# Summary statistics of the dataset,
# i.e. min., 1st qu., median, mean, 3rd qu., max.:
summary(n)
# Plot the dataset:
plot(n)
# Plot the dataset with vertical, histogram-like lines:
plot(n,type='h')
# Other values for the type= argument:
#"p" for points
#"l" for lines
#"b" for both
#"c" for the lines part alone of "b"
#"o" for both 'overplotted'
#"s" for stair steps; for more, check the base package's plot help page
# Print the dataset to the console:
print(n)
# Internal storage type of the dataset:
typeof(n)
# Maximum value:
max(n)
# Minimum value:
min(n)
# Start (year, period) of the time series:
start(n)
# End (year, period) of the time series:
end(n)
# First 16 values of the dataset.
# Note: the first 'n' here is the dataset, while 'n=16' is head()'s count
# argument -- they are unrelated despite sharing a name.
head(n, n=16)
# Last 10 values of the dataset:
tail(n, n=10)
# Structure of the dataset:
str(n)
# Internal storage type again (same call as typeof(n) above):
typeof(n)
# Frequency = number of observations per unit of time (12 for monthly data):
frequency(n)
# Class of the dataset, for example time series ("ts"), matrix, array...:
class(n)
# View the dataset in the data viewer (RStudio/GUI only):
View(n)
# Position (index) of the maximum value:
which.max(n)
# Position (index) of the minimum value:
which.min(n)
# Length = total number of elements in the dataset:
length(n)
# Cycle of the dataset: the position of each observation within a year.
# For AirPassengers, every January (1949-1960) is labelled 1, every
# February 2, and so on up to December = 12.
cycle(n)
# CHECK THE HELP SECTION FOR EXACT DESCRIPTIONS.
| /Getting_started_with_R (Basics).R | no_license | Tanya00001/Getting_started_with_R | R | false | false | 2,385 | r | #Assigning value to a variable to store in m/m
# Three equivalent ways to do this ('<-' is the conventional one):
x=5
x<-5
5->x
# Creating vectors of data with c() (combine):
Name<-c("elon","abraham","jon","sena")
marks<-c(80,92,77,76)
passing_status<-c(TRUE,TRUE,FALSE,FALSE)
# Accessing the 3rd element of the marks vector (R indices start at 1):
marks[3]
# Accessing elements 2 to 4 of the marks vector:
marks[2:4]
# Accessing all elements of the Name vector except the 2nd (a negative index drops):
z=Name[-2]
z
# Creating a 3x3 matrix (values are filled column by column):
M=matrix(c(2,4,6,8,1,2,3,4,5),nrow=3,ncol=3)
M
# YOU CAN ALSO ADD, SUBTRACT AND MULTIPLY MATRICES THE SAME WAY (ELEMENT-WISE).
# Taking one more matrix:
N=matrix(c(3,3,4,6,7,8,2,3,1),nrow=3,ncol=3)
N
# Adding two matrices (element-wise):
add=M+N
add
# Subtraction (-) and multiplication (*) work the same way, element-wise.
# Working with the built-in dataset AirPassengers:
AirPassengers
# Open the help page: description, usage, format and source of AirPassengers.
?AirPassengers
# For convenience, assign this dataset to the variable 'n':
n=AirPassengers
n
# Summary statistics of the dataset,
# i.e. min., 1st qu., median, mean, 3rd qu., max.:
summary(n)
# Plot the dataset:
plot(n)
# Plot the dataset with vertical, histogram-like lines:
plot(n,type='h')
# Other values for the type= argument:
#"p" for points
#"l" for lines
#"b" for both
#"c" for the lines part alone of "b"
#"o" for both 'overplotted'
#"s" for stair steps; for more, check the base package's plot help page
# Print the dataset to the console:
print(n)
# Internal storage type of the dataset:
typeof(n)
# Maximum value:
max(n)
# Minimum value:
min(n)
# Start (year, period) of the time series:
start(n)
# End (year, period) of the time series:
end(n)
# First 16 values of the dataset.
# Note: the first 'n' here is the dataset, while 'n=16' is head()'s count
# argument -- they are unrelated despite sharing a name.
head(n, n=16)
# Last 10 values of the dataset:
tail(n, n=10)
# Structure of the dataset:
str(n)
# Internal storage type again (same call as typeof(n) above):
typeof(n)
# Frequency = number of observations per unit of time (12 for monthly data):
frequency(n)
# Class of the dataset, for example time series ("ts"), matrix, array...:
class(n)
# View the dataset in the data viewer (RStudio/GUI only):
View(n)
# Position (index) of the maximum value:
which.max(n)
# Position (index) of the minimum value:
which.min(n)
# Length = total number of elements in the dataset:
length(n)
# Cycle of the dataset: the position of each observation within a year.
# For AirPassengers, every January (1949-1960) is labelled 1, every
# February 2, and so on up to December = 12.
cycle(n)
# CHECK THE HELP SECTION FOR EXACT DESCRIPTIONS.
|
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.72430108578345e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615846060-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,091 | r | testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.72430108578345e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) |
#' @title Baruník and Křehlík (2018) frequency connectedness approach
#' @description This function calculates the Baruník and Křehlík (2018) frequency connectedness measures.
#' @param Phi VAR coefficient matrix (a k x k*nlag matrix, or a 3-dimensional array over time)
#' @param Sigma Residual variance-covariance matrix (a k x k matrix, or a 3-dimensional array over time)
#' @param nfore H-step ahead forecast horizon (positive integer)
#' @param partition Frequency spectrum partition in decreasing order, e.g. c(pi, pi/2, 0)
#' @param generalized Orthogonalized/generalized FEVD
#' @param scenario "ABS" (absolute) or "WTH" (within) frequency connectedness
#' @param corrected Boolean value whether corrected or standard TCI should be computed
#' @param orth Orthogonalized shocks (stored in the returned config)
#' @return A list of connectedness measures (TABLE, CT, TCI, TO, FROM, NET, NPT, NPDC, PCI, INFLUENCE, config)
#' @examples
#' \donttest{
#' data("dy2012")
#' partition = c(pi+0.00001, pi/4, 0)
#' fit = VAR(dy2012, configuration=list(nlag=4))
#' dca = FrequencyConnectedness(Phi=fit$B, Sigma=fit$Q, nfore=100, partition=partition)
#' }
#' @import frequencyConnectedness
#' @references
#' Baruník, J., & Křehlík, T. (2018). Measuring the frequency dynamics of financial connectedness and systemic risk. Journal of Financial Econometrics, 16(2), 271-296.
#' @author David Gabauer
#' @export
FrequencyConnectedness = function(Phi, Sigma, nfore=100, partition=c(pi,pi/2,0), generalized=TRUE, orth=FALSE, scenario="ABS", corrected=FALSE) {
  ## Input validation
  if (nfore<=0) {
    stop("nfore needs to be a positive integer")
  }
  if (length(dim(Sigma))<=1) {
    stop("Sigma needs to be at least a 2-dimensional matrix")
  }
  if (length(dim(Phi))<=1) {
    stop("Phi needs to be at least a 2-dimensional matrix")
  }
  # Previously an unrecognized scenario silently produced all-zero tables;
  # fail fast instead.
  if (!scenario %in% c("ABS","WTH")) {
    stop("scenario needs to be either 'ABS' or 'WTH'")
  }
  NAMES = colnames(Sigma)
  ## Promote 2-dimensional inputs (a single time point) to 3-dimensional arrays
  if (length(dim(Phi))==2) {
    Phi = array(Phi, c(nrow(Phi),ncol(Phi),1))
  }
  if (length(dim(Sigma))==2) {
    Sigma = array(Sigma, c(nrow(Sigma),ncol(Sigma),1))
  }
  k = dim(Sigma)[1]  # number of variables
  t = dim(Sigma)[3]  # number of time points
  if (is.null(NAMES)) {
    NAMES = 1:k
  }

  ## Human-readable labels for each frequency band, plus a "Total" slice
  periods = round(pi/partition)
  period_names = NULL
  for (i in 1:(length(periods)-1)) {
    period_names = c(period_names, paste0(periods[i], "-", periods[i+1]))
  }
  period_names = c("Total",period_names)
  interval = length(period_names)
  date = as.character(dimnames(Sigma)[[3]])

  ## Map the frequency partition onto the forecast horizon
  new_p = frequencyConnectedness::getPartition(partition, nfore)
  range = sort(unique(do.call(c, new_p)))

  ## Preallocate result arrays; slice 1 ("Total") is filled after the loop
  TCI = array(0, c(t,interval), dimnames=list(date, period_names))
  CT = NPDC = PCI = INFLUENCE = array(0, c(k, k, t, interval), dimnames=list(NAMES, NAMES, date, period_names))
  NET = FROM = TO = NPT = array(0, c(t, k, interval), dimnames=list(date, NAMES, period_names))
  pb = progress_bar$new(total=t)
  for (i in 1:t) {
    decomp = FEVD(Phi=Phi[,,i], Sigma=Sigma[,,i], nfore=nfore, generalized=generalized, type="frequency", range=range)$FEVD
    for (ij in 1:length(decomp)) {
      rownames(decomp[[ij]]) = colnames(decomp[[ij]]) = 1:ncol(Sigma)
    }
    ## Aggregate the FEVD over the frequencies belonging to each band
    tables = lapply(new_p, function(j) Reduce('+', decomp[j]))
    for (j in 2:interval) {
      if (scenario=="ABS") {
        tab = tables[[j-1]]
      } else {
        ## "WTH": normalize the table within the frequency band
        tab = tables[[j-1]]/sum(tables[[j-1]])*k
      }
      dca = ConnectednessTable(tab)
      CT[,,i,j] = dca$FEVD
      TO[i,,j] = dca$TO
      FROM[i,,j] = dca$FROM
      NET[i,,j] = dca$NET
      NPDC[,,i,j] = dca$NPDC
      INFLUENCE[,,i,j] = dca$INFLUENCE
      NPT[i,,j] = dca$NPT
      TCI[i,j] = if (corrected) dca$cTCI else dca$TCI
    }
    pb$tick()
  }
  ## The "Total" slice (index 1) is the sum over all frequency bands
  CT[,,,1] = apply(CT,1:3,sum)
  TCI[,1] = apply(TCI,1,sum)
  TO[,,1] = apply(TO,1:2,sum)
  FROM[,,1] = apply(FROM,1:2,sum)
  NET[,,1] = apply(NET,1:2,sum)
  NPDC[,,,1] = apply(NPDC,1:3,sum)
  ## Pairwise connectedness (PCI) and influence measures
  for (ij in 1:t) {
    for (jl in interval:1) {
      for (i in 1:k) {
        for (j in 1:k) {
          PCI[i,j,ij,jl] = 200*(CT[i,j,ij,jl]+CT[j,i,ij,jl])/(CT[i,i,ij,1]+CT[i,j,ij,1]+CT[j,i,ij,1]+CT[j,j,ij,1])
        }
      }
      INFLUENCE[,,ij,jl] = 100*abs(NPDC[,,ij,jl]/t(t(CT[,,ij,1])+CT[,,ij,1]))
    }
    NPT[ij,,1] = rowSums(NPDC[,,ij,1]<0)
  }
  ## One summary table per frequency band (plus "Total")
  TABLE = array(NA,c(k+4,k+1,interval), dimnames=list(c(NAMES, "TO", "Inc.Own", "Net", "NPDC"), c(NAMES, "FROM"), period_names))
  for (i in 1:interval) {
    TABLE[,,i] = ConnectednessTable(CT[,,,i]/100)$TABLE
  }
  config = list(partition=partition, nfore=nfore, generalized=generalized, orth=orth, scenario=scenario, corrected=corrected, approach="Frequency")
  ## Fixed: the original did `return = list(...)`, which assigns to a variable
  ## named `return` and returns the result invisibly. Call return() instead.
  return(list(TABLE=TABLE, CT=CT/100, TCI=TCI, TO=TO, FROM=FROM,
              NET=NET, NPT=NPT, NPDC=NPDC, PCI=PCI, INFLUENCE=INFLUENCE, config=config))
}
| /R/FrequencyConnectedness.R | no_license | GabauerDavid/ConnectednessApproach | R | false | false | 4,969 | r | #' @title Baruník and Křehlík (2018) frequency connectedness approach
#' @description This function calculates the Baruník and Křehlík (2018) frequency connectedness measures.
#' @param Phi VAR coefficient matrix
#' @param Sigma Residual variance-covariance matrix
#' @param nfore H-step ahead forecast horizon
#' @param partition Frequency spectrum
#' @param generalized Orthorgonalized/generalized FEVD
#' @param scenario ABS or WTH
#' @param corrected Boolean value whether corrected or standard TCI should be computed
#' @param orth Orthorgonalized shocks
#' @return Get connectedness measures
#' @examples
#' \donttest{
#' data("dy2012")
#' partition = c(pi+0.00001, pi/4, 0)
#' fit = VAR(dy2012, configuration=list(nlag=4))
#' dca = FrequencyConnectedness(Phi=fit$B, Sigma=fit$Q, nfore=100, partition=partition)
#' }
#' @import frequencyConnectedness
#' @references
#' Baruník, J., & Křehlík, T. (2018). Measuring the frequency dynamics of financial connectedness and systemic risk. Journal of Financial Econometrics, 16(2), 271-296.
#' @author David Gabauer
#' @export
FrequencyConnectedness = function(Phi, Sigma, nfore=100, partition=c(pi,pi/2,0), generalized=TRUE, orth=FALSE, scenario="ABS", corrected=FALSE) {
if (nfore<=0) {
stop("nfore needs to be a positive integer")
}
if (length(dim(Sigma))<=1) {
stop("Sigma needs to be at least a 2-dimensional matrix")
}
if (length(dim(Phi))<=1) {
stop("Phi needs to be at least a 2-dimensional matrix")
}
NAMES = colnames(Sigma)
if (length(dim(Phi))==2) {
Phi = array(Phi, c(nrow(Phi),ncol(Phi),1))
}
if (length(dim(Sigma))==2) {
Sigma = array(Sigma, c(nrow(Sigma),ncol(Sigma),1))
}
k = dim(Sigma)[1]
t = dim(Sigma)[3]
if (is.null(NAMES)) {
NAMES = 1:k
}
periods = round(pi/partition)
period_names = NULL
for (i in 1:(length(periods)-1)) {
period_names = c(period_names, paste0(periods[i], "-", periods[i+1]))
}
period_names = c("Total",period_names)
date = as.character(dimnames(Sigma)[[3]])
interval = length(period_names)
new_p = frequencyConnectedness::getPartition(partition, nfore)
range = sort(unique(do.call(c, new_p)))
date = as.character(date)
TCI = array(0, c(t,interval), dimnames=list(date, period_names))
PCI = INFLUENCE = CT = NPDC = array(0, c(k, k, t, interval), dimnames=list(NAMES, NAMES, date, period_names))
NET = FROM = TO = array(0, c(t, k, interval), dimnames=list(date, NAMES, period_names))
NPT = array(0, c(t, k, interval), dimnames=list(date, NAMES, period_names))
PCI = INFLUENCE = array(0, c(k, k, t, interval), dimnames=list(NAMES, NAMES, date, period_names))
pb = progress_bar$new(total=t)
for (i in 1:t) {
decomp = FEVD(Phi=Phi[,,i], Sigma=Sigma[,,i], nfore=nfore, generalized=generalized, type="frequency", range=range)$FEVD
for (ij in 1:length(decomp)) {
rownames(decomp[[ij]]) = colnames(decomp[[ij]]) = 1:ncol(Sigma)
}
tables = lapply(new_p, function(j) Reduce('+', decomp[j]))
for (j in 2:interval) {
if (scenario=="ABS") {
dca = ConnectednessTable(tables[[j-1]])
CT[,,i,j] = dca$FEVD
TO[i,,j] = dca$TO
FROM[i,,j] = dca$FROM
NET[i,,j] = dca$NET
NPDC[,,i,j] = dca$NPDC
INFLUENCE[,,i,j] = dca$INFLUENCE
NPT[i,,j] = dca$NPT
if (corrected) {
TCI[i,j] = dca$cTCI
} else {
TCI[i,j] = dca$TCI
}
} else if (scenario=="WTH") {
dca = ConnectednessTable(tables[[j-1]]/sum(sum(tables[[j-1]]))*k)
CT[,,i,j] = dca$FEVD
TO[i,,j] = dca$TO
FROM[i,,j] = dca$FROM
NET[i,,j] = dca$NET
NPDC[,,i,j] = dca$NPDC
INFLUENCE[,,i,j] = dca$INFLUENCE
NPT[i,,j] = dca$NPT
if (corrected) {
TCI[i,j] = dca$cTCI
} else {
TCI[i,j] = dca$TCI
}
}
}
pb$tick()
}
CT[,,,1] = apply(CT,1:3,sum)
TCI[,1] = apply(TCI,1,sum)
TO[,,1] = apply(TO,1:2,sum)
FROM[,,1] = apply(FROM,1:2,sum)
NET[,,1] = apply(NET,1:2,sum)
NPDC[,,,1] = apply(NPDC,1:3,sum)
for (ij in 1:t) {
for (jl in interval:1) {
for (i in 1:k) {
for (j in 1:k) {
PCI[i,j,ij,jl] = 200*(CT[i,j,ij,jl]+CT[j,i,ij,jl])/(CT[i,i,ij,1]+CT[i,j,ij,1]+CT[j,i,ij,1]+CT[j,j,ij,1])
}
}
INFLUENCE[,,ij,jl] = 100*abs(NPDC[,,ij,jl]/t(t(CT[,,ij,1])+CT[,,ij,1]))
}
NPT[ij,,1] = rowSums(NPDC[,,ij,1]<0)
}
TABLE = array(NA,c(k+4,k+1,interval), dimnames=list(c(NAMES, "TO", "Inc.Own", "Net", "NPDC"), c(NAMES, "FROM"), period_names))
for (i in 1:interval) {
TABLE[,,i] = ConnectednessTable(CT[,,,i]/100)$TABLE
}
config = list(partition=partition, nfore=nfore, generalized=generalized, orth=orth, scenario=scenario, corrected=corrected, approach="Frequency")
return = list(TABLE=TABLE, CT=CT/100, TCI=TCI, TO=TO, FROM=FROM,
NET=NET, NPT=NPT, NPDC=NPDC, PCI=PCI, INFLUENCE=INFLUENCE, config=config)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partial_likelihood.R
\name{timevaryingPL}
\alias{timevaryingPL}
\title{timevaryingPL function}
\usage{
timevaryingPL(formula, t0, t, delta, dist, data, ties = "Efron",
optimcontrol = NULL)
}
\arguments{
\item{formula}{a formula of the form 'S ~ coef1 + coef2' etc.; the object S will be created automatically}
\item{t0}{X}
\item{t}{X}
\item{delta}{censoring indicator a vector of 1 for an event and 0 for censoring}
\item{dist}{X}
\item{data}{X}
\item{ties}{X default is Efron}
\item{optimcontrol}{X}
}
\value{
...
}
\description{
A function to evaluate the time-varying partial likelihood.
}
| /man/timevaryingPL.Rd | no_license | bentaylor1/spatsurv | R | false | true | 619 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partial_likelihood.R
\name{timevaryingPL}
\alias{timevaryingPL}
\title{timevaryingPL function}
\usage{
timevaryingPL(formula, t0, t, delta, dist, data, ties = "Efron",
optimcontrol = NULL)
}
\arguments{
\item{formula}{a formula of the form 'S ~ coef1 + coef2' etc the object S will be created}
\item{t0}{X}
\item{t}{X}
\item{delta}{censoring indicator a vector of 1 for an event and 0 for censoring}
\item{dist}{X}
\item{data}{X}
\item{ties}{X default is Efron}
\item{optimcontrol}{X}
}
\value{
...
}
\description{
A function to
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Events.R
\name{Events}
\alias{Events}
\title{Events}
\usage{
Events(events_path, crs)
}
\arguments{
\item{events_path}{Select path of events txt file.}
\item{crs}{Coordinate system used. The use of an EPSG code is strongly recommended.}
}
\description{
Reads and uploads txt events file.
}
| /man/Events.Rd | permissive | rdornas/siriemashapes | R | false | true | 358 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Events.R
\name{Events}
\alias{Events}
\title{Events}
\usage{
Events(events_path, crs)
}
\arguments{
\item{events_path}{Select path of events txt file.}
\item{crs}{Coordinate system used. Strongly recommended the use of EPSG.}
}
\description{
Reads and uploads txt events file.
}
|
library(shiny)
library(shinydashboard)
library(DT)
library(shinythemes)
## Top-level UI: a navbar with an Introduction page, the sgRNA Selection page,
## a Contacts page, and a drop-down menu of external links.
ui <- navbarPage(
  'GuidePro',
  theme = shinytheme("cerulean"),
  id = 'panels',
  ## The introduction page of GuidePro
  tabPanel(strong('Introduction', style = "font-size:18px"),
    titlePanel(h2("GuidePro: An ensemble predictor for prioritizing sgRNAs in CRISPR/Cas9 protein knockouts")),
    br(),
    fluidRow(
      column(12,
        ## Left column: detailed text introduction of GuidePro
        column(6,
          p(align = 'justify', style = "font-size:18px",
            'CRISPR/Cas9 has evolved as the most powerful tool for gene perturbation, and is widely used in protein functional
             analysis. Successful knockout of a protein-coding gene relies on the selection of sgRNAs with high efficiency. GuidePro is a two-layer ensemble
             predictor that enables the integration of multiple predictive methods and feature sets to predict sgRNA efficiency for the CRISPR/Cas9 protein knockouts'),
          p(align = 'justify', style = "font-size:18px",
            'As shown in the figure at right, GuidePro integrates three sub-predictors trained with different types of
             features that jointly contribute to protein knockouts: i) The first predictor (SA) predicts sgRNA activity by combining the outputs of other sgRNA sequence-based
             predictive methods. ii) The second predictor (FP) predicts the frameshift probabilities, leveraging the indel type predictions of three different machine learning
             models. iii) The third predictor (AS) predicts the amino acid sensitivity to CRISPR knockouts from annotation of protein features.
             Tested on 6 independent datasets from 5 studies, GuidePro demonstrated consistent superior performance in predicting phenotypes caused by protein loss-of-function,
             suggesting its robustness in a broad spectrum of experimental settings.'),
          p(align = 'justify', style = "font-size:18px",
            'You can select highly efficient sgRNAs for any given protein-coding gene',
            actionLink("sgRNASelection", strong("here."))),
          p(align = 'justify', style = "font-size:18px", strong('OR'),
            'download genome-wide prioritized top10 sgRNAs for ',
            a(strong('human,'), href = "https://figshare.com/articles/Genome-wide-sgRNA-Selection-human_csv_zip/12504167", style = "font-size:18px"),
            a(strong('monkey,'), href = "https://figshare.com/articles/Genome-wide-sgRNA-Selection-monkey_csv_zip/12504245", style = "font-size:18px"),
            a(strong('mouse.'), href = "https://figshare.com/articles/Genome-wide-sgRNA-Selection-mouse_csv_zip/12504203", style = "font-size:18px")),
          p(align = 'justify', style = "font-size:18px",
            'To learn more about our research, please visit',
            a(strong('Xu lab.'), href = "https://www.mdanderson.org/research/departments-labs-institutes/labs/xu-laboratory.html"))
        ),
        ## Right column: GuidePro workflow figure and copyright notice
        column(6,
          img(src = 'Workflow.png', align = "center", width = "600", height = "425"),
          hr(),
          p(align = 'center', style = "font-size:20px", 'Copyright (C) 2020 @',
            img(src = 'MD_Anderson.png', align = "center", width = "200", height = "100"))
        )
      )
    )
  ),
  ## Main page for sgRNA selection
  tabPanel(strong("sgRNA Selection", style = "font-size:18px"), value = 'Selection',
    titlePanel("Select highly efficient sgRNAs for CRISPR-Cas9 mediated Protein Knockouts"),
    br(),
    ## User selects the genome and gene name
    sidebarLayout(
      sidebarPanel(
        radioButtons("GenomeInput", "Select Genome",
                     choices = c("Human", 'Monkey', 'Mouse'),
                     selected = "Human"),
        selectInput("GeneInput", "Gene Symbol",
                    choices = c("EP300", "CREBBP", "CDK9"),
                    selected = 'CREBBP'),
        downloadButton('downloadData', 'Download'),
        width = 4,
        hr(),
        ## Text explaining the meaning of the output table columns
        p(align = 'justify', strong('Explanation of different columns of output table:')),
        p(align = 'justify', '1.spacer: 20 nt spacer sequence followed by NGG PAM motif.'),
        p(align = 'justify', '2.align: The number of perfect match sequences in the genome.'),
        p(align = 'justify', '3.genomic.loci: The genomic location of target DNA sequence.'),
        p(align = 'justify', '4.gene: The official symbol of target protein-coding gene.'),
        p(align = 'justify', '5.SA.score: Combined score for sgRNA activity, higher value means higher activity.'),
        p(align = 'justify', '6.FS.score: Combined score for frameshift probability, higher value means higher probability.'),
        p(align = 'justify', '7.AS.score: Combined score for amino acid sensitivity to knockout, higher value means more sensitive.'),
        p(align = 'justify', '8.GuidePro.score: Combined score of FS, AS and SA, higher value indicates higher knockout efficiency'),
        'Source code and data sets are available at:',
        a('https://github.com/MDhewei/GuidePro', href = "https://github.com/MDhewei/GuidePro")
      ),
      ## Render the queried table
      mainPanel(DT::dataTableOutput('results'))
    )
  ),
  tabPanel(strong('Contacts', style = "font-size:18px"),
    titlePanel('Any feedback and reasonable requests are welcome; please contact us'),
    br(),
    p(align = 'left', style = "font-size:18px", 'Wei He (First author)', br(),
      'Postdoctoral Fellow', br(),
      'The University of Texas MD Anderson Cancer Center', br(),
      'Department of Epigenetics and Molecular Carcinogenesis', br(),
      'Science Park', br(),
      '1808 Park Road 1C', br(),
      'Smithville, Texas 78957', br(),
      '512-237-6510', br(),
      'whe3@mdanderson.org'),
    hr(),
    p(align = 'left', style = "font-size:18px", 'Han Xu (Supervisor)', br(),
      'Principal Investigator', br(),
      'The University of Texas MD Anderson Cancer Center', br(),
      'Department of Epigenetics and Molecular Carcinogenesis', br(),
      'Science Park', br(),
      '1808 Park Road 1C', br(),
      'Smithville, Texas 78957', br(),
      '512-237-9474', br(),
      'hxu4@mdanderson.org'),
    br(),
    img(src = 'MD_Anderson.png', align = "center", width = "200", height = "100")
  ),
  navbarMenu(strong("Useful links", style = "font-size:18px"),
    tabPanel(a('Xu lab web page', href = "https://www.mdanderson.org/research/departments-labs-institutes/labs/xu-laboratory.html")),
    tabPanel(a('GuidePro source code', href = "https://github.com/MDhewei/GuidePro")),
    tabPanel(a('SSC web server', href = "http://cistrome.org/SSC/")),
    tabPanel(a('MoPAC for multi-sample CRISPR screen analysis', href = "https://sourceforge.net/projects/mopac/")),
    tabPanel(a('ProTiler for tiling CRISPR screen analysis', href = "https://github.com/MDhewei/protiler")),
    tabPanel(a('CRISPR-DO for genome-wide CRISPR design', href = "http://cistrome.org/crispr/"))
  )
)
## Server logic: wires the genome/gene inputs to the results table and the
## CSV download. Selection tables are read from ./<genome>/<gene>_sgRNA_selection.csv.
server <- function(input, output, session){
    ## Navigate from the introduction page's "here" link to the sgRNA Selection tab.
    observeEvent(input$sgRNASelection, {
      newvalue <- "Selection"
      updateTabItems(session, "panels", newvalue)
    })
    ## When the genome changes, reload <genome>_genes.csv and repopulate the
    ## gene selectInput with that genome's gene symbols.
    observe({
      genome <- input$GenomeInput
      gene_tb <- read.csv(paste(genome,'_genes.csv',sep=''))
      x <- gene_tb['Gene']
      updateSelectInput(session, "GeneInput",
                        label = 'Gene',
                        choices = x,
                        selected = x[1,1]
      )
    })
    ## Render the selection table for the chosen genome/gene.
    output$results <- DT::renderDataTable({
      genome <- input$GenomeInput
      gene <- input$GeneInput
      f_path <- paste('./',genome,'/',gene,'_sgRNA_selection.csv',sep='')
      tb_o <- read.csv(f_path)
      return(tb_o)
    })
    ## Serve the same table as a downloadable CSV.
    ## NOTE(review): the suggested filename contains a '/' (genome/gene);
    ## browsers typically sanitize this -- consider dropping the genome prefix.
    output$downloadData <- downloadHandler(
      filename = function(){
        paste(input$GenomeInput,'/',input$GeneInput,'_sgRNA_selection.csv',sep='')
      },
      content = function(theFile){
        genome <- input$GenomeInput
        gene <- input$GeneInput
        f_path <- paste('./',genome,'/',gene,'_sgRNA_selection.csv',sep='')
        tb <- read.csv(f_path)
        write.csv(tb,theFile)
      }
    )
}

shinyApp(ui = ui, server = server)
| /Shiny_App/app.R | no_license | MDhewei/GuidePro | R | false | false | 9,282 | r | library(shiny)
library(shinydashboard)
library(DT)
library(shinythemes)
ui <- navbarPage(
'GuidePro',
theme = shinytheme("cerulean"),
id = 'panels',
## The introduction page of GuidePro
tabPanel(strong('Introduction',style = "font-size:18px"),
titlePanel(h2("GuidePro: An ensemble predictor for prioritizing sgRNAs in CRISPR/Cas9 protein knockouts")),
br(),
fluidRow(
column(12,
## The left column gives detailed introduction of GuidePro:
column(6,
p(align='justify',style = "font-size:18px",'CRISPR/Cas9 has evolved as the most powerful tool for gene perturbation, and is widely used in protein functional
analysis. Successful knockout of a protein-coding gene relies on the selection of sgRNAs with high efficiency. GuidePro is a two-layer ensemble
predictor that enables the integration of multiple predictive methods and feature sets to predict sgRNA efficiency for the CRISPR/Cas9 protein knockouts'),
p(align='justify',style = "font-size:18px",'As shown in the figure at right, GuidePro integrates three sub-predictors trained with different types of
features that jointly contribute to protein knockouts: i) The first predictor (SA) predicts sgRNA activity by combining the outputs of other sgRNA sequence-based
predictive methods. ii) The second predictor (FP) predicts the frameshift probabilities, leveraging the indel type predictions of three different machine learning
models. iii) The third predictor (AS) predicts the amino acid sensitivity to CRISPR knockouts from annotation of protein features.
Tested on 6 independent datasets from 5 studies, GuidePro demonstrated consistent superior performance in predicting phenotypes caused by protein loss-of-function,
suggesting its robustness in a broad spectrum of experimental settings.'),
p(align='justify',style = "font-size:18px",'You can select highly efficient sgRNAs for any given protein-coding gene',
actionLink("sgRNASelection", strong("here."))),
p(align='justify',style = "font-size:18px",strong('OR'),'download genome-wide prioritized top10 sgRNAs for ',
a(strong('human,'),href="https://figshare.com/articles/Genome-wide-sgRNA-Selection-human_csv_zip/12504167",style = "font-size:18px"),
a(strong('monkey,'),href="https://figshare.com/articles/Genome-wide-sgRNA-Selection-monkey_csv_zip/12504245",style = "font-size:18px"),
a(strong('mouse.'),href="https://figshare.com/articles/Genome-wide-sgRNA-Selection-mouse_csv_zip/12504203",style = "font-size:18px")),
p(align='justify',style = "font-size:18px",'To learn more about our research, please visit'
,a(strong('Xu lab.'),href="https://www.mdanderson.org/research/departments-labs-institutes/labs/xu-laboratory.html"))
),
## The right columns contains a figure of GuidePro workflow
column(6, img(src='Workflow.png', align = "center",width="600",height="425"),
hr(),
p(align='center',style = "font-size:20px",'Copyright (C) 2020 @',img(src='MD_Anderson.png', align = "center",width="200",height="100")))
))
),
## This tab is the main page for sgRNA selection
tabPanel(strong("sgRNA Selection",style = "font-size:18px"),value='Selection',
titlePanel("Select high efficient sgRNAs for CRISPR-Cas9 mediated Protein Knockouts"),
br(),
## User input the genome and gene name
sidebarLayout(
sidebarPanel(
radioButtons("GenomeInput", "Select Genome",
choices = c("Human",'Monkey','Mouse'),
selected = "Human"),
selectInput("GeneInput", "Gene Symbol",
choices = c("EP300", "CREBBP", "CDK9"),
selected = 'CREBBP'),
downloadButton('downloadData', 'Download'),
width = 4,
hr(),
## Text to explain the meaning of output table
p(align = 'justify', strong('Explaination of different columns of output table:')),
p(align = 'justify','1.spacer: 20 nt spacer sequence followed by NGG PAM motif.'),
p(align = 'justify','2.align: The number of perfect match sequences in the genome.'),
p(align = 'justify','3.genomic.loci: The genomic location of target DNA sequence.'),
p(align = 'justify','4.gene: The official symbol of target protein-coding gene.'),
p(align = 'justify','5.SA.score: Combined score for sgRNA activity, higher value means higher activity.'),
p(align = 'justify','6.FS.score: Combined score for frameshift probability, higher value means higher probability.'),
p(align = 'justify','7.AS.score: Combined score for amino acid sensitivity to knockout, higher value means more sensitive.'),
p(align = 'justify','8.GuidePro.score: Combined score of FS, AS and SA, higher value indicates higher knockout efficiency'),
'Source code and data sets are available at:',
a('https://github.com/MDhewei/GuidePro',href="https://github.com/MDhewei/GuidePro")
),
## Render the inquried table
mainPanel(DT::dataTableOutput('results'))
)
),
tabPanel(strong('Contacts',style = "font-size:18px"),
titlePanel('Any feedbacks and reasonable requests are welcomed to contact us'),
br(),
p(align = 'left',style = "font-size:18px",'Wei He (First author)',br(),
'Postdoctoral Fellow',br(),
'The University of Texas MD Anderson Cancer Center',br(),
'Department of Epigenetics and Molecular Carcinogenesis',br(),
'Science Park',br(),
'1808 Park Road 1C',br(),
'Smithville, Texas 78957',br(),
'512-237-6510',br(),
'whe3@mdanderson.org'),
hr(),
p(align = 'left',style = "font-size:18px",'Han Xu (Supervisor)',br(),
'Principal Investigator',br(),
'The University of Texas MD Anderson Cancer Center',br(),
'Department of Epigenetics and Molecular Carcinogenesis',br(),
'Science Park',br(),
'1808 Park Road 1C',br(),
'Smithville, Texas 78957',br(),
'512-237-9474',br(),
'hxu4@mdanderson.org'),
br(),
img(src='MD_Anderson.png', align = "center",width="200",height="100")
),
navbarMenu(strong("Useful links",style = "font-size:18px"),
tabPanel(a('Xu lab web page',href="https://www.mdanderson.org/research/departments-labs-institutes/labs/xu-laboratory.html")),
tabPanel(a('GuidePro source code',href="https://github.com/MDhewei/GuidePro")),
tabPanel(a('SSC web server',href="http://cistrome.org/SSC/")),
tabPanel(a('MoPAC for multi-sample CRISPR screen analysis',href="https://sourceforge.net/projects/mopac/")),
tabPanel(a('ProTiler for tiling CRISPR screen analysis',href="https://github.com/MDhewei/protiler")),
tabPanel(a('CRISPR-DO for genome-wide CRISPR design',href="http://cistrome.org/crispr/"))
)
)
server <- function(input, output, session){
## Link the 'here' in introduction page to sgRNA selection page
observeEvent(input$sgRNASelection, {
newvalue <- "Selection"
updateTabItems(session, "panels", newvalue)
})
## Response to input genome to change list of selectinput gene names
observe({
genome <- input$GenomeInput
gene_tb <- read.csv(paste(genome,'_genes.csv',sep=''))
x <- gene_tb['Gene']
updateSelectInput(session, "GeneInput",
label = 'Gene',
choices = x,
selected = x[1,1]
)
})
## Response to input gene to render output tables
output$results <- DT::renderDataTable({
genome <- input$GenomeInput
gene <- input$GeneInput
f_path <- paste('./',genome,'/',gene,'_sgRNA_selection.csv',sep='')
tb_o <- read.csv(f_path)
return(tb_o)
})
## Response to download button to download the table
output$downloadData <- downloadHandler(
filename = function(){
paste(input$GenomeInput,'/',input$GeneInput,'_sgRNA_selection.csv',sep='')
},
content = function(theFile){
genome <- input$GenomeInput
gene <- input$GeneInput
f_path <- paste('./',genome,'/',gene,'_sgRNA_selection.csv',sep='')
tb <- read.csv(f_path)
write.csv(tb,theFile)
}
)
}
shinyApp(ui = ui, server = server)
|
## KNN demo: classify Valentine's treats as salty or sweet from their
## sodium and high-fructose corn syrup content (k = 3, Euclidean distance).

rm(list = ls())  # NOTE(review): clearing the workspace in a script is discouraged

####### Environment setup
library(class)  # provides the knn() function

####### Data pull
rootPathVar <- "/Users/ael-annan/Desktop/Storage/MveMveMve/UCLAInstructor/Summer2018-361188-Introduction to Data Science COM SCI X 450.1/Module 1/Module 1a/"
valentineTreats <- read.csv(paste0(rootPathVar, "ValentinesTreats.csv"), header = TRUE)

####### Data inspection
# Characterize the full data set.
dim(valentineTreats)
summary(valentineTreats)

# Keep only the class label and the two numeric features.
columnVars <- c("saltyOrSweet", "x_sodiumLevel_.g.", "y_highFructoseCornSyrup_.g.")
valentineTreatsSubset <- valentineTreats[columnVars]
dim(valentineTreatsSubset)
summary(valentineTreatsSubset)

####### Get training/test data ready
train <- head(valentineTreatsSubset, 6)
X_default_train <- train[, -1]          # feature columns only
y_default_train <- train$saltyOrSweet   # class labels
test <- tail(valentineTreatsSubset, 3)
X_default_test <- test[, -1]

####### Perform the KNN algorithm using Euclidean distance
# https://cran.r-project.org/web/packages/class/class.pdf
result <- knn(train = X_default_train,
              test = X_default_test,
              cl = y_default_train,
              k = 3)
| /module 1/What is DataScience/.ipynb_checkpoints/KNN-checkpoint.R | no_license | natestrong/ucla-data-science | R | false | false | 1,115 | r | rm(list = ls())
####### Environment setup
library(class)  # provides the knn() classifier
####### Data pull
# Path to the course data directory (hard-coded; adjust for your machine).
rootPathVar <- "/Users/ael-annan/Desktop/Storage/MveMveMve/UCLAInstructor/Summer2018-361188-Introduction to Data Science COM SCI X 450.1/Module 1/Module 1a/"
valentineTreats <- read.csv(paste0(rootPathVar, "ValentinesTreats.csv"), header = TRUE)
####### Data Inspection
# Inspect/characterize data
dim(valentineTreats)
summary(valentineTreats)
# Simplify data: keep the class label plus the two numeric features.
columnVars <- c("saltyOrSweet", "x_sodiumLevel_.g.", "y_highFructoseCornSyrup_.g.")
valentineTreatsSubset <- valentineTreats[columnVars]
dim(valentineTreatsSubset)
summary(valentineTreatsSubset)
####### Get training/test data ready
train <- head(valentineTreatsSubset, 6)   # first 6 rows form the training set
X_default_train <- train[, -1]            # feature columns only
y_default_train <- train$saltyOrSweet     # class labels for training
test <- tail(valentineTreatsSubset, 3)    # last 3 rows form the test set
X_default_test <- test[, -1]
####### Perform KNN algorithm using Euclidean distance
# https://cran.r-project.org/web/packages/class/class.pdf
result <- knn(train = X_default_train,
              test = X_default_test,
              cl = y_default_train,
              k = 3)
|
\name{dat_research}
\alias{dat_research}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Un-normalized altmetrics for research PLoS articles
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
See altmetrics project documentation
}
\usage{data(dat_research)}
\format{
> str(dat.research, max.levels=1, vec.len=1)
'data.frame': 21156 obs. of 33 variables:
$ doi : chr "10.1371/journal.pbio.0000001" ...
$ pubDate : POSIXlt, format: "2003-10-13" ...
$ journal.x : Factor w/ 7 levels "pbio","pcbi",..: 1 1 ...
$ title : chr "A Functional Analysis of the Spacer of V(D)J Recombination Signal Sequences" ...
$ articleType : Factor w/ 48 levels "Best Practice",..: 37 37 ...
$ authorsCount : num 6 14 ...
$ f1000Factor : num 6 0 ...
$ backtweetsCount : int 0 0 ...
$ deliciousCount : int 0 0 ...
$ pmid : int 14551903 14624234 ...
$ plosSubjectTags : chr "Cell Biology|Immunology|Molecular Biology" ...
$ plosSubSubjectTags : chr "" ...
$ facebookShareCount : int 0 0 ...
$ facebookLikeCount : int 0 0 ...
$ facebookCommentCount : int 0 0 ...
$ facebookClickCount : num 0 0 ...
$ mendeleyReadersCount : num 4 17 ...
$ almBlogsCount : int 0 0 ...
$ pdfDownloadsCount : int 348 2436 ...
$ xmlDownloadsCount : int 71 74 ...
$ htmlDownloadsCount : int 6131 14149 ...
$ almCiteULikeCount : int 0 3 ...
$ almScopusCount : int 28 141 ...
$ almPubMedCentralCount : int 7 54 ...
$ almCrossRefCount : int 5 40 ...
$ plosCommentCount : int 0 0 ...
$ plosCommentResponsesCount: int 0 0 ...
$ wikipediaCites : num 0 0 ...
$ daysSincePublished : int 2628 2593 ...
$ wosCount : num 29 137 ...
$ journal.y : chr "PLOS BIOLOGY" ...
$ articleNumber : chr "e1" ...
$ year : chr "2003" ...
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
See altmetrics project documentation
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
ADD LINK TO ALTMETRICS PROJECT
}
\references{
%% ~~ possibly secondary sources and usages ~~
ADD LINK TO ALTMETRICS PROJECT
}
\examples{
data(dat_research)
## See vignettes in the altmetrics.analysis project
## maybe str(dat.raw.wos) ; plot(dat.raw.wos) ...
}
\keyword{datasets}
| /stats/scripts/altmetrics.analysis/man/dat_research.Rd | permissive | neostoic/plos_altmetrics_study | R | false | false | 2,572 | rd | \name{dat_research}
\alias{dat_research}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Un-normalized altmetrics for research PLoS articles
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
See altmetrics project documentation
}
\usage{data(dat_research)}
\format{
> str(dat.research, max.levels=1, vec.len=1)
'data.frame': 21156 obs. of 33 variables:
$ doi : chr "10.1371/journal.pbio.0000001" ...
$ pubDate : POSIXlt, format: "2003-10-13" ...
$ journal.x : Factor w/ 7 levels "pbio","pcbi",..: 1 1 ...
$ title : chr "A Functional Analysis of the Spacer of V(D)J Recombination Signal Sequences" ...
$ articleType : Factor w/ 48 levels "Best Practice",..: 37 37 ...
$ authorsCount : num 6 14 ...
$ f1000Factor : num 6 0 ...
$ backtweetsCount : int 0 0 ...
$ deliciousCount : int 0 0 ...
$ pmid : int 14551903 14624234 ...
$ plosSubjectTags : chr "Cell Biology|Immunology|Molecular Biology" ...
$ plosSubSubjectTags : chr "" ...
$ facebookShareCount : int 0 0 ...
$ facebookLikeCount : int 0 0 ...
$ facebookCommentCount : int 0 0 ...
$ facebookClickCount : num 0 0 ...
$ mendeleyReadersCount : num 4 17 ...
$ almBlogsCount : int 0 0 ...
$ pdfDownloadsCount : int 348 2436 ...
$ xmlDownloadsCount : int 71 74 ...
$ htmlDownloadsCount : int 6131 14149 ...
$ almCiteULikeCount : int 0 3 ...
$ almScopusCount : int 28 141 ...
$ almPubMedCentralCount : int 7 54 ...
$ almCrossRefCount : int 5 40 ...
$ plosCommentCount : int 0 0 ...
$ plosCommentResponsesCount: int 0 0 ...
$ wikipediaCites : num 0 0 ...
$ daysSincePublished : int 2628 2593 ...
$ wosCount : num 29 137 ...
$ journal.y : chr "PLOS BIOLOGY" ...
$ articleNumber : chr "e1" ...
$ year : chr "2003" ...
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
See altmetrics project documentation
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
ADD LINK TO ALTMETRICS PROJECT
}
\references{
%% ~~ possibly secondary sources and usages ~~
ADD LINK TO ALTMETRICS PROJECT
}
\examples{
data(dat_research)
See vignettes in altmetrics.analysis project
## maybe str(dat.raw.wos) ; plot(dat.raw.wos) ...
}
\keyword{datasets}
|
library(rmongodb)
library(stringr)
library(plyr)
library(parallel)
PrepareLine <- function(readLine){
splitLine <- str_split(readLine,"\\s+")
splitLine <- unlist(splitLine)
return (splitLine)
}
MakeListBson <- function(object){
msg <- paste(c(object),collapse = ' ')
buffer <- mongo.bson.buffer.create()
i <<- i+1
mongo.bson.buffer.append(buffer,"originalText",msg)
mongo.bson.buffer.append(buffer,"_id",i)
mongo.bson.buffer.append(buffer,"howWords",length(object))
newobject <- mongo.bson.from.buffer(buffer)
return(newobject)
}
CheckAndRemoveCollection <- function(mongo,nameDatabase){
coll <- mongo.get.database.collections(mongo,"test")
coll <- coll[coll == nameDatabase]
if(length(coll) != 0){
mongo.drop(mongo,coll)
}
}
PrepareData <- function(line){
numberCores <- detectCores()
list <- mclapply(line,PrepareLine,mc.cores = numberCores)
nullElements <- mclapply(list,is.null,mc.cores = numberCores)
list <- list[nullElements == FALSE]
return (list)
}
LoadData <- function(filename,howMuch,nameDatabase){
numberSteps <- 0
i <<- 0
numberCores <- detectCores()
fileOpen <- file(filename,open ="r")
mongo <- mongo.create()
if(mongo.is.connected(mongo)){
CheckAndRemoveCollection(mongo,nameDatabase)
while(length(readLine <- readLines(fileOpen,n=howMuch,warn=FALSE)) > 0){
numberSteps <- numberSteps + 1
list <- PrepareData(readLine)
bsonList <- lapply(list,MakeListBson)
mongo.insert.batch(mongo,nameDatabase,bsonList)
msg <- paste("Add",howMuch,"records per",numberSteps,sep=" ")
print(msg)
}
}
close(fileOpen)
}
main <- function(filename,howMuch,nameDatabase){
LoadData(filename,howMuch,nameDatabase)
}
args = commandArgs(trailingOnly = TRUE)
if(length(args) < 3 ){
stop("Correct usage: Rscript PrepareData.R <filename> <how lines load> <name collections>")
}
main(as.character(args[1]),as.numeric(args[2]),as.character(args[3]))
| /r_scripts/PrepareData.R | no_license | mmiotk/MapReduceMongoDB | R | false | false | 1,956 | r | library(rmongodb)
library(stringr)
library(plyr)
library(parallel)
PrepareLine <- function(readLine){
	# Tokenize one raw text line on runs of whitespace and return the
	# tokens as a plain character vector.
	tokens <- str_split(readLine, "\\s+")
	unlist(tokens)
}
MakeListBson <- function(object){
	# Build a BSON document for one tokenized line: the re-joined original
	# text, a sequential numeric _id, and the token count.
	# NOTE(review): `i` is a global counter advanced via `<<-`; callers must
	# initialise it before mapping this function (LoadData resets it to 0).
	joinedText <- paste(object, collapse = ' ')
	bsonBuf <- mongo.bson.buffer.create()
	i <<- i + 1
	mongo.bson.buffer.append(bsonBuf, "originalText", joinedText)
	mongo.bson.buffer.append(bsonBuf, "_id", i)
	mongo.bson.buffer.append(bsonBuf, "howWords", length(object))
	mongo.bson.from.buffer(bsonBuf)
}
CheckAndRemoveCollection <- function(mongo,nameDatabase){
	# Drop the collection `nameDatabase` when it already exists, so a fresh
	# load starts from an empty collection.
	# NOTE(review): the database name "test" is hard-coded here — confirm
	# this matches the namespace that `nameDatabase` belongs to.
	existing <- mongo.get.database.collections(mongo, "test")
	matched <- existing[existing == nameDatabase]
	if (length(matched) > 0) {
		mongo.drop(mongo, matched)
	}
}
PrepareData <- function(line){
	# Tokenize each raw line in parallel, then drop any entries whose
	# tokenization produced NULL.
	cores <- detectCores()
	tokenized <- mclapply(line, PrepareLine, mc.cores = cores)
	isNull <- mclapply(tokenized, is.null, mc.cores = cores)
	tokenized[isNull == FALSE]
}
LoadData <- function(filename,howMuch,nameDatabase){
	# Stream `filename` in chunks of `howMuch` lines and bulk-insert each
	# chunk into the MongoDB collection `nameDatabase` as BSON documents.
	chunkNumber <- 0
	i <<- 0  # reset the global _id counter consumed by MakeListBson
	numberCores <- detectCores()
	conn <- file(filename, open = "r")
	mongo <- mongo.create()
	if (mongo.is.connected(mongo)) {
		CheckAndRemoveCollection(mongo, nameDatabase)
		repeat {
			chunk <- readLines(conn, n = howMuch, warn = FALSE)
			if (length(chunk) == 0) break
			chunkNumber <- chunkNumber + 1
			parsed <- PrepareData(chunk)
			docs <- lapply(parsed, MakeListBson)
			mongo.insert.batch(mongo, nameDatabase, docs)
			print(paste("Add", howMuch, "records per", chunkNumber, sep = " "))
		}
	}
	close(conn)
}
main <- function(filename,howMuch,nameDatabase){
	# Entry point: delegate to LoadData with the parsed CLI arguments.
	LoadData(filename = filename, howMuch = howMuch, nameDatabase = nameDatabase)
}
# Parse command-line arguments: <input file> <lines per batch> <collection name>.
args = commandArgs(trailingOnly = TRUE)
# Fail fast with a usage message when fewer than three arguments are given.
if(length(args) < 3 ){
	stop("Correct usage: Rscript PrepareData.R <filename> <how lines load> <name collections>")
}
# args[2] (batch size) is coerced to numeric; the others are used as strings.
main(as.character(args[1]),as.numeric(args[2]),as.character(args[3]))
|
#ZIP File URL
zipUrl <-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#ZIP file name
zipFile <- "./household_power_consumption.zip"
#Data File name
dataFile <- "./household_power_consumption.txt"
#Download and Unzip the file
if (!file.exists(dataFile)) {
download.file(zipUrl, zipFile)
unzip(zipFile, overwrite = T, exdir = ".")
}
#get column names for the data
cnames <- names(read.table(dataFile, header = T, nrows=1,sep=";", na.strings="?"))
#Read Data
powerConsumption <- read.table(dataFile, header=T, sep=";", na.strings="?")
#Set Names
powerConsumption <- setNames(powerConsumption,cnames)
# Get Data Scope
scopedData <- powerConsumption[powerConsumption$Date %in% c("1/2/2007","2/2/2007"),]
scopedData$Time <- strptime(paste(scopedData$Date, scopedData$Time), "%d/%m/%Y %H:%M:%S")
# Check Data Structure
head(scopedData)
tail(scopedData)
str(scopedData)
#Plot 4
par(mfrow = c(2,2),cex=0.6)
# plot 2 first
plot(scopedData$Time, scopedData$Global_active_power, type = "l", ylab="Global Active Power", xlab="", col="green" )
# Then the next new plot
plot(scopedData$Time, scopedData$Voltage, type="l", ylab="Voltage",xlab="datetime", col="pink")
# then plot 3
plot(scopedData$Time, scopedData$Sub_metering_1, type = "l", ylab="Energy sub metering", xlab="" )
lines(x =scopedData$Time, y= scopedData$Sub_metering_2, col="red")
lines(x =scopedData$Time, y= scopedData$Sub_metering_3, col="blue")
legend("topright", lty=c(1,1,1), col = c("black", "red", "blue"), text.width=100000 , bty="n",
legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
# Then the last new plot
plot(scopedData$Time, scopedData$Global_reactive_power, type="l", ylab="Global_reactive_power",xlab="datetime", col="brown")
#Copy Plot4 to PNG file
dev.copy(png, file = "Plot4.png")
#Close the PNG file device
dev.off() | /Plot4.R | no_license | asatram/ExData_Plotting1 | R | false | false | 1,880 | r | #ZIP File URL
# URL of the zipped power-consumption data set.
zipUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Local ZIP file name
zipFile <- "./household_power_consumption.zip"
# Extracted data file name
dataFile <- "./household_power_consumption.txt"
# Download and unzip only when the extracted data file is absent.
if (!file.exists(dataFile)) {
  download.file(zipUrl, zipFile)
  unzip(zipFile, overwrite = TRUE, exdir = ".")
}
# Read the data once; header = TRUE already assigns the column names, so the
# original's separate names-only read plus setNames() was redundant.
powerConsumption <- read.table(dataFile, header = TRUE, sep = ";", na.strings = "?")
# Restrict to the two days of interest and build a POSIX timestamp column.
scopedData <- powerConsumption[powerConsumption$Date %in% c("1/2/2007", "2/2/2007"), ]
scopedData$Time <- strptime(paste(scopedData$Date, scopedData$Time), "%d/%m/%Y %H:%M:%S")
# Sanity-check the filtered data.
head(scopedData)
tail(scopedData)
str(scopedData)
# Plot 4: a 2x2 panel of time-series plots.
par(mfrow = c(2, 2), cex = 0.6)
# Top-left panel: global active power.
plot(scopedData$Time, scopedData$Global_active_power, type = "l", ylab = "Global Active Power", xlab = "", col = "green")
# Top-right panel: voltage.
plot(scopedData$Time, scopedData$Voltage, type = "l", ylab = "Voltage", xlab = "datetime", col = "pink")
# Bottom-left panel: the three sub-metering series on one axis.
plot(scopedData$Time, scopedData$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(x = scopedData$Time, y = scopedData$Sub_metering_2, col = "red")
lines(x = scopedData$Time, y = scopedData$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), text.width = 100000, bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right panel: global reactive power.
plot(scopedData$Time, scopedData$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime", col = "brown")
# Copy the on-screen plot to Plot4.png.
dev.copy(png, file = "Plot4.png")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/androidenterprise_objects.R
\name{UsersListResponse}
\alias{UsersListResponse}
\title{UsersListResponse Object}
\usage{
UsersListResponse(user = NULL)
}
\arguments{
\item{user}{A user of an enterprise}
}
\value{
UsersListResponse object
}
\description{
UsersListResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The matching user resources.
}
| /googleandroidenterprisev1.auto/man/UsersListResponse.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 467 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/androidenterprise_objects.R
\name{UsersListResponse}
\alias{UsersListResponse}
\title{UsersListResponse Object}
\usage{
UsersListResponse(user = NULL)
}
\arguments{
\item{user}{A user of an enterprise}
}
\value{
UsersListResponse object
}
\description{
UsersListResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The matching user resources.
}
|
# code for replicating Yang et al
# Steps:
# 1. Make list of studies and samples
# - do we get the same n?
# 2. Grab raw cel files --> RMA --> report probe level
# 3. Visualize
# 4. DE analysis w covars they used
# 5. Add date to covariate analysis
# 16 studies
# sex, smoking status, age, COPD status, ethnicity and pack-years were available for 211 subjects
# (never smokers n=68; current smokers n=143) after removing duplicate data and outliers
#
# validation GSE7895
library(tidyverse)
library(readxl)
supp_files1 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=1, skip=2, col_names=TRUE)
head(supp_files1)
incl_files <- supp_files1 %>%
filter(`Microarray Platform`=="U133 Plus 2.0 Array")
list_geo_studies <- incl_files %>% pull(`GEO Accession`)
# get the lists of files associated with these data
library(GEOmetadb)
con <- dbConnect(SQLite(), "../GEOmetadb.sqlite")
gse_gsm <- dbGetQuery(con, sprintf("SELECT gse, gsm FROM gse_gsm WHERE gse IN ('%s');",
paste(list_geo_studies, collapse="','")))
gsm2 <- dbGetQuery(con, sprintf("SELECT gsm, title, source_name_ch1, description,
characteristics_ch1 FROM gsm
WHERE gsm IN ('%s');", paste(unique(gse_gsm$gsm), collapse="','")))
dbDisconnect(con)
# clean up AE phenotype data
gsm2.1 <- gsm2 %>%
separate_rows(characteristics_ch1, sep=";\t") %>%
mutate(characteristics_ch1=tolower(characteristics_ch1)) %>%
separate(characteristics_ch1, into=c("key", "value"), sep=": ") %>%
dplyr::select(-title, -source_name_ch1, -description) %>%
pivot_wider(names_from=key, values_from=value)
# TODO: check for pheno duplicates with other vals!
gsm2.2 <- gsm2.1 %>%
mutate(race_ethnicity=case_when(
`ethnic group` == "hispnaic" ~ "hispanic",
`ethnic group`=="afr" ~ "black",
`ethnic group`=="eur" ~ "white",
`ancestry`=="african" ~ "black",
`ancestry`=="european" ~ "white",
`ancestry`=="hispanic" ~ "hispanic",
`ethnicity`=="afr" ~ "black",
`ethnicity`=="eur" ~ "white",
TRUE ~ `ethnic group`
)) %>%
dplyr::select(-ethnicity, -ancestry, -`ethnic group`) %>%
separate(`smoking status`, into=c("smoking", "pack_years"),
sep=", ", extra="merge") %>%
mutate(copd=case_when(
`copd status`=="yes" ~ "yes",
smoking == "copd" ~ "yes",
smoking == "early-copd" ~ "early",
TRUE ~ "no"
)) %>%
mutate(smoking=case_when(
smoking %in% c("non-smoker", "nonsmoker", "ns") ~ "NS",
smoking %in% c("smoker", "s") ~ "S"
)) %>%
dplyr::select(-`copd status`) %>%
mutate(pack_years=as.numeric(str_replace_all(pack_years, " pack-years", ""))) %>%
dplyr::select(gsm, age, sex, smoking, race_ethnicity, copd, pack_years, everything())
full_gsm_dat <- gsm2.2 %>% select(-`smoking status:smoker, 1 pack-years`)
full_gsm_dat %>% write_csv("data/rep_full_gsm.csv")
# none of the DGM IDs are replicated
gsm2.3 <- gsm2.2 %>% filter(!is.na(smoking), copd=="no",
!is.na(race_ethnicity), !is.na(age)) %>%
dplyr::select(gsm, age, sex, smoking, race_ethnicity, pack_years) %>%
filter(smoking=="NS" | (smoking=="S" & !is.na(pack_years)))
table(gsm2.3$smoking) # 212 S, 153 NS
length(unique(gsm2.3$gsm))
# get download information
con <- dbConnect(SQLite(), "../GEOmetadb.sqlite")
download_info = dbGetQuery(con, sprintf("SELECT gsm, gpl, submission_date, supplementary_file FROM gsm WHERE gsm IN ('%s')",
paste(gsm2.3$gsm, collapse="','")))
dbDisconnect(con)
download_info2 <- download_info %>%
separate_rows(supplementary_file, sep=";\t") %>%
filter(str_detect(supplementary_file, "CEL"))
gsm2.4 <- gsm2.3 %>% left_join(download_info %>%
dplyr::select(gsm, submission_date))
gsm2.4 %>% write_csv("data/ae_phe_data.csv")
download_info2 %>% write_csv("data/list_ae_to_download_replication.csv")
# ------- READ IN significant genes tables ------ #
fill_empty_cells <- function(df){
# fill empty cells w previous values
df2 <- df %>%
mutate(gene=case_when(
is.na(gene) ~ lag(gene),
TRUE ~ gene),
chromosome=case_when(
is.na(chromosome) ~ lag(chromosome),
TRUE ~ chromosome
))
num_nas <- length(which(is.na(df2$gene)))
if(num_nas==0){
return(df2)
}
return(fill_empty_cells(df2))
}
format_sig_genes <- function(df){
disc <- df[,1:6]
rep <- df[,8:13]
colnames(disc) <- c("gene", "probe", "logFC", "pval", "FDR", "chromosome")
colnames(rep) <- c("gene", "probe", "logFC", "pval", "FDR", "chromosome")
disc1 <- fill_empty_cells(disc %>% filter(!is.na(pval)))
rep1 <- fill_empty_cells(rep %>% filter(!is.na(pval)))
return(list("disc"=disc1, "rep"=rep1))
}
supp_files2 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=2, skip=4, col_names=TRUE)
supp_files3 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=3, skip=4, col_names=TRUE)
supp_files4 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=4, skip=4, col_names=TRUE)
smok_dr <- format_sig_genes(supp_files2)
sex_dr <- format_sig_genes(supp_files3)
int_dr <- format_sig_genes(supp_files4)
disc1 <- int_dr$disc
rep1 <- int_dr$rep
disc_sex1 <- smok_dr$disc
disc_smok1 <- smok_dr$disc
rep_sex1 <- sex_dr$rep
rep_smok1 <- smok_dr$rep
stopifnot(length(which(is.na(disc1$gene)))==0)
stopifnot(length(which(is.na(rep1$gene)))==0)
# fix date genes
which(sapply(rep1$gene, function(x) !is.na(as.numeric(x))))
# "43350" == SEPTIN7 (ch7)
# "43164" == MARCH5 (ch10) MARCHF5
# "43160" == Mar1 or March1 (ch1) MARC1 --> MTARC1
rep2 <- rep1 %>%
mutate(gene=case_when(
gene=="43350" ~ "SEPTIN7",
gene=="43164" ~ "MARCHF5",
gene=="43160" ~ "MTARC1",
TRUE ~ gene
)) %>%
filter(!is.na(pval))
stopifnot(length(which(sapply(rep2$gene, function(x) !is.na(as.numeric(x)))))==0)
which(sapply(disc1$gene, function(x) !is.na(as.numeric(x))))
# "43167"== Mar-8 (ch10) MARCHF8
# "43355" = 12-Sep (ch16) SEPTIN12
# "43168" = Mar 9 (ch12) MARCHF12
# "43349" = 6-sep (X) SEPTIN6
# "43165" = 6-Mar (5) MARCHF6
# "43354" = 11-Sep (4) SEPTIN11
# "43350" = 7-Sep (ch7) SEPTIN7
# "43352" = 9-Sep (17) SEPTIN9
disc2 <- disc1 %>%
mutate(gene=case_when(
gene=="43167" ~ "MARCHF8",
gene=="43355" ~ "SEPTIN12",
gene=="43168" ~ "MARCHF12",
gene=="43349" ~ "SEPTIN6",
gene=="43165" ~ "MARCHF6",
gene=="43354" ~ "SEPTIN11",
gene=="43350" ~ "SEPTIN7",
gene=="43352" ~ "SEPTIN9",
TRUE ~ gene
))
stopifnot(length(which(sapply(disc2$gene, function(x) !is.na(as.numeric(x)))))==0)
disc2 %>% write_csv("ref/yang_int_disc.csv")
rep2 %>% write_csv("ref/yang_int_rep.csv")
# look at overlap btw discovery and validation
overlapping <- intersect(disc2 %>% distinct(gene) %>% pull(gene),
rep2 %>% distinct(gene) %>% pull(gene))
# 333 genes
# look at directionality
both_g <- disc2 %>% inner_join(rep2 %>% dplyr::select(-chromosome), by="gene")
ggplot(both_g, aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.7)+
theme_bw()+
ylab("logFC in yang replication")+
xlab("logFC in yang discovery")
# --> 184 (149 did not)
cor.test(both_g$logFC.x, both_g$logFC.y, method="kendall")
ggsave("figures/yang_disc_valid_compare.png")
# simulate random and view
# TODO: sample with replacement!
both_g %>% filter(logFC.x*logFC.y>0) %>% nrow() # 302
both_g2 <- both_g
sim_counts <- sapply(1:1000, function(x){
both_g2$logFC.y <- sample(both_g2$logFC.y, nrow(both_g2))
both_g2 %>% filter(logFC.x*logFC.y>0) %>% nrow()
})
ggplot(both_g2, aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.7)+
theme_bw()+
ylab("random logFC")+
xlab("logFC in yang discovery")
ggsave("figures/random_logFC.png")
ggplot(tibble("num_probes"=sim_counts), aes(x=num_probes))+
geom_histogram()+theme_bw()+
geom_vline(xintercept=302, col="red")+
xlab("Number of same direction probes")+
ylab("Number of randomized runs")
ggsave("figures/same_dir_probes.png")
ggplot(both_g2, aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.7)+
theme_bw()+
ylab("logFC in yang replication")+
xlab("shuffled logFC")
# look at sex coefficient disc/rep directionality
both_sex <- disc_sex1 %>%
inner_join(rep_sex1 %>% dplyr::select(-chromosome), by="gene")
both_sex %>%
ggplot(aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.4)+
theme_bw()+
ylab("logFC in yang replication")+
xlab("logFC in yang discovery")
ggsave("figures/rep_fc_sex.png")
both_sex %>% filter(logFC.x*logFC.y > 0) # 70 / 79
both_sex2 <- both_sex
sim_counts_s <- sapply(1:1000, function(x){
both_sex2$logFC.y <- sample(both_sex$logFC.y, nrow(both_sex))
both_sex2 %>% filter(logFC.x*logFC.y>0) %>% nrow()
})
ggplot(tibble("num_probes"=sim_counts_s), aes(x=num_probes))+
geom_histogram()+theme_bw()+
geom_vline(xintercept=70, col="red")+
xlab("Number of same direction probes")+
ylab("Number of randomized runs")
ggsave("figures/random_sex.png")
# look at directionality of multiple probes across genes
mult_g <- disc2 %>% semi_join(rep2, by="gene") %>%
arrange(gene) %>%
group_by(gene) %>%
mutate(n=n()) %>%
filter(n>1)
# neg*neg --> pos
# neg*neg*neg --> neg
mult_g2 <- mult_g %>%
group_by(gene) %>%
summarize(num_neg=sum(logFC<0), num_pos=sum(logFC>0), n=unique(n)) %>%
arrange(desc(n))
# 32 of the overlapping genes have probes in opposite directionsin disc
disc_opp <- mult_g2 %>%
filter(num_neg!= 0 & num_pos!=0) %>%
pull(gene)
rep_g2 <- rep2 %>%
semi_join(disc2, by="gene") %>%
arrange(gene) %>%
group_by(gene) %>%
mutate(n=n()) %>%
filter(n>1) %>%
group_by(gene) %>%
summarize(num_neg=sum(logFC<0),
num_pos=sum(logFC>0),
n=unique(n)) %>%
arrange(desc(n))
# 28 of the overlapping gnes have probes in opp dir in rep
rep_opp <- rep_g2 %>%
filter(num_neg!= 0 & num_pos!=0) %>%
pull(gene)
# filter and get the list
both_g_filt <- both_g %>%
filter(!gene %in% rep_opp,
!gene %in% disc_opp) %>%
arrange(gene) %>%
filter(logFC.x*logFC.y>0)
length(unique(both_g_filt$gene)) # --> 144
both_g_filt %>%
select(gene, chromosome, everything()) %>%
write_csv("ref/disc_rep_filt.csv")
| /code/04b_ae_analysis/replicate_yang_et_al.R | no_license | erflynn/smoking_sex_expression | R | false | false | 10,119 | r | # code for replicating Yang et al
# Steps:
# 1. Make list of studies and samples
# - do we get the same n?
# 2. Grab raw cel files --> RMA --> report probe level
# 3. Visualize
# 4. DE analysis w covars they used
# 5. Add date to covariate analysis
# 16 studies
# sex, smoking status, age, COPD status, ethnicity and pack-years were available for 211 subjects
# (never smokers n=68; current smokers n=143) after removing duplicate data and outliers
#
# validation GSE7895
library(tidyverse)
library(readxl)
supp_files1 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=1, skip=2, col_names=TRUE)
head(supp_files1)
incl_files <- supp_files1 %>%
filter(`Microarray Platform`=="U133 Plus 2.0 Array")
list_geo_studies <- incl_files %>% pull(`GEO Accession`)
# get the lists of files associated with these data
library(GEOmetadb)
con <- dbConnect(SQLite(), "../GEOmetadb.sqlite")
gse_gsm <- dbGetQuery(con, sprintf("SELECT gse, gsm FROM gse_gsm WHERE gse IN ('%s');",
paste(list_geo_studies, collapse="','")))
gsm2 <- dbGetQuery(con, sprintf("SELECT gsm, title, source_name_ch1, description,
characteristics_ch1 FROM gsm
WHERE gsm IN ('%s');", paste(unique(gse_gsm$gsm), collapse="','")))
dbDisconnect(con)
# clean up AE phenotype data
gsm2.1 <- gsm2 %>%
separate_rows(characteristics_ch1, sep=";\t") %>%
mutate(characteristics_ch1=tolower(characteristics_ch1)) %>%
separate(characteristics_ch1, into=c("key", "value"), sep=": ") %>%
dplyr::select(-title, -source_name_ch1, -description) %>%
pivot_wider(names_from=key, values_from=value)
# TODO: check for pheno duplicates with other vals!
gsm2.2 <- gsm2.1 %>%
mutate(race_ethnicity=case_when(
`ethnic group` == "hispnaic" ~ "hispanic",
`ethnic group`=="afr" ~ "black",
`ethnic group`=="eur" ~ "white",
`ancestry`=="african" ~ "black",
`ancestry`=="european" ~ "white",
`ancestry`=="hispanic" ~ "hispanic",
`ethnicity`=="afr" ~ "black",
`ethnicity`=="eur" ~ "white",
TRUE ~ `ethnic group`
)) %>%
dplyr::select(-ethnicity, -ancestry, -`ethnic group`) %>%
separate(`smoking status`, into=c("smoking", "pack_years"),
sep=", ", extra="merge") %>%
mutate(copd=case_when(
`copd status`=="yes" ~ "yes",
smoking == "copd" ~ "yes",
smoking == "early-copd" ~ "early",
TRUE ~ "no"
)) %>%
mutate(smoking=case_when(
smoking %in% c("non-smoker", "nonsmoker", "ns") ~ "NS",
smoking %in% c("smoker", "s") ~ "S"
)) %>%
dplyr::select(-`copd status`) %>%
mutate(pack_years=as.numeric(str_replace_all(pack_years, " pack-years", ""))) %>%
dplyr::select(gsm, age, sex, smoking, race_ethnicity, copd, pack_years, everything())
full_gsm_dat <- gsm2.2 %>% select(-`smoking status:smoker, 1 pack-years`)
full_gsm_dat %>% write_csv("data/rep_full_gsm.csv")
# none of the DGM IDs are replicated
gsm2.3 <- gsm2.2 %>% filter(!is.na(smoking), copd=="no",
!is.na(race_ethnicity), !is.na(age)) %>%
dplyr::select(gsm, age, sex, smoking, race_ethnicity, pack_years) %>%
filter(smoking=="NS" | (smoking=="S" & !is.na(pack_years)))
table(gsm2.3$smoking) # 212 S, 153 NS
length(unique(gsm2.3$gsm))
# get download information
con <- dbConnect(SQLite(), "../GEOmetadb.sqlite")
download_info = dbGetQuery(con, sprintf("SELECT gsm, gpl, submission_date, supplementary_file FROM gsm WHERE gsm IN ('%s')",
paste(gsm2.3$gsm, collapse="','")))
dbDisconnect(con)
download_info2 <- download_info %>%
separate_rows(supplementary_file, sep=";\t") %>%
filter(str_detect(supplementary_file, "CEL"))
gsm2.4 <- gsm2.3 %>% left_join(download_info %>%
dplyr::select(gsm, submission_date))
gsm2.4 %>% write_csv("data/ae_phe_data.csv")
download_info2 %>% write_csv("data/list_ae_to_download_replication.csv")
# ------- READ IN significant genes tables ------ #
fill_empty_cells <- function(df){
  # Fill NA cells in the `gene` and `chromosome` columns downward with the
  # last non-NA value above them (last-observation-carried-forward), as used
  # when un-merging spreadsheet cells.
  #
  # The original recursive lag()-based version had two defects:
  #   1. Its termination test counted NAs in `gene` only, so a run of NAs in
  #      `chromosome` longer than the longest `gene` run could be left
  #      partially unfilled.
  #   2. A leading NA in `gene` can never be filled by lag(), so the
  #      recursion never terminated (infinite recursion).
  # This iterative version fills both columns completely in one pass and
  # always terminates; a leading NA (nothing above to copy) stays NA.
  fill_down <- function(v) {
    for (k in seq_along(v)) {
      if (k > 1 && is.na(v[k])) v[k] <- v[k - 1]
    }
    v
  }
  df$gene <- fill_down(df$gene)
  df$chromosome <- fill_down(df$chromosome)
  df
}
format_sig_genes <- function(df){
  # Split one supplementary-table sheet into its discovery (columns 1-6) and
  # replication (columns 8-13) halves, normalise the column names, drop rows
  # without a p-value, and forward-fill the merged gene/chromosome cells.
  std_names <- c("gene", "probe", "logFC", "pval", "FDR", "chromosome")
  prep_half <- function(cols) {
    half <- df[, cols]
    colnames(half) <- std_names
    fill_empty_cells(half[!is.na(half$pval), ])
  }
  list("disc" = prep_half(1:6), "rep" = prep_half(8:13))
}
supp_files2 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=2, skip=4, col_names=TRUE)
supp_files3 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=3, skip=4, col_names=TRUE)
supp_files4 <- read_xlsx("ref/41598_2019_54051_MOESM2_ESM.xlsx", sheet=4, skip=4, col_names=TRUE)
smok_dr <- format_sig_genes(supp_files2)
sex_dr <- format_sig_genes(supp_files3)
int_dr <- format_sig_genes(supp_files4)
disc1 <- int_dr$disc
rep1 <- int_dr$rep
disc_sex1 <- smok_dr$disc
disc_smok1 <- smok_dr$disc
rep_sex1 <- sex_dr$rep
rep_smok1 <- smok_dr$rep
stopifnot(length(which(is.na(disc1$gene)))==0)
stopifnot(length(which(is.na(rep1$gene)))==0)
# fix date genes
which(sapply(rep1$gene, function(x) !is.na(as.numeric(x))))
# "43350" == SEPTIN7 (ch7)
# "43164" == MARCH5 (ch10) MARCHF5
# "43160" == Mar1 or March1 (ch1) MARC1 --> MTARC1
rep2 <- rep1 %>%
mutate(gene=case_when(
gene=="43350" ~ "SEPTIN7",
gene=="43164" ~ "MARCHF5",
gene=="43160" ~ "MTARC1",
TRUE ~ gene
)) %>%
filter(!is.na(pval))
stopifnot(length(which(sapply(rep2$gene, function(x) !is.na(as.numeric(x)))))==0)
which(sapply(disc1$gene, function(x) !is.na(as.numeric(x))))
# "43167"== Mar-8 (ch10) MARCHF8
# "43355" = 12-Sep (ch16) SEPTIN12
# "43168" = Mar 9 (ch12) MARCHF12
# "43349" = 6-sep (X) SEPTIN6
# "43165" = 6-Mar (5) MARCHF6
# "43354" = 11-Sep (4) SEPTIN11
# "43350" = 7-Sep (ch7) SEPTIN7
# "43352" = 9-Sep (17) SEPTIN9
disc2 <- disc1 %>%
mutate(gene=case_when(
gene=="43167" ~ "MARCHF8",
gene=="43355" ~ "SEPTIN12",
gene=="43168" ~ "MARCHF12",
gene=="43349" ~ "SEPTIN6",
gene=="43165" ~ "MARCHF6",
gene=="43354" ~ "SEPTIN11",
gene=="43350" ~ "SEPTIN7",
gene=="43352" ~ "SEPTIN9",
TRUE ~ gene
))
stopifnot(length(which(sapply(disc2$gene, function(x) !is.na(as.numeric(x)))))==0)
disc2 %>% write_csv("ref/yang_int_disc.csv")
rep2 %>% write_csv("ref/yang_int_rep.csv")
# look at overlap btw discovery and validation
overlapping <- intersect(disc2 %>% distinct(gene) %>% pull(gene),
rep2 %>% distinct(gene) %>% pull(gene))
# 333 genes
# look at directionality
both_g <- disc2 %>% inner_join(rep2 %>% dplyr::select(-chromosome), by="gene")
ggplot(both_g, aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.7)+
theme_bw()+
ylab("logFC in yang replication")+
xlab("logFC in yang discovery")
# --> 184 (149 did not)
cor.test(both_g$logFC.x, both_g$logFC.y, method="kendall")
ggsave("figures/yang_disc_valid_compare.png")
# simulate random and view
# TODO: sample with replacement!
both_g %>% filter(logFC.x*logFC.y>0) %>% nrow() # 302
both_g2 <- both_g
sim_counts <- sapply(1:1000, function(x){
both_g2$logFC.y <- sample(both_g2$logFC.y, nrow(both_g2))
both_g2 %>% filter(logFC.x*logFC.y>0) %>% nrow()
})
ggplot(both_g2, aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.7)+
theme_bw()+
ylab("random logFC")+
xlab("logFC in yang discovery")
ggsave("figures/random_logFC.png")
ggplot(tibble("num_probes"=sim_counts), aes(x=num_probes))+
geom_histogram()+theme_bw()+
geom_vline(xintercept=302, col="red")+
xlab("Number of same direction probes")+
ylab("Number of randomized runs")
ggsave("figures/same_dir_probes.png")
ggplot(both_g2, aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.7)+
theme_bw()+
ylab("logFC in yang replication")+
xlab("shuffled logFC")
# look at sex coefficient disc/rep directionality
both_sex <- disc_sex1 %>%
inner_join(rep_sex1 %>% dplyr::select(-chromosome), by="gene")
both_sex %>%
ggplot(aes(x=logFC.x, y=logFC.y))+
geom_point(alpha=0.4)+
theme_bw()+
ylab("logFC in yang replication")+
xlab("logFC in yang discovery")
ggsave("figures/rep_fc_sex.png")
both_sex %>% filter(logFC.x*logFC.y > 0) # 70 / 79
both_sex2 <- both_sex
sim_counts_s <- sapply(1:1000, function(x){
both_sex2$logFC.y <- sample(both_sex$logFC.y, nrow(both_sex))
both_sex2 %>% filter(logFC.x*logFC.y>0) %>% nrow()
})
ggplot(tibble("num_probes"=sim_counts_s), aes(x=num_probes))+
geom_histogram()+theme_bw()+
geom_vline(xintercept=70, col="red")+
xlab("Number of same direction probes")+
ylab("Number of randomized runs")
ggsave("figures/random_sex.png")
# look at directionality of multiple probes across genes
mult_g <- disc2 %>% semi_join(rep2, by="gene") %>%
arrange(gene) %>%
group_by(gene) %>%
mutate(n=n()) %>%
filter(n>1)
# neg*neg --> pos
# neg*neg*neg --> neg
mult_g2 <- mult_g %>%
group_by(gene) %>%
summarize(num_neg=sum(logFC<0), num_pos=sum(logFC>0), n=unique(n)) %>%
arrange(desc(n))
# 32 of the overlapping genes have probes in opposite directions in disc
disc_opp <- mult_g2 %>%
filter(num_neg!= 0 & num_pos!=0) %>%
pull(gene)
rep_g2 <- rep2 %>%
semi_join(disc2, by="gene") %>%
arrange(gene) %>%
group_by(gene) %>%
mutate(n=n()) %>%
filter(n>1) %>%
group_by(gene) %>%
summarize(num_neg=sum(logFC<0),
num_pos=sum(logFC>0),
n=unique(n)) %>%
arrange(desc(n))
# 28 of the overlapping gnes have probes in opp dir in rep
rep_opp <- rep_g2 %>%
filter(num_neg!= 0 & num_pos!=0) %>%
pull(gene)
# filter and get the list
both_g_filt <- both_g %>%
filter(!gene %in% rep_opp,
!gene %in% disc_opp) %>%
arrange(gene) %>%
filter(logFC.x*logFC.y>0)
length(unique(both_g_filt$gene)) # --> 144
both_g_filt %>%
select(gene, chromosome, everything()) %>%
write_csv("ref/disc_rep_filt.csv")
|
#-------------------------------------------------------------------------------#
# Package: Multistage Adaptive Enrichment Design                                #
# maed.maed.main(): The main function for solving the formulated LP for         #
#                   multi-stage, 2-sub-population AED problem.                  #
#-------------------------------------------------------------------------------#
#' The main function for solving the formulated LP for multi-stage, 2-sub-population adaptive enrichment design problem.
#'
#' The solver is defaulted to be a pre-existing LP library \code{'PRIMAL'}. For high-dimensional & high-sparsity setting, we adopt the sub-space optimization methods and row generation methods. We leave these for future implementation.
#'
#' @param L A loss matrix (in the objective) of size \code{n_dim}.
#' @param P A probability matrix (in the objective) of size \code{n_dim}.
#' @param L_c2 A loss matrix (in the C2 constraints) of size \code{J} by \code{n_dim}.
#' @param P_c2 A probability matrix (in the C2 constraints) of size \code{J} by \code{n_dim}.
#' @param beta C2 constraints values that the user should specify. Its size should agree with \code{J}.
#' @param solver The solver solving the formulated LP problem. Default to be \code{PRIMAL}.
#' @param J Total number of C2 constraints that the user should specify.
#' @param n_dim Total number of possible trials.
#' @return
#' An object with S3 class \code{"maed.maed"} is returned:
#' \item{lambda}{
#' The sequence of regularization parameters \code{lambda} obtained in the program.
#' }
#' \item{value}{
#' The sequence of optimal value of the object function corresponded to the sequence of \code{lambda}.
#' }
#' @seealso \code{\link{maed.p}}, \code{\link{maed.l}}, \code{\link{maed.g}}, and \code{\link{maed-package}}.
#' @export
maed.maed.main <- function(L=NULL, P=NULL, L_c2=NULL, P_c2=NULL, beta=NULL, solver="primal", J=NULL, n_dim=NULL){
  ## Assemble the LP pieces: the expected loss is maximized by minimizing
  ## its negation, subject to (L_c2 * P_c2) x <= beta.
  c <- -1 * L * P    # objective coefficients, length n_dim
  A <- L_c2 * P_c2   # constraint matrix, [J, n_dim]
  b <- beta          # constraint right-hand side, [J]
  est <- list()
  if (solver == "primal") {
    ## Access PRIMAL through its namespace instead of library() so that
    ## calling this function does not attach the package to the user's
    ## search path as a side effect.
    if (!requireNamespace("PRIMAL", quietly = TRUE)) {
      stop("Package 'PRIMAL' is required for solver = \"primal\".", call. = FALSE)
    }
    m <- dim(A)[1]
    n <- dim(A)[2]
    b_bar <- rep(0, m)
    c_bar <- rep(0, m + n)
    ## Initial basis: the m slack columns appended after the n variables.
    B_init <- seq(n, n + m - 1)
    fit <- PRIMAL::PSM_solver(A = A, b = b, b_bar = b_bar, c = c,
                              c_bar = c_bar, B_init = B_init)
    est$lambda <- fit$lambda
    est$value <- fit$value
    print(fit)
  } else {
    stop("The specified solver package is not supported.")
  }
  class(est) <- "maed.maed"
  return(est)
}
#' @useDynLib maed
#' @importFrom Rcpp sourceCpp
NULL
| /R/maed.maed.R | no_license | cliang1453/maed | R | false | false | 2,702 | r | #-------------------------------------------------------------------------------#
# Package: Multistage Adaptive Enrichment Design #
# maed.maed.main(): The main function for solving the formulated LP for #
# multi-stage, 2-sub-population AED problem. #
#-------------------------------------------------------------------------------#
#' The main function for solving the formulated LP for multi-stage, 2-sub-population adaptive enrichment design problem.
#'
#' The solver is defaulted to be a pre-exsiting LP library \code{'PRIMAL'}. For high-dimensional & high-sparsity setting, we adopt the sub-space optimization methods and row generation methods. We leave these for future implementation.
#'
#' @param L A loss matrix (in the objective) of size \code{n_dim}.
#' @param P A probability matrix (in the objective) of size \code{n_dim}.
#' @param L_c2 A loss matrix (in the C2 constraints) of size \code{J} by \code{n_dim}.
#' @param P_c2 A probability matrix (in the C2 constraints) of size \code{J} by \code{n_dim}.
#' @param beta C2 constraints values that the user should specify. Its size should agree with \code{J}.
#' @param solver The solver solving the formulated LP problem. Default to be \code{PRIMAL}.
#' @param J Total number of C2 constraints that the user should specify.
#' @param n_dim Total number of possible trails.
#' @return
#' An object with S3 class \code{"maed.maed"} is returned:
#' \item{lambda}{
#' The sequence of regularization parameters \code{lambda} obtained in the program.
#' }
#' \item{value}{
#' The sequence of optimal value of the object function corresponded to the sequence of \code{lambda}.
#' }
#' @seealso \code{\link{maed.p}}, \code{\link{maed.l}}, \code{\link{maed.g}}, and \code{\link{maed-package}}.
#' @export
maed.maed.main <- function(L=NULL, P=NULL, L_c2=NULL, P_c2=NULL, beta=NULL, solver="primal", J=NULL, n_dim=NULL){
# print(sum(P==0))
# print(sum(L==0))
# print(sum(P_c2==0))
# print(sum(L_c2==0))
c <- -1 * L * P # [n_dim]
A <- L_c2 * P_c2 #[J, n_dim]
b <- beta #[J]
est = list()
if(solver=="primal"){
library(PRIMAL)
m = dim(A)[1]
n = dim(A)[2]
b_bar = rep(0,m)
c_bar = rep(0,m+n)
B_init = seq(n,n+m-1)
fit <- PSM_solver(A=A, b=b, b_bar=b_bar, c=c, c_bar=c_bar, B_init=B_init)
est$lambda = fit$lambda
est$value = fit$value
print(fit)
rm(fit)
}else{
stop("The specified solver package is not supported.")
}
class(est) = "maed.maed"
return(est)
}
#' @useDynLib maed
#' @importFrom Rcpp sourceCpp
NULL
|
#Args[6]=input Args[7]=output
library(ggplot2)
## Scatter plot comparing control vs. treatment expression (log10) for
## associated and non-associated genes, colored by gene group.
Args <- commandArgs()
## Input table: rows = genes (first column used as row names); must contain
## columns log10_C_Expression, log10_T_Expression and Gene_group.
data <- read.table(Args[6], header = TRUE, row.names = 1)
## Build the plot in a single chain. The original script added the
## geom_point() layer twice (r2 and r3 were identical), which drew every
## point two times; a single layer is sufficient.
p <- ggplot(data, aes(log10_C_Expression, log10_T_Expression)) +
  theme_bw() +
  theme(panel.grid = element_blank(), panel.border = element_blank(),
        axis.line = element_line(size = 1, colour = "black")) +
  geom_point(aes(color = Gene_group)) +
  labs(title = "Relationship between associated genes and non-associated genes",
       x = "log10_(C-Expression)", y = "log10_(T-Expression)") +
  theme(title = element_text(family = "myFont", size = 15,
                             face = "italic", hjust = 0.2, lineheight = 0.2)) +
  theme(axis.text.x = element_text(size = 15)) +
  theme(axis.text.y = element_text(size = 15)) +
  scale_color_manual(values = c("#619cff", "#f8766d"))
ggsave(Args[7], p, width = 8, height = 8)
| /module/Quantitative_association_analysis/volcano_correlation.R | no_license | luoxing1996/TPCor | R | false | false | 825 | r | #Args[6]=input Args[7]=output
library(ggplot2)
Args <- commandArgs()
data =read.table(Args[6],header=T,row.names=1)
r0 = ggplot(data,aes(log10_C_Expression,log10_T_Expression))
r1=r0+theme_bw()+theme(panel.grid=element_blank(),panel.border=element_blank(),axis.line=element_line(size=1,colour="black"))
r2 =r1+ geom_point(aes(color =Gene_group))
r3 = r2 +geom_point(aes(color =Gene_group))
r4=r3 + labs(title="Relationship between associated genes and non-associated genes",x="log10_(C-Expression)",y="log10_(T-Expression)")+theme(title=element_text(family="myFont",size=15,
face="italic",hjust=0.2,lineheight=0.2))+theme(axis.text.x=element_text(size=15))+theme(axis.text.y=element_text(size=15))
r5 = r4 +scale_color_manual(values = c("#619cff","#f8766d"))
ggsave(Args[7],r5,width=8,height=8)
|
\name{localSupp}
\docType{methods}
\alias{localSupp-methods}
\alias{localSupp,ANY-method}
\alias{localSupp,sdcMicroObj-method}
\alias{localSupp}
\title{ Local Suppression }
\description{
A simple method to perform local suppression.
}
\usage{
localSupp(obj, threshold=0.15, keyVar,...)# indivRisk)
}
\section{Methods}{
\describe{
\item{\code{signature(obj = "ANY")}}{
%% ~~describe this method here~~
}
\item{\code{signature(obj = "sdcMicroObj")}}{
%% ~~describe this method here~~
}
}}
\arguments{
\item{obj}{ object of class freqCalc or sdcMicroObj }
\item{threshold}{ threshold for individual risk }
\item{keyVar}{ Variable on which some values might be suppressed }
\item{...}{see arguments below}
\item{indivRisk}{ object from class indivRisk }
}
\details{
Values of high risk (above the threshold) of a certain variable (parameter keyVar)
are suppressed.
}
\value{
Manipulated data with suppressions or the \dQuote{sdcMicroObj} object with manipulated data.
}
\references{
Templ, M.
\emph{Statistical Disclosure Control for Microdata Using the R-Package sdcMicro},
Transactions on Data Privacy,
vol. 1, number 2, pp. 67-85, 2008.
\url{http://www.tdp.cat/issues/abs.a004a08.php}
}
\author{ Matthias Templ }
\seealso{ \code{\link{freqCalc}}, \code{\link{indivRisk}} }
\examples{
## example from Capobianchi, Polettini and Lucarelli:
data(francdat)
f <- freqCalc(francdat, keyVars=c(2,4,5,6),w=8)
f
f$fk
f$Fk
## individual risk calculation:
indivf <- indivRisk(f)
indivf$rk
## Local Suppression
localS <- localSupp(f, keyVar=2, indivRisk=indivf$rk, threshold=0.25)
f2 <- freqCalc(localS$freqCalc, keyVars=c(4,5,6), w=8)
indivf2 <- indivRisk(f2)
indivf2$rk
## select another keyVar and run localSupp once again,
# if you think the table is not fully protected
## for objects of class sdcMicro:
data(testdata2)
sdc <- createSdcObj(testdata2,
keyVars=c('urbrur','roof','walls','water','electcon','relat','sex'),
numVars=c('expend','income','savings'), w='sampling_weight')
sdc <- localSupp(sdc, keyVar='urbrur')
}
\keyword{ manip }
| /man/localSupp.Rd | no_license | orlinresearch/sdcMicro | R | false | false | 2,207 | rd | \name{localSupp}
\docType{methods}
\alias{localSupp-methods}
\alias{localSupp,ANY-method}
\alias{localSupp,sdcMicroObj-method}
\alias{localSupp}
\title{ Local Suppression }
\description{
A simple method to perform local suppression.
}
\usage{
localSupp(obj, threshold=0.15, keyVar,...)# indivRisk)
}
\section{Methods}{
\describe{
\item{\code{signature(obj = "ANY")}}{
%% ~~describe this method here~~
}
\item{\code{signature(obj = "sdcMicroObj")}}{
%% ~~describe this method here~~
}
}}
\arguments{
\item{obj}{ object of class freqCalc or sdcMicroObj }
\item{threshold}{ threshold for individual risk }
\item{keyVar}{ Variable on which some values might be suppressed }
\item{...}{see arguments below}
\item{indivRisk}{ object from class indivRisk }
}
\details{
Values of high risk (above the threshold) of a certain variable (parameter keyVar)
are suppressed.
}
\value{
Manipulated data with suppressions or the \dQuote{sdcMicroObj} object with manipulated data.
}
\references{
Templ, M.
\emph{Statistical Disclosure Control for Microdata Using the R-Package sdcMicro},
Transactions on Data Privacy,
vol. 1, number 2, pp. 67-85, 2008.
\url{http://www.tdp.cat/issues/abs.a004a08.php}
}
\author{ Matthias Templ }
\seealso{ \code{\link{freqCalc}}, \code{\link{indivRisk}} }
\examples{
## example from Capobianchi, Polettini and Lucarelli:
data(francdat)
f <- freqCalc(francdat, keyVars=c(2,4,5,6),w=8)
f
f$fk
f$Fk
## individual risk calculation:
indivf <- indivRisk(f)
indivf$rk
## Local Suppression
localS <- localSupp(f, keyVar=2, indivRisk=indivf$rk, threshold=0.25)
f2 <- freqCalc(localS$freqCalc, keyVars=c(4,5,6), w=8)
indivf2 <- indivRisk(f2)
indivf2$rk
## select another keyVar and run localSupp once again,
# if you think the table is not fully protected
## for objects of class sdcMicro:
data(testdata2)
sdc <- createSdcObj(testdata2,
keyVars=c('urbrur','roof','walls','water','electcon','relat','sex'),
numVars=c('expend','income','savings'), w='sampling_weight')
sdc <- localSupp(sdc, keyVar='urbrur')
}
\keyword{ manip }
|
/R/Graph.R | permissive | momah/RPackageForRESTfullAPI | R | false | false | 655 | r | ||
## Process one race-results CSV ("outputs/Youth Clubs.csv") into a
## per-athlete, per-event table with PR flags, days-to-next-PR and
## PR-to-PR improvement, written to "processed/Youth Clubs.csv".
x <- "Youth Clubs.csv"
f <- paste("outputs/", x, sep = "")
raw <- read.csv(f, header=TRUE, sep = ",")
print(f)
# remove outliers, DNS, DNF, etc.
## Keep only rows whose time contains a "." (i.e. a parseable mark).
athletes <- raw[grep("\\.", as.character(raw$time)),]
# convert date into total days since 1/1/2000
## `date` appears to be encoded as the integer MMDDYYYY -- TODO confirm.
year <- c(athletes$date %% (10^4))
day <- c(floor((athletes$date %% (10^6)) / 10^4))
month <- c(floor((athletes$date %% (10^8)) / 10^6))
## Approximate day count: 365-day years and 30-day months (no leap days).
athletes$datedays = ((year - 2000) * 365) + (30 * month) + day
print("datedays done")
#convert runtime into seconds
athletes$time <- gsub("[^0-9.:]", "", as.character(athletes$time)) # remove letters
## NOTE(review): if every time splits into the same number of parts,
## sapply() returns a matrix and the per-element loop below would see
## single characters instead of split vectors -- verify on real data.
splittimes <- sapply(athletes$time, function(x) unlist(strsplit(x, "[. :]")))
time_in_seconds <- sapply(splittimes, function(x) { # convert into seconds
  time <- as.integer(x)
  time[is.na(time)] <- 0
  ## 3 parts = min:sec.hundredths, otherwise sec.hundredths.
  time_in_seconds <- ifelse(length(time) == 3, sum(time * c(60, 1, 0.01)), sum(time * c(1, 0.01)))
})
athletes$tis <- unlist(time_in_seconds)
print("tis done")
# create IDs for event and name
athletes$nameID <- as.numeric(as.factor(athletes$name))
athletes$eventID <- as.numeric(as.factor(athletes$event))
## Composite "<nameID>-<eventID>" key: one athlete in one event.
athletes$athlete_event <- paste(as.character(athletes$nameID), as.character(athletes$eventID), sep = "-")
# order by athlete, then event, then date and ID them for future ref
athletes <- athletes[order(athletes$athlete_event, athletes$datedays),]
athletes$ID <- seq.int(nrow(athletes))
print("IDs done")
# relabel PRs (sometimes races were entered twice, or it measures SRs instead)
## Walk each athlete/event group in date order; a race is a "PR" when it
## ties or beats every earlier time (the closure updates `pr` via <<-).
for(i in unique(athletes$athlete_event)) {
  events <- athletes[athletes$athlete_event == i,]
  pr <- Inf
  rec <- sapply(events$tis, function(x) {
    if (x <= pr) {
      pr <<- x
      return("PR")
    }
    else {
      return(NA)
    }
  })
  ## NOTE(review): this assumes a `record` column already exists in the
  ## CSV -- confirm; assigning a new column through a row subset errors.
  athletes[athletes$athlete_event == i,]$record <- rec
  #print(athletes[athletes$athlete_event == i,]$record)
}
print("PRs relabeled")
# find days till next PR
PRs <- athletes[which(athletes$record == "PR"),]
events <- unique(PRs$athlete_event)
athletes$next_PR <- rep(NA, nrow(athletes))
n_PR <- rep(99999, nrow(PRs))  # 99999 = sentinel for "no later PR"
for (event in events) {
  entries <- which(PRs$athlete_event == event)
  races <- PRs[entries,]$datedays
  ## Gap (approximate days) between consecutive PRs; the last PR of each
  ## group keeps the 99999 sentinel.
  n_PR[entries[1:(length(entries) - 1)]] <- ifelse(length(races) > 1, races[2:length(races)] - races[1:(length(races) - 1)],
                                                   99999)
}
athletes[which(athletes$record == "PR"),]$next_PR <- n_PR
print("found next PRs")
# find improvement from last PR
athletes$improvement <- rep(NA, nrow(athletes))
diffs <- rep(NA, nrow(PRs))
for (event in events) {
  entries <- which(PRs$athlete_event == event)
  ## NOTE(review): `entries` indexes rows of PRs but is used to subset
  ## `athletes` here -- this looks like it should be PRs[entries,]$tis.
  times <- athletes[entries,]$tis
  ## NOTE(review): `1:length(entries)-1` parses as (1:n)-1; the leading 0
  ## index is dropped when subsetting, so it behaves like 1:(n-1).
  diffs[entries[1:length(entries)-1]] <- ifelse(length(times) > 1, times[1:length(times) - 1] - times[2:length(times)], NA)
}
athletes[which(athletes$record == "PR"),]$improvement <- diffs
print("found improvements")
output <- paste("processed/", x, sep = "")
write.csv(athletes, output)
print("wrote out file")
| /cruncher/yc.R | no_license | ksunder11/CC_2018 | R | false | false | 3,037 | r | x <- "Youth Clubs.csv"
f <- paste("outputs/", x, sep = "")
raw <- read.csv(f, header=TRUE, sep = ",")
print(f)
# remove outliers, DNS, DNF, etc.
athletes <- raw[grep("\\.", as.character(raw$time)),]
# convert date into total days since 1/1/2000
year <- c(athletes$date %% (10^4))
day <- c(floor((athletes$date %% (10^6)) / 10^4))
month <- c(floor((athletes$date %% (10^8)) / 10^6))
athletes$datedays = ((year - 2000) * 365) + (30 * month) + day
print("datedays done")
#convert runtime into seconds
athletes$time <- gsub("[^0-9.:]", "", as.character(athletes$time)) # remove letters
splittimes <- sapply(athletes$time, function(x) unlist(strsplit(x, "[. :]")))
time_in_seconds <- sapply(splittimes, function(x) { # convert into seconds
time <- as.integer(x)
time[is.na(time)] <- 0
time_in_seconds <- ifelse(length(time) == 3, sum(time * c(60, 1, 0.01)), sum(time * c(1, 0.01)))
})
athletes$tis <- unlist(time_in_seconds)
print("tis done")
# create IDs for event and name
athletes$nameID <- as.numeric(as.factor(athletes$name))
athletes$eventID <- as.numeric(as.factor(athletes$event))
athletes$athlete_event <- paste(as.character(athletes$nameID), as.character(athletes$eventID), sep = "-")
# order by athlete, then event, then date and ID them for future ref
athletes <- athletes[order(athletes$athlete_event, athletes$datedays),]
athletes$ID <- seq.int(nrow(athletes))
print("IDs done")
# relabel PRs (sometimes races were entered twice, or it measures SRs instead)
for(i in unique(athletes$athlete_event)) {
events <- athletes[athletes$athlete_event == i,]
pr <- Inf
rec <- sapply(events$tis, function(x) {
if (x <= pr) {
pr <<- x
return("PR")
}
else {
return(NA)
}
})
athletes[athletes$athlete_event == i,]$record <- rec
#print(athletes[athletes$athlete_event == i,]$record)
}
print("PRs relabeled")
# find days till next PR
PRs <- athletes[which(athletes$record == "PR"),]
events <- unique(PRs$athlete_event)
athletes$next_PR <- rep(NA, nrow(athletes))
n_PR <- rep(99999, nrow(PRs))
for (event in events) {
entries <- which(PRs$athlete_event == event)
races <- PRs[entries,]$datedays
n_PR[entries[1:(length(entries) - 1)]] <- ifelse(length(races) > 1, races[2:length(races)] - races[1:(length(races) - 1)],
99999)
}
athletes[which(athletes$record == "PR"),]$next_PR <- n_PR
print("found next PRs")
# find improvement from last PR
athletes$improvement <- rep(NA, nrow(athletes))
diffs <- rep(NA, nrow(PRs))
for (event in events) {
entries <- which(PRs$athlete_event == event)
times <- athletes[entries,]$tis
diffs[entries[1:length(entries)-1]] <- ifelse(length(times) > 1, times[1:length(times) - 1] - times[2:length(times)], NA)
}
athletes[which(athletes$record == "PR"),]$improvement <- diffs
print("found improvements")
output <- paste("processed/", x, sep = "")
write.csv(athletes, output)
print("wrote out file")
|
## Regression of variables R and L using Support Vector Regression.
## A grid search is run over cost, gamma and kernel.
## Dimensionality is reduced using correlations.
## No 10-fold CV was performed; 10% of the rows are held out as a test set.
library(caret)
library(e1071)
library(ggplot2)
library(Metrics)
data <- read.csv("data_regression.csv")
label.R <- data[,"R"]
label.L <- data[,"L"]
## Drop the last two columns (the R and L targets) from the predictors.
data <- data[, -c((ncol(data)-1):(ncol(data)))]
## Reduce dimension: drop zero-variance columns, then highly correlated ones.
zv <- apply(data, 2, function(x) length(unique(x)) == 1)
## BUG FIX: `zv` is logical, so the original `data[, -zv]` coerced it to
## 0/-1 indices (never dropping the constant columns, and dropping column 1
## whenever any constant column existed); use logical negation instead.
data <- data[, !zv]
high_correlation <- findCorrelation(cor(data), cutoff = 0.95)
## Guard the empty case: `x[-integer(0)]` selects zero columns.
if (length(high_correlation) > 0) {
  data <- data[, -high_correlation]
}
## Hold out 10% of the rows as the test set.
test <- sample(x = nrow(data), size = nrow(data)/10, replace = FALSE)
## Regression for R: grid search over cost, gamma and kernel.
tuned.R <- tune(svm, train.x = data[-test,], train.y = label.R[-test], ranges = list(cost = 2^(-2:2), gamma = 2^(-2:2), kernel = c("radial", "linear")))
model.R <- tuned.R$best.model
predicted.R <- predict(model.R, data[test,])
## Error metrics on the held-out rows.
mae.R <- mae(actual = label.R[test], predicted = predicted.R)
mse.R <- mse(actual = label.R[test], predicted = predicted.R)
## Plot observed vs. predicted values for R.
graph_label.R <- data.frame(x = c(1:length(test)), y = label.R[test], z = "Real value")
graph_predicted.R <- data.frame(x = c(1:length(test)), y = predicted.R, z = "Predicted")
graph.R <- rbind(graph_label.R, graph_predicted.R)
ggplot(graph.R, aes(x=x, y=y, group=z)) + geom_point(aes(color = z)) + geom_line(aes(color = z)) + labs(x = "Prueba", y = "R", colour = "Value") + ylim(c(8.5, 11.5))
ggsave("svr_opt_R2.png")
## Regression for L: same grid search.
tuned.L <- tune(svm, train.x = data[-test,], train.y = label.L[-test], ranges = list(cost = 2^(-2:2), gamma = 2^(-2:2), kernel = c("radial", "linear")))
model.L <- tuned.L$best.model
predicted.L <- predict(model.L, data[test,])
## Error metrics on the held-out rows.
mae.L <- mae(actual = label.L[test], predicted = predicted.L)
mse.L <- mse(actual = label.L[test], predicted = predicted.L)
## Plot observed vs. predicted values for L.
graph_label.L <- data.frame(x = c(1:length(test)), y = label.L[test], z = "Real value")
graph_predicted.L <- data.frame(x = c(1:length(test)), y = predicted.L, z = "Predicted")
graph.L <- rbind(graph_label.L, graph_predicted.L)
ggplot(graph.L, aes(x=x, y=y, group=z)) + geom_point(aes(color = z)) + geom_line(aes(color = z)) + labs(x = "Prueba", y = "L", colour = "Value")
ggsave("svr_opt_L2.png")
| /optimizacion/svr_opt.R | no_license | AldanaDiego/memoria | R | false | false | 2,570 | r | ##Regresion de variables R y L usando Support Vector Regression
##Se realiza una busqueda de parametros para cost y gamma
##Se reducen los datos usando correlaciones
##No se realizo 10FCV
##Se uso 10% de los datos como prueba
library(caret)
library(e1071)
library(ggplot2)
library(Metrics)
data <- read.csv("data_regression.csv")
label.R <- data[,"R"]
label.L <- data[,"L"]
data <- data[, -c((ncol(data)-1):(ncol(data)))]
#Reducir dimension eliminando columnas con alta correlacion
zv <- apply(data, 2, function(x) length(unique(x)) == 1)
data <- data[,-zv]
high_correlation <- findCorrelation(cor(data), cutoff = 0.95)
selected_features <- c(1:ncol(data))[-high_correlation]
data <- data[,selected_features]
#Tomar 10% como datos de prueba
test <- sample(x = nrow(data), size = nrow(data)/10, replace = FALSE)
##Regresion para R
#model.R <- svm(x = data[-test,], y = label.R[-test])
tuned.R <- tune(svm, train.x = data[-test,], train.y = label.R[-test], ranges = list(cost = 2^(-2:2), gamma = 2^(-2:2), kernel = c("radial", "linear")))
model.R <- tuned.R$best.model
predicted.R <- predict(model.R, data[test,])
#Calcular metricas
mae.R <- mae(actual = label.R[test], predicted = predicted.R)
mse.R <- mse(actual = label.R[test], predicted = predicted.R)
#Graficar resultados
graph_label.R <- data.frame(x = c(1:length(test)), y = label.R[test], z = "Real value")
graph_predicted.R <- data.frame(x = c(1:length(test)), y = predicted.R, z = "Predicted")
graph.R <- rbind(graph_label.R, graph_predicted.R)
ggplot(graph.R, aes(x=x, y=y, group=z)) + geom_point(aes(color = z)) + geom_line(aes(color = z)) + labs(x = "Prueba", y = "R", colour = "Value") + ylim(c(8.5, 11.5))
ggsave("svr_opt_R2.png")
##Regresion para L
#model.L <- svm(x = data[-test,], y = label.L[-test])
tuned.L <- tune(svm, train.x = data[-test,], train.y = label.L[-test], ranges = list(cost = 2^(-2:2), gamma = 2^(-2:2), kernel = c("radial", "linear")))
model.L <- tuned.L$best.model
predicted.L <- predict(model.L, data[test,])
#Calcular metricas
mae.L <- mae(actual = label.L[test], predicted = predicted.L)
mse.L <- mse(actual = label.L[test], predicted = predicted.L)
#Graficar resultados
graph_label.L <- data.frame(x = c(1:length(test)), y = label.L[test], z = "Real value")
graph_predicted.L <- data.frame(x = c(1:length(test)), y = predicted.L, z = "Predicted")
graph.L <- rbind(graph_label.L, graph_predicted.L)
ggplot(graph.L, aes(x=x, y=y, group=z)) + geom_point(aes(color = z)) + geom_line(aes(color = z)) + labs(x = "Prueba", y = "L", colour = "Value")
ggsave("svr_opt_L2.png")
|
## Extracted example script exercising fda::fourier().
library(fda)
### Name: fourier
### Title: Fourier Basis Function Values
### Aliases: fourier
### Keywords: smooth
### ** Examples
# set up a set of 11 argument values
x <- seq(0,1,0.1)
names(x) <- paste("x", 0:10, sep="")
# compute values for five Fourier basis functions
# with the default period (1) and derivative (0)
# (the outer parentheses also print the returned basis matrix)
(basismat <- fourier(x, 5))
# Create a false Fourier basis, i.e., nbasis = 1
# = a constant function
fourier(x, 1)
| /data/genthat_extracted_code/fda/examples/fourier.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 449 | r | library(fda)
### Name: fourier
### Title: Fourier Basis Function Values
### Aliases: fourier
### Keywords: smooth
### ** Examples
# set up a set of 11 argument values
x <- seq(0,1,0.1)
names(x) <- paste("x", 0:10, sep="")
# compute values for five Fourier basis functions
# with the default period (1) and derivative (0)
(basismat <- fourier(x, 5))
# Create a false Fourier basis, i.e., nbasis = 1
# = a constant function
fourier(x, 1)
|
## another trimming function
.packageName <- 'mousetrack'
trim1 <- function(vect1, vect2, thresh){
    ## Trim an x/y trajectory down to the portion covering roughly the
    ## last `thresh` pixels of travel before the final click.
    ## Returns a list with the cut index (latindx) and the trimmed
    ## x (trimmed1) and y (trimmed2) coordinate vectors.
    end_x <- tail(vect1, 1)   # final x of the trajectory (the click)
    end_y <- tail(vect2, 1)   # final y of the trajectory
    rev_x <- rev(vect1)       # walk the trajectory backwards
    rev_y <- rev(vect2)
    ## Euclidean distance of each (reversed) sample from the final click.
    dist_from_end <- sqrt((rev_x - end_x)^2 + (rev_y - end_y)^2)
    ## First reversed sample further than `thresh` pixels from the click,
    ## translated back into an index on the original (forward) vectors.
    first_far <- which(dist_from_end > thresh)[1]
    cut_at <- length(vect1) - first_far
    list(latindx  = cut_at,
         trimmed1 = vect1[cut_at:length(vect1)],
         trimmed2 = vect2[cut_at:length(vect2)])
}
| /mousetrack/R/trim1.R | no_license | ingted/R-Examples | R | false | false | 947 | r | ## another trimming function
.packageName <- 'mousetrack'
trim1 <-function(vect1, vect2, thresh){
o1 = tail(vect1, 1) # get final value of trajectory
o2 = tail(vect2, 1)
flipo1 = vect1[length(vect1):1] # take original vector, and reverse
flipo2 = vect2[length(vect2):1]
# how far (in Euclidean distance, pixels) is x,y (reversed) from the final click at each time step
dists = sqrt((flipo1-o1)^2 + (flipo2-o2)^2)
latindx = which(dists > thresh)[1] # where is the x,y (reversed) trajectory THRESH pixels (aprox) from the final click
latindx2 = length(vect1)-latindx # where is the x,y (non-reversed) trajectory TRHESH pixels from the final click
trimmed1 = vect1[latindx2:length(vect1)] # based on latindex2, grab the x,y (non-reversed) trajectory for the last THRESH pixels.
trimmed2 = vect2[latindx2:length(vect2)]
return( list(latindx = latindx2, trimmed1 = trimmed1, trimmed2 = trimmed2) )
}
|
library(Lahman)
library(tidyverse)
library(dslabs)
ds_theme_set()
data(Teams)
## Scatter plot of per-game rates for the 1961-2001 seasons.
## NOTE(review): the X2G/X3G names look swapped -- X2G is computed from
## triples (X3B) and X3G from doubles (X2B); confirm intent before reuse.
Teams %>% filter(yearID %in% c(1961:2001)) %>%
mutate(RG = R/G, ABG = AB/G, WG = W/G, EG = E/G, X2G = X3B/G, X3G = X2B/G) %>% ggplot(aes(X3G,X2G))+
geom_point()
## Same per-game rates kept in a data frame for the correlation computed
## on the following line.
new_teams <- Teams %>% filter(yearID %in% c(1961:2001)) %>%
mutate(RG = R/G, ABG = AB/G, WG = W/G, EG = E/G, X2G = X3B/G, X3G = X2B/G)
cor(new_teams$X2G, new_teams$X3G) | /baseball_cor.R | no_license | kljunziga/test_harvardx | R | false | false | 420 | r | library(Lahman)
library(tidyverse)
library(dslabs)
ds_theme_set()
data(Teams)
Teams %>% filter(yearID %in% c(1961:2001)) %>%
mutate(RG = R/G, ABG = AB/G, WG = W/G, EG = E/G, X2G = X3B/G, X3G = X2B/G) %>% ggplot(aes(X3G,X2G))+
geom_point()
new_teams <- Teams %>% filter(yearID %in% c(1961:2001)) %>%
mutate(RG = R/G, ABG = AB/G, WG = W/G, EG = E/G, X2G = X3B/G, X3G = X2B/G)
cor(new_teams$X2G, new_teams$X3G) |
## Model-selection grid for a VAST spatio-temporal index-standardization
## model: loops over a survey offset (mySurvey), catchability covariates
## (q_d) and temporal-correlation options (k, k2), fits each model and
## records its AIC in AICoutput.
library(TMB)
library(VAST)
AICoutput <- data.frame(AIC=NA,survey=NA,q=NA,k=NA,k2=NA)
#Initial the parameter lists for the models you are testing
ParHatList <- list()
icnt <- 1  # row counter into AICoutput
for(mySurvey in c(0,1)){ #If 1 then make and offset for survey type
  for(q_d in c(0,1)){
    for(k in c(0,1,2,3)){
      for(k2 in c(1)){
        #Data
        raw <- read.csv("Update_Comb_Catch_wTrawlDist_flat.csv")
        # if(subDat!="All"){
        #   raw <- raw[raw$Survey==subDat,]
        # }
        #Adjust by wainright paper for JSOES
        ## Divide catches by gear-specific catch efficiencies (0.48 / 0.88).
        raw$catch[raw$Gear=="264NRT+MMED_Down" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED_Down" & raw$Extension=="No"]/0.48
        raw$catch[raw$Gear=="264NRT+MMED_Up" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED_Up" & raw$Extension=="No"]/0.88
        #Adjustments for SWFSC - REad Cheryl's email from 9/15/2020
        raw$catch[raw$Gear=="264NRT+MMED modified" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED modified" & raw$Extension=="No"]/0.48
        raw$catch[raw$Gear=="264NRT+MMED" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED" & raw$Extension=="No"]/0.88
        #Get rid of any blanks
        ## apply() counts NAs per row; `!count` keeps only complete rows.
        raw <- raw[!apply(raw,1,function(x)return(sum(is.na(x)))),]
        ## Catchability covariates: top-20m temperature and salinity,
        ## mean-imputed and z-scored column by column.
        Q_ik <- raw[,c('Top20m_Temp','Top20m_Salinity')] #rep(1,nrow(raw))
        for(i in 1:ncol(Q_ik)){
          Q_ik[is.na(Q_ik[,i]),i] <- mean(na.omit(Q_ik[,i]))
          Q_ik[,i] <- (Q_ik[,i]-mean(Q_ik[,i]))/sd(Q_ik[,i])
        }
        ## Optional JSOES survey indicator: appended as an extra column
        ## (q_d == 1) or replacing the covariates entirely (q_d == 0).
        if(mySurvey==1){
          if(q_d==1){
            Q_ik[,ncol(Q_ik)+1] <- 0
            Q_ik[raw$Survey=="JSOES",ncol(Q_ik)] <- 1
          }
          if(q_d==0){
            Q_ik <- rep(0,nrow(raw))
            Q_ik[raw$Survey=="JSOES"] <- 1
          }
        }
        #Decisions for VAST analysis. This follows the structure of Thorson (2019)
        #1)A multi region input looks like this
        strata.limits <- data.frame(
          'STRATA' = c("Coastwide","CA","OR","WA"),
          'north_border' = c(49.0, 42.0, 46.0, 49.0),
          'south_border' = c(37.0, 37.0, 42.0, 46.0)
        )
        #2) Single size class for now
        c_iz <- rep(0,dim(raw)[1]) #This needs to be numeric starting at 0
        # c_iz <- as.numeric(raw$Survey)-1
        #3) CPUE for now must change it to numbers
        b_i <- raw$catch
        Mesh.Method <- "samples" #mesh
        grid_size_km <- 100
        n_x <- 175 #number of knots. This is really important. Too few and the model won't converge, too many and it will take forever.
        Kmeans_Config = list( "randomseed"=1, "nstart"=100, "iter.max"=1e1 )
        Aniso <- FALSE #isotropic
        #10) Area offsets
        a_i <- raw$TrawlDist_km*0.02 #distance times net width, see Cheryl's email from Aug. 10th.
        #12)The observation model is
        ObsModel = c(2,0) # Distribution for data, and link-function for linear predictors
        FieldConfig <- rep(1,4)
        RhoConfig <- c("Beta1" = k #Temporal corr. encounter covariate intercepts
                       ,"Beta2" = k #Temporal corr. for positive catch covariate intercepts
                       ,"Epsilon1"= k2 #Temporal corr. for encounter probability intercepts
                       ,"Epsilon2" = k2) #Temporal corr. for positive catch intercepts
        settings <- make_settings(
          n_x = n_x
          ,Region = "california_current"
          ,purpose = "index"
          ,strata.limits = strata.limits
          ,FieldConfig = FieldConfig
          ,RhoConfig = RhoConfig
          # ,OverdispersionConfig = OverdispersionConfig
          ,ObsModel = ObsModel
          ,knot_method = "samples"
          ,bias.correct = FALSE
          # ,Options = Options
        )
        ## Fit without catchability covariates; tryCatch() converts a
        ## failed fit into NULL so the grid continues. getsd = FALSE
        ## skips standard errors for speed during model screening.
        if(q_d==0 & mySurvey==0){
          fit <- tryCatch(fit_model(settings = settings
                                    ,Lat_i = raw$Lat
                                    ,Lon_i = raw$Lon
                                    ,t_i = raw$Year
                                    ,b_i = b_i #Number of squid captured.
                                    ,a_i = a_i
                                    ,silent = FALSE
                                    ,getsd=FALSE
          ),error=function(e) NULL)
        }
        ## Same fit, but with the catchability design matrix Q_ik.
        if(q_d==1 | mySurvey==1){
          fit <- tryCatch(fit_model(settings = settings
                                    ,Lat_i = raw$Lat
                                    ,Lon_i = raw$Lon
                                    ,t_i = raw$Year
                                    ,b_i = b_i #Number of squid captured.
                                    ,a_i = a_i
                                    ,silent = FALSE
                                    ,Q_ik = as.matrix(Q_ik)
                                    ,getsd = FALSE
          ),error=function(e) NULL)
        }
        ## Record the AIC (or NA when the fit failed / had no AIC).
        if(!is.null(fit$parameter_estimates$AIC))
          AICoutput[icnt,] <- c(fit$parameter_estimates$AIC,mySurvey,q_d,k,k2)
        if(is.null(fit$parameter_estimates$AIC))
          AICoutput[icnt,] <- c(NA,mySurvey,q_d,k,k2)
        icnt <- icnt + 1
      }
    }
  }
}
# save(AICoutput,file="log_AICoutput.rData")
| /Model_AIC.r | no_license | bchasco/squid | R | false | false | 5,352 | r | library(TMB)
library(VAST)
AICoutput <- data.frame(AIC=NA,survey=NA,q=NA,k=NA,k2=NA)
#Initial the parameter lists for the models you are testing
ParHatList <- list()
icnt <- 1
# ---------------------------------------------------------------------------
# Model-selection grid for a VAST index standardization of squid catch data.
# Fits one model per combination of four switches and records each model's
# AIC into AICoutput (a matrix) at row icnt; both AICoutput and icnt must be
# initialised earlier in the script (not visible in this chunk).
#   mySurvey: if 1, add a catchability term for survey type (JSOES dummy)
#   q_d:      if 1, Q_ik = standardized covariates (+ survey dummy when
#             mySurvey==1); if 0 with mySurvey==1, Q_ik is the dummy alone
#   k:        temporal correlation option for intercepts (Beta1/Beta2)
#   k2:       temporal correlation option for spatio-temporal terms
# NOTE(review): fit_model()/make_settings() come from the VAST package,
# which must be attached before this chunk runs.
# ---------------------------------------------------------------------------
for(mySurvey in c(0,1)){ #If 1 then make and offset for survey type
for(q_d in c(0,1)){
for(k in c(0,1,2,3)){
for(k2 in c(1)){
#Data
raw <- read.csv("Update_Comb_Catch_wTrawlDist_flat.csv")
# if(subDat!="All"){
#   raw <- raw[raw$Survey==subDat,]
# }
#Adjust by wainright paper for JSOES
# Divide catches by gear-specific catchability so all gears are comparable.
raw$catch[raw$Gear=="264NRT+MMED_Down" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED_Down" & raw$Extension=="No"]/0.48
raw$catch[raw$Gear=="264NRT+MMED_Up" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED_Up" & raw$Extension=="No"]/0.88
#Adjustments for SWFSC - REad Cheryl's email from 9/15/2020
raw$catch[raw$Gear=="264NRT+MMED modified" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED modified" & raw$Extension=="No"]/0.48
raw$catch[raw$Gear=="264NRT+MMED" & raw$Extension=="No"] <- raw$catch[raw$Gear=="264NRT+MMED" & raw$Extension=="No"]/0.88
#Get rid of any blanks
# Drop every row containing at least one NA (sum(is.na(x)) > 0 is truthy).
raw <- raw[!apply(raw,1,function(x)return(sum(is.na(x)))),]
# Catchability covariates: mean-impute remaining NAs, then z-standardize
# each column.
Q_ik <- raw[,c('Top20m_Temp','Top20m_Salinity')] #rep(1,nrow(raw))
for(i in 1:ncol(Q_ik)){
Q_ik[is.na(Q_ik[,i]),i] <- mean(na.omit(Q_ik[,i]))
Q_ik[,i] <- (Q_ik[,i]-mean(Q_ik[,i]))/sd(Q_ik[,i])
}
# Optionally append (q_d==1) or replace with (q_d==0) a 0/1 dummy flagging
# JSOES survey tows.
if(mySurvey==1){
if(q_d==1){
Q_ik[,ncol(Q_ik)+1] <- 0
Q_ik[raw$Survey=="JSOES",ncol(Q_ik)] <- 1
}
if(q_d==0){
Q_ik <- rep(0,nrow(raw))
Q_ik[raw$Survey=="JSOES"] <- 1
}
}
#Decisions for VAST analysis. This follows the structure of Thorson (2019)
#1)A multi region input looks like this
# Latitude bands defining the coastwide index and the CA/OR/WA sub-indices.
strata.limits <- data.frame(
  'STRATA' = c("Coastwide","CA","OR","WA"),
  'north_border' = c(49.0, 42.0, 46.0, 49.0),
  'south_border' = c(37.0, 37.0, 42.0, 46.0)
)
#2) Single size class for now
c_iz <- rep(0,dim(raw)[1]) #This needs to be numeric starting at 0
# c_iz <- as.numeric(raw$Survey)-1
#3) CPUE for now must change it to numbers
b_i <- raw$catch
Mesh.Method <- "samples"   #mesh
grid_size_km <- 100
n_x <- 175   #number of knots. This is really important. To few and the model won't converge, too many and it will take forever.
Kmeans_Config = list( "randomseed"=1, "nstart"=100, "iter.max"=1e1 )
Aniso <- FALSE #isotropic
#10) Area offsets
a_i <- raw$TrawlDist_km*0.02 #distance times net width, see Cheryl's email from Aug. 10th.
#12)The observation model is
ObsModel = c(2,0) # Distribution for data, and link-function for linear predictors
FieldConfig <- rep(1,4)
RhoConfig <- c("Beta1" = k #Temporal corr. encounter covariate intercepts
              ,"Beta2" = k #Temporal corr. for positive catch covariate intercepts
              ,"Epsilon1"= k2 #Temporal corr. for encounter probability intercepts
              ,"Epsilon2" = k2) #Temporal corr. for positive catch intercepts
settings <- make_settings(
  n_x = n_x
  ,Region = "california_current"
  ,purpose = "index"
  ,strata.limits = strata.limits
  ,FieldConfig = FieldConfig
  ,RhoConfig = RhoConfig
  # ,OverdispersionConfig = OverdispersionConfig
  ,ObsModel = ObsModel
  ,knot_method = "samples"
  ,bias.correct = FALSE
  # ,Options = Options
)
# Fit without (first branch) or with (second branch) the Q_ik catchability
# design matrix; the two conditions are complementary, so exactly one runs.
# tryCatch returns NULL on any fitting error so the grid keeps going.
if(q_d==0 & mySurvey==0){
fit <- tryCatch(fit_model(settings = settings
  ,Lat_i = raw$Lat
  ,Lon_i = raw$Lon
  ,t_i = raw$Year
  ,b_i = b_i #Number of squid captured.
  ,a_i = a_i
  ,silent = FALSE
  ,getsd=FALSE
),error=function(e) NULL)
}
if(q_d==1 | mySurvey==1){
fit <- tryCatch(fit_model(settings = settings
  ,Lat_i = raw$Lat
  ,Lon_i = raw$Lon
  ,t_i = raw$Year
  ,b_i = b_i #Number of squid captured.
  ,a_i = a_i
  ,silent = FALSE
  ,Q_ik = as.matrix(Q_ik)
  ,getsd = FALSE
),error=function(e) NULL)
}
# Record AIC (or NA on failure) together with the four switch values.
if(!is.null(fit$parameter_estimates$AIC))
AICoutput[icnt,] <- c(fit$parameter_estimates$AIC,mySurvey,q_d,k,k2)
if(is.null(fit$parameter_estimates$AIC))
AICoutput[icnt,] <- c(NA,mySurvey,q_d,k,k2)
icnt <- icnt + 1
}
}
}
}
# save(AICoutput,file="log_AICoutput.rData")
|
# ---------------------------------------------------------------------------
# Parse WHO COVID-19 situation report #88 (2020-04-17) from PDF and append
# its country table to the accumulated data set in data/tables.rdata.
# ---------------------------------------------------------------------------
library(tidyverse)
library(pdftools)
# load utility
# make_lines(), remove_dis_char(), read_chr_vec3() and correct_area() are
# project helpers defined in R/utility.R.
source("R/utility.R")
# Specify FILE
FILE <- "20200417-sitrep-88-covid-191b6cccd94f8b4f219377bff55719a6ed.pdf"
# The publication date is the yyyymmdd prefix of the file name.
DATE <- as.Date(
  str_c(str_sub(FILE, 1L, 4L), "-",
        str_sub(FILE, 5L, 6L), "-",
        str_sub(FILE, 7L, 8L))
)
# Extract text (one character element per PDF page)
sr <- pdf_text(str_c("pdf/", FILE))
str(sr)
# Split into lines
lines <- sr %>%
  make_lines()
# Table 1. or 2. -- locate the country table between its header line and
# the "Grand total" row.
table_start2 <- str_which(lines, "^Table \\d. Countries")
table_end2 <- str_which(lines, "Grand total")
lines_table2 <- lines[table_start2:table_end2] %>%
  remove_dis_char() %>%
  str_remove_all("†") %>%
  stringi::stri_trans_general("latin-ascii")
# Capture groups: trailing words of the area name, then the four count
# columns (cum_conf, new_conf, cum_deaths, new_deaths).
# NOTE(review): the class [a-zA-z] also matches the punctuation between 'Z'
# and 'a' ("[", "\\", "]", "^", "_", "`"); [a-zA-Z] was probably intended --
# confirm against the parsed output before changing.
pattern <- "^\\s*([a-zA-z\\(\\),]+[^a-zA-z\\(\\),])*\\s*(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+[a-zA-z]+"
df_table2 <- read_chr_vec3(lines_table2, pattern = pattern)
# less than 2074529 by 712
# must be "International conveyance (Diamond Princess)
df_table2 %>%
  summarize(total = sum(cum_conf))
# The Diamond Princess row is not captured by the regex; append it manually.
df_table2 <- bind_rows(df_table2,
                       tibble(
                         area = "International conveyance (Diamond Princess)",
                         cum_conf = 712,
                         new_conf = 0,
                         cum_deaths = 13,
                         new_deaths = 1
                       )
)
# correct long area names (names wrapped onto a second PDF line)
df_table2 <- df_table2 %>%
  correct_area()
df_table2$area
# Manually correct area names -- the regex keeps only the tail of wrapped
# names, so the fragments below are mapped back to full country names.
# Democratic Republic: Laos and DRC
df_table2[(df_table2$area == "Democratic Republic"), ]
df_table2[(df_table2$area == "Democratic Republic"), "area"] <-
  "Laos"
# df_table2[(df_table2$area == "the)"), ]
# df_table2[(df_table2$area == "the)"), "area"] <- "Northern Mariana Islands"
df_table2[(df_table2$area == ""), ]
df_table2[(df_table2$area == ""), "area"] <- "Kosovo"
df_table2[(df_table2$area == "of)"), ]
df_table2[(df_table2$area == "of)"), "area"] <- "Iran"
df_table2[(df_table2$area == "State of)"), ]
df_table2[(df_table2$area == "State of)"), "area"] <- "Bolivia"
df_table2[(df_table2$area == "Republic of)"), ]
df_table2[(df_table2$area == "Republic of)"), "area"] <- "Venezuela"
# "Islands" matches two rows; the replacement relies on their table order.
df_table2[(df_table2$area == "Islands"), ]
df_table2[(df_table2$area == "Islands"), "area"] <-
  c("U.S. Virgin Islands", "Turks and Caicos Islands")
df_table2[(df_table2$area == "and Saba"), ]
df_table2[(df_table2$area == "and Saba"), "area"] <- "Bonaire, Sint Eustatius and Saba"
df_table2[(df_table2$area == "(Malvinas)"), ]
df_table2[(df_table2$area == "(Malvinas)"), "area"] <- "Falkland Islands"
df_table2[(df_table2$area == "Miquelon"), ]
df_table2[(df_table2$area == "Miquelon"), "area"] <- "Saint Pierre and Miquelon"
df_table2[(df_table2$area == "Principe"), ]
df_table2[(df_table2$area == "Principe"), "area"] <- "Sao Tome and Principe"
# add publish_date
df_table2 <- df_table2 %>%
  select(area, new_conf, new_deaths, cum_conf, cum_deaths) %>%
  mutate(publish_date = DATE)
# df_table1: a one-row "Total" summary derived from the China row
df_table1 <- df_table2 %>%
  filter(area == "China") %>%
  rename(region = area)
df_table1[df_table1$region == "China", "region"] <- "Total"
# load the accumulated table1/table2 objects
load("data/tables.rdata")
# check new entry
length(unique(table2$area))
length(df_table2$area)
setdiff(df_table2$area, unique(table2$area)) # Yemen
# merge table1 and table2
table1 <- bind_rows(table1, df_table1)
table2 <- bind_rows(table2, df_table2)
table1 <- table1 %>%
  select(publish_date, region, new_conf, new_deaths, cum_conf, cum_deaths)
table2 <- table2 %>%
  select(publish_date, area, new_conf, new_deaths, cum_conf, cum_deaths)
# update area category
# NOTE(review): the rep() counts below are position-based and must match
# the exact row order and length of df_table2 -- verify if this script is
# reused for a different report.
df_table2$area
area_cat <- tibble(
  area = df_table2$area,
  cat = c(
    rep("China", 1),
    rep("South East Asia, excl China", 18),
    rep("Europe", 60),
    rep("South East Asia, excl China", 10),
    rep("Eastern Mediterranean", 22),
    rep("Americas", 54),
    rep("Sub-Saharan Africa", 47),
    rep("International conveyance", 1)
  )
)
# check
length(unique(table2$area))
length(unique(area_cat$area))
setdiff(unique(table2$area), unique(area_cat$area))
# save CSV snapshots and the updated rdata file
table1 %>%
  write.csv("data/table1.csv", row.names = FALSE)
table2 %>%
  write.csv("data/table2.csv", row.names = FALSE)
save(table1, table2, area_cat, file = "data/tables.rdata")
| /old-codes/update_sr_88.R | permissive | mitsuoxv/covid | R | false | false | 4,229 | r | library(tidyverse)
library(pdftools)
# load utility
source("R/utility.R")
# Specify FILE
FILE <- "20200417-sitrep-88-covid-191b6cccd94f8b4f219377bff55719a6ed.pdf"
DATE <- as.Date(
str_c(str_sub(FILE, 1L, 4L), "-",
str_sub(FILE, 5L, 6L), "-",
str_sub(FILE, 7L, 8L))
)
# Extract text
sr <- pdf_text(str_c("pdf/", FILE))
str(sr)
# Split into lines
lines <- sr %>%
make_lines()
# Table 1. or 2.
table_start2 <- str_which(lines, "^Table \\d. Countries")
table_end2 <- str_which(lines, "Grand total")
lines_table2 <- lines[table_start2:table_end2] %>%
remove_dis_char() %>%
str_remove_all("†") %>%
stringi::stri_trans_general("latin-ascii")
pattern <- "^\\s*([a-zA-z\\(\\),]+[^a-zA-z\\(\\),])*\\s*(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+[a-zA-z]+"
df_table2 <- read_chr_vec3(lines_table2, pattern = pattern)
# less than 2074529 by 712
# must be "International conveyance (Diamond Princess)
df_table2 %>%
summarize(total = sum(cum_conf))
df_table2 <- bind_rows(df_table2,
tibble(
area = "International conveyance (Diamond Princess)",
cum_conf = 712,
new_conf = 0,
cum_deaths = 13,
new_deaths = 1
)
)
# correct long area names
df_table2 <- df_table2 %>%
correct_area()
df_table2$area
# Manually correct area names
# Democratic Republic: Laos and DRC
df_table2[(df_table2$area == "Democratic Republic"), ]
df_table2[(df_table2$area == "Democratic Republic"), "area"] <-
"Laos"
# df_table2[(df_table2$area == "the)"), ]
# df_table2[(df_table2$area == "the)"), "area"] <- "Northern Mariana Islands"
df_table2[(df_table2$area == ""), ]
df_table2[(df_table2$area == ""), "area"] <- "Kosovo"
df_table2[(df_table2$area == "of)"), ]
df_table2[(df_table2$area == "of)"), "area"] <- "Iran"
df_table2[(df_table2$area == "State of)"), ]
df_table2[(df_table2$area == "State of)"), "area"] <- "Bolivia"
df_table2[(df_table2$area == "Republic of)"), ]
df_table2[(df_table2$area == "Republic of)"), "area"] <- "Venezuela"
df_table2[(df_table2$area == "Islands"), ]
df_table2[(df_table2$area == "Islands"), "area"] <-
c("U.S. Virgin Islands", "Turks and Caicos Islands")
df_table2[(df_table2$area == "and Saba"), ]
df_table2[(df_table2$area == "and Saba"), "area"] <- "Bonaire, Sint Eustatius and Saba"
df_table2[(df_table2$area == "(Malvinas)"), ]
df_table2[(df_table2$area == "(Malvinas)"), "area"] <- "Falkland Islands"
df_table2[(df_table2$area == "Miquelon"), ]
df_table2[(df_table2$area == "Miquelon"), "area"] <- "Saint Pierre and Miquelon"
df_table2[(df_table2$area == "Principe"), ]
df_table2[(df_table2$area == "Principe"), "area"] <- "Sao Tome and Principe"
# add publish_date
df_table2 <- df_table2 %>%
select(area, new_conf, new_deaths, cum_conf, cum_deaths) %>%
mutate(publish_date = DATE)
# df_table1
df_table1 <- df_table2 %>%
filter(area == "China") %>%
rename(region = area)
df_table1[df_table1$region == "China", "region"] <- "Total"
# load
load("data/tables.rdata")
# check new entry
length(unique(table2$area))
length(df_table2$area)
setdiff(df_table2$area, unique(table2$area)) # Yemen
# merge table1 and table2
table1 <- bind_rows(table1, df_table1)
table2 <- bind_rows(table2, df_table2)
table1 <- table1 %>%
select(publish_date, region, new_conf, new_deaths, cum_conf, cum_deaths)
table2 <- table2 %>%
select(publish_date, area, new_conf, new_deaths, cum_conf, cum_deaths)
# update area category
df_table2$area
area_cat <- tibble(
area = df_table2$area,
cat = c(
rep("China", 1),
rep("South East Asia, excl China", 18),
rep("Europe", 60),
rep("South East Asia, excl China", 10),
rep("Eastern Mediterranean", 22),
rep("Americas", 54),
rep("Sub-Saharan Africa", 47),
rep("International conveyance", 1)
)
)
# check
length(unique(table2$area))
length(unique(area_cat$area))
setdiff(unique(table2$area), unique(area_cat$area))
# save
table1 %>%
write.csv("data/table1.csv", row.names = FALSE)
table2 %>%
write.csv("data/table2.csv", row.names = FALSE)
save(table1, table2, area_cat, file = "data/tables.rdata")
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
#function makeCacheMatrix creates a list of values: it sets the matrix,
#gets the matrix, sets the inverted matrix, and gets the inverted matrix
makeCacheMatrix <- function(x = matrix()) {
  # Cached-matrix container: bundles a matrix with a memo slot for its
  # inverse, exposed through four accessor closures. The closures share
  # this function's environment, so `<<-` updates the shared state.
  inv_cache <- NULL
  setmatrix <- function(y) {
    # Replacing the stored matrix invalidates any cached inverse.
    x <<- y
    inv_cache <<- NULL
  }
  getmatrix <- function() {
    x
  }
  setinv <- function(inv) {
    inv_cache <<- inv
  }
  getinv <- function() {
    inv_cache
  }
  # Expose the accessors by name; this list is the object's interface.
  list(setmatrix = setmatrix,
       getmatrix = getmatrix,
       setinv    = setinv,
       getinv    = getinv)
}
#The function checks if the x has already a stored inverted matrix
#if x contains the inverted matrix, it returns the value
#if x does not contain the inverted matrix, the function calculates it
#via the solve function, stores the calculation, and prints it
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held in `x` (a makeCacheMatrix-style
  # object), reusing the cached value when one is available. Extra
  # arguments in `...` are forwarded to solve().
  cached <- x$getinv()
  if (!is.null(cached)) {
    message('Checking cached data')
    # Cache hit: skip the computation entirely.
    return(cached)
  }
  # Cache miss: invert the stored matrix, memoize it, and return it.
  inverse <- solve(x$getmatrix(), ...)
  x$setinv(inverse)
  inverse
}
| /cachematrix.R | no_license | Dorota-D/ProgrammingAssignment2 | R | false | false | 1,817 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
#function makeCacheMatrix creates a list of values: it sets the matrix,
#gets the matrix, sets the inverted matrix, and gets the inverted matrix
makeCacheMatrix <- function(x = matrix()) {
#sets m to NULL
m <- NULL
#sets the global value of x to y, and m to NULL
setmatrix <- function(y) {
x <<- y
m <<- NULL
}
#function that gets the value of matrix x
getmatrix <- function() x
#function that sets the global value of m to inv (inverse)
setinv <- function(inv) m <<- inv
#function that gets the value of inverse of the matrix x
getinv <- function() m
list(setmatrix = setmatrix, getmatrix = getmatrix,
setinv = setinv, getinv = getinv)
}
#The function checks if the x has already a stored inverted matrix
#if x contains the inverted matrix, it returns the value
#if x does not contain the inverted matrix, the function calculates it
#via the solve function, tores the calculation, and prints it
cacheSolve <- function(x, ...) {
#checking if there is a non-null value of inverse of the matrix x
m <- x$getinv()
if(!is.null(m)){
message('Checking cached data')
#there was a non-null value and the function returns it
return(m)
}
#the function set the value of 'data' to the matrix x, stored in getmatix
data <- x$getmatrix()
#setting the value of m to the inverted matrix stored in 'data'
m <- solve(data, ...)
#setinv value is set to m
x$setinv(m)
#returning the inverted matrix
m
}
|
# Matrix algebra exercises, part 1: A and B are 3x3, C is 3x2.
A <- matrix ( c(1, 2, -1,
                -1, 3, -1,
                2, 2, 4), nrow=3, byrow=TRUE)
B <- matrix ( c(3, 2, -1,
                2, 3, 1,
                -1, 1,3),nrow=3, byrow=TRUE)
C <- matrix ( c(2, 0,
                -1, 1,
                3, 2), nrow=3, byrow=TRUE)
# Matrix A
A
# Matrix B
B
# Matrix C
C
#1a
A + B
#1b
A - 2*B
#1c
t(A) + B
# 1d
# NOTE(review): A is 3x3 and C is 3x2, so this addition is non-conformable
# and raises an error at runtime (presumably the exercise's point).
A + C
# 1e
t(A + B)
#1f
t(3*t(A) - 2*B)
#2
# Matrix-product exercises. P is 3x3, Q is 3x4, R is 4x2; x and y are
# length-3 vectors, z is length 4 (defined but unused below).
# FIX: the products below used the elementwise operator `*`, which errors
# on non-conformable shapes (e.g. 3x3 * 3x4); the exercises (PQ, PQR, QR',
# yx', x'y, x'Py) call for the matrix product `%*%`.
P <- matrix ( c(3, 2, 1,
                2, 5, -1,
                1, -1, 3), nrow=3, byrow = TRUE)
Q <- matrix ( c(1, 2, 2, 1,
                1, 1, 1, 4,
                1, 1, 2, 1), nrow=3,byrow = TRUE)
R <- matrix( c(1, 2,
               -5, 2,
               3, -1,
               -2, 2), nrow=4, byrow = TRUE)
x <- c(1, 0, -1)
y <- c(2, 3, 2)
z <- c(-1, -2, -3, -4)
#2a  PQ (3x4)
P %*% Q
#2b  PQR (3x2)
P %*% Q %*% R
#2c  QR' (3x2)
Q %*% t(R)
#2d  y x' -- outer product (3x3)
y %*% t(x)
#2e  x'y -- inner product (1x1)
t(x) %*% y
#2f  x'Py -- bilinear form (1x1)
t(x) %*% P %*% y
#2h  P(x + y) -- matrix-vector product (3x1)
P %*% (x + y)
#5  Determinant exercises.
a <- matrix ( c(4, 2, 0,
                5, 3, 0,
                6, 9, 2), nrow=3, byrow=TRUE)
b <- matrix ( c( 1, 0.8, 0.5,
                 0.8, 1, 0.6,
                 0.5, 0.6, 1), nrow=3, byrow=TRUE)
# NOTE(review): naming a matrix `c` masks base::c for non-call uses only;
# calls like c(...) still resolve to the base function, so this code runs.
c <- matrix ( c(5, 0, 0,
                0, 3, 0,
                0, 0, 1), nrow=3, byrow=TRUE)
d <- matrix ( c(1, 4, -1,
                3, 12, -3,
                0, 35, 7), nrow=3, byrow=TRUE)
e <- matrix ( c(2, 0, 4, 0,
                0, 3, 0, 5,
                5, 0, 1, 0,
                0, 4, 0, 1), nrow=4, byrow=TRUE)
f <- matrix ( c(2, 0, 1, 1, 1,
                0, 2, 3, 3, 3,
                1, 3, 1, 0, 0,
                1, 3, 0, 1, 0,
                1, 3, 0, 0, 1), nrow=5, byrow=TRUE)
#5a
det(a)
#5b
det(b)
#5c
det(c)
#5d
det(d)
#5e
det(e)
#5f
det(f)
#6  Matrix-inverse exercises.
seis_a <- matrix ( c(5, 1, -2,
                     2, 6, 3,
                     -1, 0, 3), nrow=3, byrow=TRUE)
# seis_b is a character (symbolic) matrix.
seis_b <- matrix ( c("a", "b", "b",
                     "b", "a", "b",
                     "b", "b", "a"), nrow=3, byrow=TRUE)
seis_c <- matrix ( c(5, 0, 0,
                     0, 8, 6,
                     0, 6, 5), nrow=3, byrow=TRUE)
seis_d <- matrix ( c(4, 3, 2, 1,
                     0, 3, 2, 1,
                     0, 0, 2, 1,
                     0, 0, 0, 1), nrow=4, byrow=TRUE)
#6a
solve(seis_a)
#6b
# NOTE(review): solve() on a character matrix raises an error at runtime;
# this exercise is presumably meant to be inverted symbolically by hand.
solve(seis_b)
#6c
solve(seis_c)
#6d
solve(seis_d)
#7  Rank via the QR decomposition.
siete_a <- matrix ( c(1, 0, 2, 1,
                      1, 1, 2, 0,
                      1, -1, 2, 2,
                      1, 1, 2, 0), nrow=4, byrow=TRUE)
siete_b <- matrix ( c( 1, 2, 3, 4, 5,
                       1, 0, -1, 3, 1,
                       2, 1, 1, 0, 1,
                       0, 3, 8, -5, 3,
                       -1, 2, 6, -2, 3,
                       1, 1, 2, -3, 0), nrow=6, byrow=TRUE)
#7a
siete_a <- qr(siete_a)   # overwrites the matrix with its qr() decomposition
siete_a$rank
#7b
siete_b <- qr(siete_b)
siete_b$rank
#12
# Euclidean (L2) norm of a numeric vector.
# FIX/generalization: the original summed exactly four squared components
# (vector[1]^2 + ... + vector[4]^2), which returns NA for vectors shorter
# than 4 and silently ignores components beyond the fourth. sqrt(sum(v^2))
# is identical for length-4 inputs (the only lengths used below) and
# correct for any length.
magnitude.vector <- function (vector){
  magnitude <- sqrt(sum(vector^2))
  return (magnitude)
}
# Orthogonality checks: for two plain vectors, %*% yields the inner (dot)
# product as a 1x1 matrix; a zero dot product means the pair is orthogonal.
# (Output strings are in Spanish; they are runtime messages, left as-is.)
t <- c(0.5, 0.5, 0.5, 0.5)
u <- c(1, 0, -1, 0)
v <- c(sqrt(2)/2, 0, sqrt (2)/2, 0)
if (t%*%u == 0){
  # Orthogonal pair: normalize each vector to unit length and report both.
  magnitude_t <- magnitude.vector(t)
  magnitude_u <- magnitude.vector(u)
  normal_t <- t/magnitude_t
  normal_u <- u/magnitude_u
  cat("t y u son ortogonales y su vectores ortonormales son normal_t " , normal_t ," y normal_u", normal_u)
}else{
  "t y u NO son ortogonales"
}
if (t%*%v == 0){
  "t y v son ortogonales"
  2+2   # NOTE(review): stray leftover expression; it has no effect here
}else{
  "t y v NO son ortogonales"
}
if (u%*%v == 0){
  "u y v son ortogonales"
}else{
  # NOTE(review): message says "t y u" but this branch compares u and v.
  "t y u NO son ortogonales"
}
#13
Trece <- matrix ( c( sqrt(3)/2, 1/2, 0,
                     -sqrt(2)/2, sqrt(6)/4, -sqrt(2)/2,
                     -sqrt(2)/2, sqrt(6)/4, sqrt(2)/2), nrow=3, byrow=TRUE)
Trece
| /Libro capitulo 1/Tarea 1/Tarea1.R | no_license | YosefGuevara012/Multivariable-analysis | R | false | false | 3,746 | r |
A <- matrix ( c(1, 2, -1,
-1, 3, -1,
2, 2, 4), nrow=3, byrow=TRUE)
B <- matrix ( c(3, 2, -1,
2, 3, 1,
-1, 1,3),nrow=3, byrow=TRUE)
C <- matrix ( c(2, 0,
-1, 1,
3, 2), nrow=3, byrow=TRUE)
# Matrix A
A
#Matrix B
B
# Matrix C
C
#1a
A + B
#1b
A - 2*B
#1c
t(A) + B
# 1d
A + C
# 1e
t(A + B)
#1f
t(3*t(A) - 2*B)
#2
P <- matrix ( c(3, 2, 1,
2, 5, -1,
1, -1, 3), nrow=3, byrow = TRUE)
Q <- matrix ( c(1, 2, 2, 1,
1, 1, 1, 4,
1, 1, 2, 1), nrow=3,byrow = TRUE)
R <- matrix( c(1, 2,
-5, 2,
3, -1,
-2, 2), nrow=4, byrow = TRUE)
x <- c(1, 0, -1)
y <- c(2, 3, 2)
z <- c(-1, -2, -3, -4)
#2a
P*Q
#2b
P*Q*R
#2c
Q*t(R)
#2d
y*t(x)
#2e
t(x)*y
#2f
t(x)*P*y
#2h
P*(x + y)
#5
a <- matrix ( c(4, 2, 0,
5, 3, 0,
6, 9, 2), nrow=3, byrow=TRUE)
b <- matrix ( c( 1, 0.8, 0.5,
0.8, 1, 0.6,
0.5, 0.6, 1), nrow=3, byrow=TRUE)
c <- matrix ( c(5, 0, 0,
0, 3, 0,
0, 0, 1), nrow=3, byrow=TRUE)
d <- matrix ( c(1, 4, -1,
3, 12, -3,
0, 35, 7), nrow=3, byrow=TRUE)
e <- matrix ( c(2, 0, 4, 0,
0, 3, 0, 5,
5, 0, 1, 0,
0, 4, 0, 1), nrow=4, byrow=TRUE)
f <- matrix ( c(2, 0, 1, 1, 1,
0, 2, 3, 3, 3,
1, 3, 1, 0, 0,
1, 3, 0, 1, 0,
1, 3, 0, 0, 1), nrow=5, byrow=TRUE)
#5a
det(a)
#5b
det(b)
#5c
det(c)
#5d
det(d)
#5e
det(e)
#5f
det(f)
#6
seis_a <- matrix ( c(5, 1, -2,
2, 6, 3,
-1, 0, 3), nrow=3, byrow=TRUE)
seis_b <- matrix ( c("a", "b", "b",
"b", "a", "b",
"b", "b", "a"), nrow=3, byrow=TRUE)
seis_c <- matrix ( c(5, 0, 0,
0, 8, 6,
0, 6, 5), nrow=3, byrow=TRUE)
seis_d <- matrix ( c(4, 3, 2, 1,
0, 3, 2, 1,
0, 0, 2, 1,
0, 0, 0, 1), nrow=4, byrow=TRUE)
#6a
solve(seis_a)
#6b
solve(seis_b)
#6c
solve(seis_c)
#6d
solve(seis_d)
#7
siete_a <- matrix ( c(1, 0, 2, 1,
1, 1, 2, 0,
1, -1, 2, 2,
1, 1, 2, 0), nrow=4, byrow=TRUE)
siete_b <- matrix ( c( 1, 2, 3, 4, 5,
1, 0, -1, 3, 1,
2, 1, 1, 0, 1,
0, 3, 8, -5, 3,
-1, 2, 6, -2, 3,
1, 1, 2, -3, 0), nrow=6, byrow=TRUE)
#7a
siete_a <- qr(siete_a)
siete_a$rank
#7b
siete_b <- qr(siete_b)
siete_b$rank
#12
magnitude.vector <- function (vector){
magnitude <- sqrt(vector[1]**2 + vector[2]**2 + vector[3]**2 + vector[4]**2)
return (magnitude)
}
t <- c(0.5, 0.5, 0.5, 0.5)
u <- c(1, 0, -1, 0)
v <- c(sqrt(2)/2, 0, sqrt (2)/2, 0)
if (t%*%u == 0){
magnitude_t <- magnitude.vector(t)
magnitude_u <- magnitude.vector(u)
normal_t <- t/magnitude_t
normal_u <- u/magnitude_u
cat("t y u son ortogonales y su vectores ortonormales son normal_t " , normal_t ," y normal_u", normal_u)
}else{
"t y u NO son ortogonales"
}
if (t%*%v == 0){
"t y v son ortogonales"
2+2
}else{
"t y v NO son ortogonales"
}
if (u%*%v == 0){
"u y v son ortogonales"
}else{
"t y u NO son ortogonales"
}
#13
Trece <- matrix ( c( sqrt(3)/2, 1/2, 0,
-sqrt(2)/2, sqrt(6)/4, -sqrt(2)/2,
-sqrt(2)/2, sqrt(6)/4, sqrt(2)/2), nrow=3, byrow=TRUE)
Trece
|
#' returns a phylogenetic tree painted for ouwie and data
#'
#' @param phy a phylogenetic tree of class phylo (ape)
#' @param data the data.frame of characters (see data in ?treedata)
#' @param clades a list of clades defined by species pairs,
#'    e.g. list(c(sp1, sp2), c(sp3, sp4)), or just a vector c(sp1, sp2).
#'    The descendents of the MRCA of sp1 and sp2 define the regime.
#'    Later clades are painted over earlier ones, so they should be in
#'    order of nesting or otherwise non-overlapping.
#' @param show_plot logical, should the resulting painted tree be plotted
#' @return a list with the phy, updated to have regimes at node labels
#'    (for OUwie), the data matrix formatted with regimes (for OUwie),
#'    and a vector of colors corresponding to edges, usable for plotting.
#' @import ape
#' @import geiger
#' @export
paint_phy <- function(phy, data, clades, show_plot=TRUE){
  # drop unmatched tips & characters
  # may produce difficulties on already-dropped trees?
  pruned <- treedata(phy, data)
  phy <- pruned$phy
  data <- pruned$data
  # one regime or multiple? Extract the species pairs defining each regime.
  if(is.list(clades)){
    sp1 <- sapply(clades, function(x) x[1])
    sp2 <- sapply(clades, function(x) x[2])
    n <- length(clades)
  } else if(is.character(clades)){
    sp1 <- clades[1]
    sp2 <- clades[2]
    n <- 1
  } else {
    # FIX: base R has no error() function; stop() signals the error.
    stop("clades input not recognized")
  }
  desc <- vector("list", length=n)
  # Regime 0 / color 1 are the background (unpainted) state.
  regimes <- rep(0, length(phy$edge[,1]))
  colors <- rep(1, length(phy$edge[,1]))
  for(i in 1:n){
    # Create the painting: later clades overwrite earlier assignments.
    desc[[i]] <- get_clade(phy, sp1[i], sp2[i])
    regimes[desc[[i]]] <- i
    colors[desc[[i]]] <- i+1
  }
  # pick out tips and nodes: an edge ends at a tip iff its child never
  # appears as a parent.
  tips <- !(phy$edge[,2] %in% phy$edge[,1])
  regime_tips <- regimes[tips]
  regime_nodes <- c(0, regimes[!tips]) # adds root back on
  # stick them on the tree
  phy$node.label <- as.character(regime_nodes)
  Reg <- data.frame(Reg=as.integer(regime_tips),
                    row.names=phy$tip.label)
  data <- merge(Reg, as.data.frame(data), by="row.names")
  names(data)[1] <- "Genus_species"
  # optionally plot the tree to verify the painting
  if(show_plot){
    plot(phy, edge.color=colors)
    nodelabels(pch=21, bg=c("black", colors[!tips]))
    tiplabels(pch=21, bg=colors[tips])
  }
  list(phy=phy, data=data, colors=colors)
}
#' A function that returns the edge numbers of all branches
#' descended from the mrca of sp1 and sp2.
#' @param phy a phylogenetic tree of class phylo (ape)
#' @param sp1 the tip.label of the 1st species
#' @param sp2 the tip.label of the 2nd species
#'   the descendents of the MRCA of sp1 and sp2 define the regime
get_clade <- function(phy, sp1, sp2){
  M <- mrca(phy)              # ape: pairwise MRCA node matrix, indexed by tip label
  who <- M[sp1, sp2]          # node number of the MRCA of the two species
  desc <- which(phy$edge[,1] %in% who)   # edges leaving the MRCA itself
  l1 <- length(desc)
  l2 <- length(desc)+1
  # Expand the edge set one generation at a time (children become the next
  # set of parents) until no new descendant edges appear (a fixed point).
  while(l1!=l2){
    l1 <- length(desc)
    who <- phy$edge[desc, 2]
    desc <- unique(c(desc, which(phy$edge[,1] %in% who)))
    l2 <- length(desc)
  }
  desc
}
| /demo/method2/method2_tools.R | permissive | cboettig/wrightscape | R | false | false | 2,933 | r |
#' returns a phylogenetic tree painted for ouwie and data
#'
#' @param phy a phylogenetic tree of class phylo (ape)
#' @param clades: a list of clades defined by species pairs,
#' e.g. list(c(sp1, sp2), c(sp3, sp4)), or just a vector c(sp1, sp2)
#' the descendents of the MRCA of sp1 and sp2 define the regime
#' later clades are painted over earlier ones, so should be in
#' order of nesting or otherwise non-overlapping.
#' @data the data.frame of characters (see data in ?treedata)
#' @show_plot logical, should I plot the resulting tree
#' @return a list with the phy, updated to have regimes at node labels
#' (for OUwie) and the data matrix formatted with regimes, for OUwie,
#' and a list of colors corresponding to edges, that can be used for plotting.
#' @import ape
#' @import geiger
#' @export
paint_phy <- function(phy, data, clades, show_plot=TRUE){
# drop unmatched tips & characters
# may produce difficulties on already-dropped trees?
pruned <- treedata(phy, data)
phy <- pruned$phy
data <- pruned$data
# one regime or multiple?
if(is.list(clades)){
sp1 <- sapply(clades, function(x) x[1])
sp2 <- sapply(clades, function(x) x[2])
n <- length(clades)
} else if(is.character(clades)){
sp1 <- clades[1]
sp2 <- clades[2]
n <- 1
} else {
error("clades input not recognized")
}
desc <- vector("list", length=n)
regimes <- rep(0, length(phy$edge[,1]))
colors <- rep(1, length(phy$edge[,1]))
for(i in 1:n){
# Create the painting
desc[[i]] <- get_clade(phy, sp1[i], sp2[i])
regimes[desc[[i]]] <- i
colors[desc[[i]]] <- i+1
}
# pick out tips and nodes
tips <- !(phy$edge[,2] %in% phy$edge[,1])
regime_tips <- regimes[tips]
regime_nodes <- c(0, regimes[!tips]) # adds root back on
# stick them on the tree
phy$node.label <- as.character(regime_nodes)
Reg <- data.frame(Reg=as.integer(regime_tips),
row.names=phy$tip.label)
data <- merge(Reg, as.data.frame(data), by="row.names")
names(data)[1] <- "Genus_species"
# optionally plot the tree to verify the painting
if(show_plot){
plot(phy, edge.color=colors)
nodelabels(pch=21, bg=c("black", colors[!tips]))
tiplabels(pch=21, bg=colors[tips])
}
list(phy=phy, data=data, colors=colors)
}
#' A function that returns the edge numbers of all branches
#' descended from the mrca of sp1 and sp2.
#' @param phy a phylogenetic tree of class phylo (ape)
#' @param sp1 the tip.label of the 1st species
#' @param sp1 the tip.label of the 2nd species
#' the descendents of the MRCA of sp1 and sp2 define the regime
get_clade <- function(phy, sp1, sp2){
M <- mrca(phy)
who <- M[sp1, sp2]
desc <- which(phy$edge[,1] %in% who)
l1 <- length(desc)
l2 <- length(desc)+1
while(l1!=l2){
l1 <- length(desc)
who <- phy$edge[desc, 2]
desc <- unique(c(desc, which(phy$edge[,1] %in% who)))
l2 <- length(desc)
}
desc
}
|
# Browse one level down the World Flora Online taxonomic backbone:
# - taxon = NULL          -> list all families
# - matched order         -> return its row from the bundled vascular.families data
# - matched family        -> list its genera
# - matched genus         -> list its species
# - matched species       -> list its infraspecific taxa
# `...` is forwarded to WFO.match(); WFO.one() reduces the match to a
# single best record.
WFO.browse <- function(
    taxon=NULL, WFO.file=NULL, WFO.data=NULL,
    accepted.only = FALSE, acceptedNameUsageID.match = TRUE, ...
)
{
    # Read the WFO backbone from file only when the caller did not supply it.
    if (is.null(WFO.data) == TRUE) {
        message(paste("Reading WFO data"))
        WFO.data <- data.table::fread(WFO.file, encoding="UTF-8")
    }
    # if nothing provided, then give list of all the families
    # rank.found1 tracks whether a browsable rank was handled.
    rank.found1 <- as.logical(0)
    if (is.null(taxon) == TRUE) {
        rank.found <- "all"
        rank.found1 <- as.logical(1)
        # changed for World Flora Online DEC 2021 release
        # (the release mixes capitalizations of the rank labels)
        right.level <- (WFO.data$taxonRank == "family" |
                            WFO.data$taxonRank == "Family" |
                            WFO.data$taxonRank == "FAMILY")
        result <- WFO.data[right.level, ]
        result <- result[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
        result <- result[order(result$scientificName), ]
        cat(paste("Results are a list of all families", "\n", sep=""))
    }else{
        # Standardize the submitted name to a single backbone record.
        WFO.found <- WFO.one(WFO.match(taxon, WFO.file=NULL, WFO.data=WFO.data, ...))
        if (nrow(WFO.found) == 0) {stop("no matches found")}
        taxon.found <- WFO.found$scientificName
        rank.found <- WFO.found$taxonRank
        cat(paste("Submitted name ", taxon, " was matched with: ", taxon.found, " of taxonRank: ", rank.found, "\n", sep=""))
    }
    # species: list infraspecific taxa (rows with a non-empty
    # verbatimTaxonRank) sharing the same genus + specific epithet.
    if (rank.found %in% c("species", "Species", "SPECIES", "nothospecies")) {
        rank.found1 <- as.logical(1)
        browse.found <- WFO.data[WFO.data$genus==WFO.found$genus & WFO.data$specificEpithet==WFO.found$specificEpithet,]
        right.level <- browse.found$verbatimTaxonRank != ""
        browse.found1 <- browse.found[right.level, ]
        browse.found <- browse.found1[order(browse.found1$scientificName), ]
        result <- browse.found[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
    }
    # changed for World Flora Online DEC 2021 release
    # genus: list species (and nothospecies) belonging to the genus.
    if (rank.found %in% c("genus", "Genus", "GENUS")) {
        rank.found1 <- as.logical(1)
        browse.found <- WFO.data[WFO.data$genus==taxon.found,]
        right.level <- (browse.found$taxonRank == "SPECIES" | browse.found$taxonRank == "Species" | browse.found$taxonRank == "species" | browse.found$taxonRank == "nothospecies")
        browse.found1 <- browse.found[right.level, ]
        browse.found <- browse.found1[order(browse.found1$scientificName), ]
        result <- browse.found[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
    }
    # changed for World Flora Online DEC 2021 release
    # family: list genera belonging to the family.
    if (rank.found %in% c("family", "Family", "FAMILY")) {
        rank.found1 <- as.logical(1)
        browse.found <- WFO.data[WFO.data$family==taxon.found,]
        # changed for World Flora Online DEC 2021 release
        right.level <- (browse.found$taxonRank == "genus" |
                            browse.found$taxonRank == "Genus" |
                            browse.found$taxonRank == "GENUS")
        browse.found1 <- browse.found[right.level, ]
        browse.found <- browse.found1[order(browse.found1$scientificName), ]
        result <- browse.found[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
    }
    # changed for World Flora Online DEC 2021 release
    # order: return the matching row of the packaged vascular.families
    # data set (loaded lazily into a private environment) and exit early.
    if (rank.found %in% c("order", "ORDER")) {
        .WorldFlora <- new.env()
        utils::data("vascular.families", package="WorldFlora", envir=.WorldFlora)
        WFO.families <- eval(as.name("vascular.families"), envir=.WorldFlora)
        order.match <- WFO.families[WFO.families$Order == taxon.found, ]
        return(order.match)
    }
    if (rank.found1 == FALSE) {stop("Listings are only provided for families, genera and species")}
    if (nrow(result) == 0) {
        message("No taxa at the next level")
        return(NULL)
    }else{
        if (accepted.only == TRUE) {
            # changed for World Flora Online DEC 2021 release
            result <- result[result$taxonomicStatus %in% c("Accepted", "ACCEPTED"), ]
        }else{
            if (acceptedNameUsageID.match == TRUE) {
                # For synonyms, resolve acceptedNameUsageID to the accepted
                # scientific name; an empty ID means the record itself is
                # the accepted usage, so New.name stays "".
                result$New.name <- rep("", nrow(result))
                for (i in 1:nrow(result)) {
                    UsageID <- as.character(result[i, "acceptedNameUsageID"])
                    if (UsageID != "") {
                        WFO.match1 <- WFO.data[WFO.data$taxonID == UsageID, ]
                        if (nrow(WFO.match1) == 0) {
                            warning(paste("WARNING: no data for ", UsageID, " from ", result[i, "scientificName"]))
                        }else if (nrow(WFO.match1) > 1) {
                            warning(paste("WARNING: more than 1 row of matches for ", UsageID, " from ", result[i, "scientificName"]))
                        }else{
                            result[i, "New.name"] <- WFO.match1[1, "scientificName"]
                        }
                    }
                }
            }
        }
    }
    return(result)
}
| /R/WFO.browse.R | no_license | cran/WorldFlora | R | false | false | 5,250 | r | WFO.browse <- function(
taxon=NULL, WFO.file=NULL, WFO.data=NULL,
accepted.only = FALSE, acceptedNameUsageID.match = TRUE, ...
)
{
if (is.null(WFO.data) == TRUE) {
message(paste("Reading WFO data"))
WFO.data <- data.table::fread(WFO.file, encoding="UTF-8")
}
# if nothing provided, then give list of all the families
rank.found1 <- as.logical(0)
if (is.null(taxon) == TRUE) {
rank.found <- "all"
rank.found1 <- as.logical(1)
# changed for World Flora Online DEC 2021 release
right.level <- (WFO.data$taxonRank == "family" |
WFO.data$taxonRank == "Family" |
WFO.data$taxonRank == "FAMILY")
result <- WFO.data[right.level, ]
result <- result[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
result <- result[order(result$scientificName), ]
cat(paste("Results are a list of all families", "\n", sep=""))
}else{
WFO.found <- WFO.one(WFO.match(taxon, WFO.file=NULL, WFO.data=WFO.data, ...))
if (nrow(WFO.found) == 0) {stop("no matches found")}
taxon.found <- WFO.found$scientificName
rank.found <- WFO.found$taxonRank
cat(paste("Submitted name ", taxon, " was matched with: ", taxon.found, " of taxonRank: ", rank.found, "\n", sep=""))
}
if (rank.found %in% c("species", "Species", "SPECIES", "nothospecies")) {
rank.found1 <- as.logical(1)
browse.found <- WFO.data[WFO.data$genus==WFO.found$genus & WFO.data$specificEpithet==WFO.found$specificEpithet,]
right.level <- browse.found$verbatimTaxonRank != ""
browse.found1 <- browse.found[right.level, ]
browse.found <- browse.found1[order(browse.found1$scientificName), ]
result <- browse.found[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
}
# changed for World Flora Online DEC 2021 release
if (rank.found %in% c("genus", "Genus", "GENUS")) {
rank.found1 <- as.logical(1)
browse.found <- WFO.data[WFO.data$genus==taxon.found,]
right.level <- (browse.found$taxonRank == "SPECIES" | browse.found$taxonRank == "Species" | browse.found$taxonRank == "species" | browse.found$taxonRank == "nothospecies")
browse.found1 <- browse.found[right.level, ]
browse.found <- browse.found1[order(browse.found1$scientificName), ]
result <- browse.found[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
}
# changed for World Flora Online DEC 2021 release
if (rank.found %in% c("family", "Family", "FAMILY")) {
rank.found1 <- as.logical(1)
browse.found <- WFO.data[WFO.data$family==taxon.found,]
# changed for World Flora Online DEC 2021 release
right.level <- (browse.found$taxonRank == "genus" |
browse.found$taxonRank == "Genus" |
browse.found$taxonRank == "GENUS")
browse.found1 <- browse.found[right.level, ]
browse.found <- browse.found1[order(browse.found1$scientificName), ]
result <- browse.found[, c("taxonID", "scientificName", "scientificNameAuthorship", "taxonRank", "taxonomicStatus", "acceptedNameUsageID")]
}
# changed for World Flora Online DEC 2021 release
if (rank.found %in% c("order", "ORDER")) {
.WorldFlora <- new.env()
utils::data("vascular.families", package="WorldFlora", envir=.WorldFlora)
WFO.families <- eval(as.name("vascular.families"), envir=.WorldFlora)
order.match <- WFO.families[WFO.families$Order == taxon.found, ]
return(order.match)
}
if (rank.found1 == FALSE) {stop("Listings are only provided for families, genera and species")}
if (nrow(result) == 0) {
message("No taxa at the next level")
return(NULL)
}else{
if (accepted.only == TRUE) {
# changed for World Flora Online DEC 2021 release
result <- result[result$taxonomicStatus %in% c("Accepted", "ACCEPTED"), ]
}else{
if (acceptedNameUsageID.match == TRUE) {
result$New.name <- rep("", nrow(result))
for (i in 1:nrow(result)) {
UsageID <- as.character(result[i, "acceptedNameUsageID"])
if (UsageID != "") {
WFO.match1 <- WFO.data[WFO.data$taxonID == UsageID, ]
if (nrow(WFO.match1) == 0) {
warning(paste("WARNING: no data for ", UsageID, " from ", result[i, "scientificName"]))
}else if (nrow(WFO.match1) > 1) {
warning(paste("WARNING: more than 1 row of matches for ", UsageID, " from ", result[i, "scientificName"]))
}else{
result[i, "New.name"] <- WFO.match1[1, "scientificName"]
}
}
}
}
}
}
return(result)
}
|
# Tutorial: handling missing values (NA) in R
lactate<-c(0.2, 3.6, 4.2, NA, 6.1, 2.5)
is.na(lactate)
which(is.na(lactate)) # is.na() tests each element for NA; which() returns the TRUE indices
x1<-c(1,4,3,NA,7,8,NA,7,7)
x1 < 0 # comparisons involving NA yield NA, not TRUE/FALSE
x1 == NA # NA cannot be compared with ==; the result is always NA
is.na(x1) <- which(x1==7) # is.na()<- can also be used to SET elements to NA (here, where x1 == 7)
# Computations in the presence of missing values
mean(lactate)
sum(lactate)
var(lactate)
sd(lactate)
mean(lactate,na.rm = TRUE) # na.rm = TRUE drops NA values before the computation
sd(lactate,na.rm = TRUE)
# Missing values in regression models
ptid<-c(1,2,3,4,5,6)
data_test = data.frame(ptid,lactate)
model.omit<-lm(ptid ~ lactate, data=data_test, na.action = na.omit)
# lm() fits a linear model; '~' separates response and predictor; data names
# the data frame to use; na.action controls NA handling, na.omit drops
# incomplete rows before fitting
model.exclude<-lm(ptid ~ lactate, data=data_test, na.action = na.exclude)
# na.exclude also drops NAs from the fit, but residuals/fitted values are
# padded back to the original length (NA shown in the output)
resid(model.omit)# residuals
resid(model.exclude)
fitted(model.omit)# fitted values
fitted(model.exclude)
# Data frames containing missing values
sex<-c('m','f',NA,'f','m','m') # NA takes the representation of the containing type
lactate<-c(0.2,3.3,4.5,NA,6.1,5.6)
data_modify<-data.frame(ptid,sex,lactate)
data_modify
na.fail(data_modify)
# na.fail() checks the object as a whole: it signals an error if any NA is
# present, otherwise returns the object unchanged.
# Note the difference from is.na():
# is.na() tests every element individually; na.fail() is an all-or-nothing check
na.omit(data_modify)
# drop the rows that contain NA
complete.cases(data_modify)
# for a data frame: TRUE per row with no NA, FALSE otherwise
complete_data<-data_modify[complete.cases(data_modify),]
complete_data
# keep only the complete rows of the data frame
na.fail(complete_data)
# Locating missing values
is.na(data_modify)
# reminder:
# is.na() tests each value; na.fail() only reports whether any NA exists at all
which(is.na(data_modify))
# which() returns the indices where the condition is TRUE
# note: a data frame is indexed column-major here (top-to-bottom within a
# column, then left-to-right across columns)
unique(unlist(lapply(data_modify,function (x) which(is.na(x)))))
# lapply(X, FUN) returns a list of the same length as X, each element being
# FUN applied to the corresponding element of X.
# X is a list object (each element a vector); other object types are
# coerced via as.list().
# Here each column of data_modify is processed as a vector, returning the
# NA indices within that column.
# unlist() flattens the list into a plain vector
# unique() removes duplicated indices
# Drop variables with more than 10% missing values
missing_percent<-unlist(lapply(data_modify, function(x) sum(is.na(x))))/nrow(data_modify)
# sum() over a logical vector counts the TRUE elements
# nrow()/ncol() return the number of rows/columns of a data frame
missing_percent
data_tenth <- data_modify[,missing_percent<=0.1]
data_tenth
# keep only the columns with at most 10% missing values
# data frame[i,j] selects row i / column j; i and j may also be logical conditions
# Informative missing values
data_discrete<-data.frame(ptid=c(1,2,3,4,5),
                          lactate=c('low',NA,'moderate',NA,'high'),
                          death=c('y','y',NA,'n','y'))
data_discrete
na.fail(data_discrete)
# Lactate here is categorical; clinically, a missing lactate usually means
# the patient was stable enough that no arterial blood gas (which includes
# lactate) was drawn.
# Dropping those patients would therefore discard a lot of information.
library(tree)
new_data_discrete<-na.tree.replace(data_discrete)
na.fail(new_data_discrete)
# na.tree.replace() targets categorical (factor) variables:
# it adds a new "NA" level, so NA-aware operations no longer treat it as missing
library(gam)
new_data_discrete_continueNum<-na.gam.replace(data_modify)
new_data_discrete_continueNum
na.fail(new_data_discrete_continueNum)
# na.gam.replace() handles NA in continuous variables by substituting the mean;
# NA in character/factor variables is also given an explicit representation
| /2 缺失值处理.R | no_license | GalaChen/R | R | false | false | 4,567 | r | #缺失值处理
lactate<-c(0.2, 3.6, 4.2, NA, 6.1, 2.5)
is.na(lactate)
which(is.na(lactate)) #is.na可以寻找NA值,which返回index
x1<-c(1,4,3,NA,7,8,NA,7,7)
x1 < 0 #当变量中存在NA,并进行逻辑判断时,NA不判别,而是以NA体现
x1 == NA #NA不可以和0进行比较,NA也不可以和其它数值进行比较
is.na(x1) <- which(x1==7) #函数is.na可以用于创建NA值
#带有缺失值的计算
mean(lactate)
sum(lactate)
var(lactate)
sd(lactate)
mean(lactate,na.rm = TRUE) #na.rm作为逻辑判据,确定是否在计算前去除NA值
sd(lactate,na.rm = TRUE)
#回归方程里的缺失值处理
ptid<-c(1,2,3,4,5,6)
data_test = data.frame(ptid,lactate)
model.omit<-lm(ptid ~ lactate, data=data_test, na.action = na.omit)
#lm为线性回归函数,‘~’前后分别表示因变量和自变量,data明确需要操作的data frame,
#na.action 用于明确对NA值进行的操作,na.omit表示删去缺失值
model.exclude<-lm(ptid ~ lactate, data=data_test, na.action = na.exclude)
#na.exclude表计算中示去除NA值,但数据展现时依旧显示NA
resid(model.omit)#计算残差
resid(model.exclude)
fitted(model.omit)#计算拟合值
fitted(model.exclude)
#带有缺失值的数据框
sex<-c('m','f',NA,'f','m','m') #NA在不同的变量类型中,其表现形式也有所不同
lactate<-c(0.2,3.3,4.5,NA,6.1,5.6)
data_modify<-data.frame(ptid,sex,lactate)
data_modify
na.fail(data_modify)
#na.fail可判定变量中是否有缺失值,如果有NA则返回报错语句;如果没有NA,则返回变量的内容
#注意na.fail与is.na区别!!!
#is.na是对变量里的每一个值进行逻辑判断,而na.fail则是对变量进行整体判断
na.omit(data_modify)
#省略变量中的缺失值
complete.cases(data_modify)
#对于data frame,确定每一个rou是否完整。没有NA为TRUE,有NA为False
complete_data<-data_modify[complete.cases(data_modify),]
complete_data
#对data frame自身进行操作,使data frame完成判定条件后展现出来
na.fail(complete_data)
#缺失值的寻找定位
is.na(data_modify)
#再啰嗦一下
#is.na对每一个值进行判定,而na.fail则返回整体的情况,只确定是否有NA
which(is.na(data_modify))
#which返回判定条件为真的index
#注意data frame是从第一列开始,先自上而下,再从左至右进行排序
unique(unlist(lapply(data_modify,function (x) which(is.na(x)))))
#lapply(x,FUN)lapply的返回值是和一个和X有相同的长度的list对象,
#这个list对象中的每个元素是将函数FUN应用到X的每一个元素。
#其中X为List对象(该list的每个元素都是一个向量)
#其他类型的对象会被R通过函数as.list()自动转换为list类型。
#此处,data_modify中的每一列为一个向量进行操作,返回的值为向量中对应的index
#unlist是将list全部展开为数值
#unique去除变量中的重复值
#删除缺失超过10%的变量
missing_percent<-unlist(lapply(data_modify, function(x) sum(is.na(x))))/nrow(data_modify)
#注意sum在这里只针对判定条件为TRUE的因素进行计算
#nrow及ncol返回data frame行与列的数量
missing_percent
data_tenth <- data_modify[,missing_percent<=0.1]
data_tenth
#根据判定条件,实现只显示缺失小于10%的factor
#data frame[i,j]表示取第i行第j列的元素;i,j也可以用条件判定来替代
#有意义的缺失值
data_discrete<-data.frame(ptid=c(1,2,3,4,5),
lactate=c('low',NA,'moderate',NA,'high'),
death=c('y','y',NA,'n','y'))
data_discrete
na.fail(data_discrete)
#乳酸值为分类变量,根据临床实践经验,乳酸值的缺失是由于患者病情稳定而不需要抽动脉血气
#因为乳酸是动脉血气里的一个项目
#此处如果将缺失值的病人删除会损失大量的信息
library(tree)
new_data_discrete<-na.tree.replace(data_discrete)
na.fail(new_data_discrete)
#na.tree.replace 针对的是分类字符型变量
#可在data frame中添加一个新的名为NA的level,以此避免针对NA的运算对NA level发生作用
library(gam)
new_data_discrete_continueNum<-na.gam.replace(data_modify)
new_data_discrete_continueNum
na.fail(new_data_discrete_continueNum)
#gam对连续变量的NA进行操作,用平均值来替代NA值,
#字符型变量中的NA也会被去字符化
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComputerolMeanMaxMinDeriv.R
\name{ComputerolMeanMaxMinDeriv}
\alias{ComputerolMeanMaxMinDeriv}
\title{ComputerolMeanMaxMinDeriv}
\usage{
ComputerolMeanMaxMinDeriv(
df,
date_column = "START_DATE",
excludeTansf = "START_DATE"
)
}
\arguments{
\item{df}{The input data frame.}
\item{date_column}{Name of the date column. Default is 'START_DATE'.}
\item{excludeTansf}{The double (numeric) columns to exclude from the transformation. Default is 'START_DATE'.}
}
\value{
The dataframe with the transformed features
}
\description{
Function to compute rolMean, rolMin, rolMax and rolDeriv on double columns of a dataset
}
| /man/ComputerolMeanMaxMinDeriv.Rd | no_license | thomasferte/PredictCovidOpen | R | false | true | 636 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComputerolMeanMaxMinDeriv.R
\name{ComputerolMeanMaxMinDeriv}
\alias{ComputerolMeanMaxMinDeriv}
\title{ComputerolMeanMaxMinDeriv}
\usage{
ComputerolMeanMaxMinDeriv(
df,
date_column = "START_DATE",
excludeTansf = "START_DATE"
)
}
\arguments{
\item{df}{The dataset}
\item{date_column}{The date column. Default is 'START_DATE'.}
\item{excludeTansf}{The double column not to take into account.}
}
\value{
The dataframe with the transformed features
}
\description{
Function to compute rolMean, rolMin, rolMax and rolDeriv on double columns of a dataset
}
|
# Temperature Almond Indicators in California
# 1/2016
#
# Select the number of years to process: observations use the baseline
# period (bl_years_tot), model runs use the full period (years_tot).
# (Idiomatic if/else replaces a scalar ifelse() whose branches were
# assignments used only for their side effects; behavior is unchanged.)
tot_yrs <- if (flag_obs == 1) bl_years_tot else years_tot
#---------------------- TMIN INDICATORS
# Indicator 1: per grid cell, count the years whose mean February Tmin is
# below 39.2 F (= 4 C, per the "avg monthly Tmin for Feb < 4C" design).
if (almonT == 1){
dum <- array(0,dim=c(lon_num,lat_num))        # model: Feb-mean Tmin (overwritten each year)
dum2 <- array(0,dim=c(lon_num,lat_num))       # model: running count of years below 39.2 F
dum3 <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs: Feb-mean Tmin (overwritten each year)
dum4 <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs: running count of years below 39.2 F
im = 2 # month is Feb
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# compute the February mean Tmin for each year in the period
# (the original comment said "precipitation"; this script operates on temperature)
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1) }
# if model simulation then model matrix, if obs then use obs matrix
ifelse(flag_obs == 0,dum <- apply(Temp_val[,,inS:inE],c(1,2),mean),
dum3 <- apply(Ob_temp[,,inS:inE],c(1,2),mean))
# add 1 for every grid cell whose Feb-mean Tmin is strictly below 39.2 F
ifelse(flag_obs==0,dum2<-apply(dum,c(1,2),function(x){sum(x<39.2)}) + dum2,
dum4<-apply(dum3,c(1,2),function(x){sum(x<39.2)}) + dum4)
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} # for years
print("finish almon 1")
}
# Indicator 2: minimum-temperature thresholds over the blossom period.
# The daily window spans February AND March (inE adds days_mon[im] and
# days_mon[im+1]).
# Temp_val <-- Pr_Tmin_val, etc.
# Accumulates, per grid cell and per threshold in Al2_Thres_val, the number
# of blossom-period days with Tmin strictly below the threshold, into
# mod_dum_t (model) or Ob_dum_t (observations). Both accumulators are not
# initialized here -- presumably zeroed by the caller; TODO confirm.
if (almonT == 2){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
#for (im in 1:12) {
im=2
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period (temperature, not precipitation)
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
# note: the window covers both Feb and Mar (months im and im+1)
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]+days_monlp[im+1]-1,inE<-inS+days_mon[im]+days_mon[im+1]-1)
}
# count the days strictly below each threshold (code uses '<', not "at or
# below" as the original comment claimed) and accumulate across years
#
if(flag_obs==0) {
for (ithres in 1:Al2_Num_thre) {
mod_dum_t[,,ithres]<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<Al2_Thres_val[ithres])} )+ mod_dum_t[,,ithres]
} # for loop
} # flag_obs ==0
if (flag_obs==1) {
for (ithres in 1:Al2_Num_thre) {
Ob_dum_t[,,ithres]<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<Al2_Thres_val[ithres])} )+Ob_dum_t[,,ithres]
} # for loop
} # flag_obs == 1
#
# advance to the same calendar window of the next year; the first year's
# indices already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
#
print("finish Tmin Blossom thresholds")
} # almon 2
# Indicator 3: minimum-temperature thresholds over the nut-development
# period -- a fixed 152-day window starting with April (im = 4).
# Temp_val <-- Pr_Tmin_val, etc.
# Accumulates, per grid cell and per threshold in Al3_Thres_val, the number
# of days with Tmin strictly below the threshold, into mod_dum2_t (model)
# or Ob_dum2_t (observations). Accumulators are not initialized here --
# presumably zeroed by the caller; TODO confirm.
if (almonT == 3){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Nut thresholds")  # FIX: was mislabeled "Blossom"
inS = 0
inE = 0
im=4  # the window starts in April
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS)/end(inE) indices for the first year, accounting for a
# leap year via ind_yrs; later years are advanced incrementally below
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
# FIX: fixed 152-day window; the original wrapped this in an ifelse()
# whose two branches were byte-identical
inE <- inS + 152 - 1
}
# count the days with Tmin strictly below each threshold and accumulate
if(flag_obs==0) {
for (ithres in 1:Al3_Num_thre) {
mod_dum2_t[,,ithres]<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<Al3_Thres_val[ithres])} )+ mod_dum2_t[,,ithres]
} # for loop
} # flag_obs ==0
if (flag_obs==1) {
for (ithres in 1:Al3_Num_thre) {
Ob_dum2_t[,,ithres]<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<Al3_Thres_val[ithres])} )+Ob_dum2_t[,,ithres]
} # for loop
} # flag_obs == 1
# advance to the same calendar window of the next year; the first year's
# indices already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
}# years
print("finish Tmin Nut thresholds")
} # almon 3
# Minimum Temperature below Tmin for dormant stage
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 4){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
Ob_dum <- array(0,dim=c(lon_num,lat_num))
dum <- array(0,dim=c(lon_num,lat_num))
dum2 <- array(0,dim=c(lon_num,lat_num))
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# sum up the number of days at or below the threshold and average over tot_yrs
if ((im ==11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<29)} )+ dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<29)} )+Ob_dum
} # flag_obs == 1
} # im = dormant months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
} #months
print("finish Tmin Dormant")
} # almon 4
# Indicator 5: per grid cell, number of dormant-season days (Nov, Dec, Jan)
# with Tmin strictly below 45.
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 5){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
# NOTE(review): the label above is misleading -- this block computes the
# Tmin-dormant indicator (see the closing print below).
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs accumulator
dum <- array(0,dim=c(lon_num,lat_num))          # model accumulator
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period (temperature, not precipitation)
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# count the dormant-month days strictly below the 45 threshold
if ((im ==11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<45)} )+ dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<45)} )+Ob_dum
} # flag_obs == 1
} # im = dormant months
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmin Dormant")
} # almon 5
#------------------ TMAX INDICATORS
# Indicator 6: per grid cell, number of dormant-season days (Nov, Dec, Jan)
# with Tmax strictly below 45.
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 6){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmax Dormant")  # FIX: was mislabeled "start Tmin Blossom thresholds"
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs accumulator
mod_dum <-array(0,dim=c(lon_num,lat_num)) # model accumulator
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS)/end(inE) day indices of month im for the first year,
# accounting for leap years (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# count the dormant-month days with Tmax strictly below 45
if ((im ==11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
mod_dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<45)} )+ mod_dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<45)} )+Ob_dum
} # flag_obs == 1
} # im = dormant months
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmax Dormant")
} # almon 6
# Indicator 7: maximum-temperature thresholds over the blossom months
# (Feb, Mar). Accumulates, per grid cell and per threshold in
# Al7_Thres_val, the number of days with Tmax strictly above the threshold,
# into mod_dum3_t (model) or Ob_dum3_t (observations). Accumulators are not
# initialized here -- presumably zeroed by the caller; TODO confirm.
# Temp_val <-- Pr_Tmax_val, etc.
#
if (almonT == 7){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmax Blossom thresholds")  # FIX: was mislabeled "Tmin"
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS)/end(inE) day indices of month im for the first year,
# accounting for leap years (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# count the blossom-month days with Tmax strictly above each threshold.
# FIX: the slices below were written inE:inS (a reversed but identical
# index set, since both endpoints are included); normalized to inS:inE
# for consistency with the other indicator blocks.
if ((im == 2) | (im==3)) {
if(flag_obs==0) {
for (ithres in 1:Al7_Num_thre) {
mod_dum3_t[,,ithres]<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x>Al7_Thres_val[ithres])} )+mod_dum3_t[,,ithres]
} # for loop
} # flag_obs ==0
if (flag_obs==1) {
for (ithres in 1:Al7_Num_thre) {
Ob_dum3_t[,,ithres]<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x>Al7_Thres_val[ithres])} )+Ob_dum3_t[,,ithres]
} # for loop
} # flag_obs == 1
} # im = bloom months
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmax Blossom thresholds")
} # almon 7
# Indicator 8: per grid cell, number of days in the nut-period months
# (Mar, Apr) with Tmax strictly above 75.
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 8){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmax Nut")  # FIX: was mislabeled "start Tmin Blossom thresholds"
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs accumulator
mod_dum <- array(0,dim=c(lon_num,lat_num))      # model accumulator
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS)/end(inE) day indices of month im for the first year,
# accounting for leap years (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# count the days with Tmax strictly above 75.
# FIX: the slices below were written inE:inS (a reversed but identical
# index set, since both endpoints are included); normalized to inS:inE
# for consistency with the other indicator blocks.
if ((im == 3) | (im==4) ) {
if(flag_obs==0) {
mod_dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x>75)} )+ mod_dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x>75)} )+Ob_dum
} # flag_obs == 1
} # im = bloom months
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmax Nut thresholds")  # FIX: was mislabeled "Tmin"
} # almon 8
#------------------------------------------- Tavg
# Indicator 9: per grid cell, number of dormant-season days (Nov, Dec, Jan)
# with Tavg strictly below 45.
# Temp_val <-- Pr_Tavg_val, etc.
#
if (almonT == 9){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tavg dormant")
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs accumulator
dum <- array(0,dim=c(lon_num,lat_num))          # model accumulator
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period (temperature, not precipitation)
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# count the dormant-month days strictly below 45
# (the months listed are Nov/Dec/Jan despite the "bloom" comment below)
if ((im == 11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<45)} )+ dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<45)} )+Ob_dum
} # flag_obs == 1
} # im = bloom months
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tavg dormant")
} # almon 9
#---------
# Indicator 10: per grid cell, number of blossom-month days (Feb, Mar)
# with Tavg at or above 55 (code uses >=).
# Temp_val <-- Pr_Tavg_val, etc.
#
if (almonT == 10){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tavg blossom")
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num)) # obs accumulator
dum <- array(0,dim=c(lon_num,lat_num))          # model accumulator
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# loop over each year in the period (temperature, not precipitation)
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# count the blossom-month days with Tavg at or above 55
if ((im == 2) | (im==3) ) {
if(flag_obs==0) {
dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x>=55)} )+ dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x>=55)} )+Ob_dum
} # flag_obs == 1
} # im = bloom months
# advance to the same month of the next year; the first year's indices
# already account for its leap day, later years add ind_yrs
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tavg blossom")
} # almon 10
| /ClimDa/Scripts/US_only/Temp/Functions/almond_temp_indicators.R | no_license | ranimurali/GSA-ClimDa | R | false | false | 16,472 | r | # Temperature Almond Indicators in California
# 1/2016
#
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
#---------------------- TMIN INDICATORS
# avg monthly Tmin for Feb < 4C
if (almonT == 1){
dum <- array(0,dim=c(lon_num,lat_num))
dum2 <- array(0,dim=c(lon_num,lat_num))
dum3 <- array(0,dim=c(ob_lon_num,ob_lat_num))
dum4 <- array(0,dim=c(ob_lon_num,ob_lat_num))
im = 2 # month is Feb
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1) }
# if model simulation then model matrix, if obs then use obs matrix
ifelse(flag_obs == 0,dum <- apply(Temp_val[,,inS:inE],c(1,2),mean),
dum3 <- apply(Ob_temp[,,inS:inE],c(1,2),mean))
ifelse(flag_obs==0,dum2<-apply(dum,c(1,2),function(x){sum(x<39.2)}) + dum2,
dum4<-apply(dum3,c(1,2),function(x){sum(x<39.2)}) + dum4)
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} # for years
print("finish almon 1")
}
# Minimum Temperature Thresholds - set for the blossom period
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 2){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
#for (im in 1:12) {
im=2
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]+days_monlp[im+1]-1,inE<-inS+days_mon[im]+days_mon[im+1]-1)
}
# sum up the number of days at or below the threshold and average over tot_yrs
#
if(flag_obs==0) {
for (ithres in 1:Al2_Num_thre) {
mod_dum_t[,,ithres]<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<Al2_Thres_val[ithres])} )+ mod_dum_t[,,ithres]
} # for loop
} # flag_obs ==0
if (flag_obs==1) {
for (ithres in 1:Al2_Num_thre) {
Ob_dum_t[,,ithres]<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<Al2_Thres_val[ithres])} )+Ob_dum_t[,,ithres]
} # for loop
} # flag_obs == 1
#
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
#
print("finish Tmin Blossom thresholds")
} # almon 2
# Minimum Temperature Thresholds - set for the nut period
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 3){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
#for (im in 1:12) {
inS = 0
inE = 0
im=4
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
# ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
# ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+152-1,inE<-inS+152-1)
}
# sum up the number of days at or below the threshold and average over tot_yrs
# if ((im == 4) | (im==5) | (im==6) | (im==7) | (im==8) ) {
if(flag_obs==0) {
for (ithres in 1:Al3_Num_thre) {
mod_dum2_t[,,ithres]<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<Al3_Thres_val[ithres])} )+ mod_dum2_t[,,ithres]
} # for loop
} # flag_obs ==0
if (flag_obs==1) {
for (ithres in 1:Al3_Num_thre) {
Ob_dum2_t[,,ithres]<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<Al3_Thres_val[ithres])} )+Ob_dum2_t[,,ithres]
} # for loop
} # flag_obs == 1
# } # im = bloom months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
}# years
#} #months
print("finish Tmin Nut thresholds")
} # almon 3
# Minimum Temperature below Tmin for dormant stage
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 4){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
Ob_dum <- array(0,dim=c(lon_num,lat_num))
dum <- array(0,dim=c(lon_num,lat_num))
dum2 <- array(0,dim=c(lon_num,lat_num))
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# sum up the number of days at or below the threshold and average over tot_yrs
if ((im ==11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<29)} )+ dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<29)} )+Ob_dum
} # flag_obs == 1
} # im = dormant months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
} #months
print("finish Tmin Dormant")
} # almon 4
# Minimum Temperature below Tmin for dormant stage
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 5){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num))
dum <- array(0,dim=c(lon_num,lat_num))
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# sum up the number of days at or below the threshold and average over tot_yrs
if ((im ==11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<45)} )+ dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<45)} )+Ob_dum
} # flag_obs == 1
} # im = dormant months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmin Dormant")
} # almon 5
#------------------ TMAX INDICATORS
# Maximum Temperature below 45 for dormant stage
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 6){
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num))
mod_dum <-array(0,dim=c(lon_num,lat_num)) # store data
for (im in 1:12) {
inS = 0
inE = 0
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# calculate total monthly precipitation for each year in the period
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# sum up the number of days at or below the threshold and average over tot_yrs
if ((im ==11) | (im==12) | (im==1) ) {
if(flag_obs==0) {
mod_dum<-apply(Temp_val[,,inS:inE],c(1,2),function(x){sum(x<45)} )+ mod_dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inS:inE],c(1,2),function(x){sum(x<45)} )+Ob_dum
} # flag_obs == 1
} # im = dormant months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmax Dormant")
} # almon 6
# Max Temperature Thresholds - set for the blossom period
# Temp_val <-- Pr_Tmax_val, etc.
#
if (almonT == 7){
# almonT == 7: for the blossom months (Feb/Mar), count the days whose
# temperature exceeds each threshold in Al7_Thres_val, per grid cell.
# Counts accumulate into mod_dum3_t (model) / Ob_dum3_t (observations),
# which must be initialized before this block runs.
# NOTE(review): the start message says "Tmin" but the finish message and
# the section header above say "Tmax" -- confirm which variable Temp_val
# actually holds here.
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
for (im in 1:12) {
inS = 0
inE = 0
# number of years to aggregate: blossom-period years for observations,
# otherwise all model years
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# walk every year, tracking inS/inE day indices into the (grid x grid x day)
# temperature array
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# blossom months only: add this year's per-cell exceedance counts for
# every threshold.
# NOTE(review): the slice is written inE:inS (descending). The sum is
# order-independent, so the result equals inS:inE, but other almonT
# blocks use inS:inE -- confirm the reversal is intentional.
if ((im == 2) | (im==3)) {
if(flag_obs==0) {
for (ithres in 1:Al7_Num_thre) {
mod_dum3_t[,,ithres]<-apply(Temp_val[,,inE:inS],c(1,2),function(x){sum(x>Al7_Thres_val[ithres])} )+mod_dum3_t[,,ithres]
} # for loop
} # flag_obs ==0
if (flag_obs==1) {
for (ithres in 1:Al7_Num_thre) {
Ob_dum3_t[,,ithres]<-apply(Ob_temp[,,inE:inS],c(1,2),function(x){sum(x>Al7_Thres_val[ithres])} )+Ob_dum3_t[,,ithres]
} # for loop
} # flag_obs == 1
} # im = bloom months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmax Blossom thresholds")
} # almon 7
# Max Temperature Thresholds - set for the nut period
# Temp_val <-- Pr_Tmin_val, etc.
#
if (almonT == 8){
# almonT == 8: for the nut-development months (Mar/Apr), count the days
# with temperature above 75, per grid cell, accumulated over the period
# into mod_dum (model) / Ob_dum (observations), both initialized here.
# NOTE(review): the start message says "Tmin Blossom" while the finish
# message says "Tmin Nut" and the section header says "Max Temperature";
# the months (3,4) match the nut period -- confirm the intended variable
# and fix the messages accordingly.
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
print("start Tmin Blossom thresholds")
Ob_dum <- array(0,dim=c(ob_lon_num,ob_lat_num))
mod_dum <- array(0,dim=c(lon_num,lat_num))
for (im in 1:12) {
inS = 0
inE = 0
# number of years to aggregate: blossom-period years for observations,
# otherwise all model years
ifelse (flag_obs ==1,tot_yrs <- bl_years_tot, tot_yrs<-years_tot)
# walk every year, tracking inS/inE day indices into the (grid x grid x day)
# temperature array
for (iyr in 1:tot_yrs) {
# set start(inS) and end(inE) indices to the right array index
# depending if it's a leap year (ind_yrs)
if (iyr == 1) {
ifelse(ind_yrs[iyr]>0,inS<-month_st_lp[im],inS<-month_st[im])
ifelse(ind_yrs[iyr]>0,inE<-inS+days_monlp[im]-1,inE<-inS+days_mon[im]-1)
}
# nut months only: add this year's per-cell count of days above 75.
# NOTE(review): the slice is written inE:inS (descending); the sum is
# order-independent so the result equals inS:inE, but other almonT
# blocks use inS:inE -- confirm the reversal is intentional.
if ((im == 3) | (im==4) ) {
if(flag_obs==0) {
mod_dum<-apply(Temp_val[,,inE:inS],c(1,2),function(x){sum(x>75)} )+ mod_dum
} # flag_obs ==0
if (flag_obs==1) {
Ob_dum<-apply(Ob_temp[,,inE:inS],c(1,2),function(x){sum(x>75)} )+Ob_dum
} # flag_obs == 1
} # im = bloom months
# to go to the first and last day of the month for the next year
# the index above has already taken leap year into account for first year
if (iyr == 1) {
inS <- inS + 365
inE <- inE + 365
}
else {
inS <- inS + 365 + ind_yrs[iyr]
inE <- inE + 365 + ind_yrs[iyr]
}
} #years
}#months
print("finish Tmin Nut thresholds")
} # almon 8
#------------------------------------------- Tavg
# Avg Temperature for dormant
# Temp_val <-- Pr_Tavg_val, etc.
#
if (almonT == 9){
# almonT == 9: average temperature, dormant season (Nov/Dec/Jan).
# Per grid cell, count days with Tavg below 45 across the period,
# accumulating into `dum` (model) / `Ob_dum` (observations) for use
# downstream.
tot_yrs <- if (flag_obs == 1) bl_years_tot else years_tot
print("start Tavg dormant")
Ob_dum <- array(0, dim = c(ob_lon_num, ob_lat_num))
dum <- array(0, dim = c(lon_num, lat_num))
for (im in 1:12) {
  inS <- 0
  inE <- 0
  tot_yrs <- if (flag_obs == 1) bl_years_tot else years_tot
  for (iyr in 1:tot_yrs) {
    # First year only: locate the month's first/last day in the daily
    # array, using the leap-year month table when ind_yrs flags one.
    if (iyr == 1) {
      if (ind_yrs[iyr] > 0) {
        inS <- month_st_lp[im]
        inE <- inS + days_monlp[im] - 1
      } else {
        inS <- month_st[im]
        inE <- inS + days_mon[im] - 1
      }
    }
    # Dormant months only: accumulate the per-cell count of days < 45.
    if (im == 11 || im == 12 || im == 1) {
      if (flag_obs == 0) {
        dum <- dum + apply(Temp_val[, , inS:inE], c(1, 2), function(x) sum(x < 45))
      }
      if (flag_obs == 1) {
        Ob_dum <- Ob_dum + apply(Ob_temp[, , inS:inE], c(1, 2), function(x) sum(x < 45))
      }
    }
    # Advance the day indices to the same month of the next year; the
    # first year's leap adjustment is already built into inS/inE above.
    if (iyr == 1) {
      inS <- inS + 365
      inE <- inE + 365
    } else {
      inS <- inS + 365 + ind_yrs[iyr]
      inE <- inE + 365 + ind_yrs[iyr]
    }
  } # years
} # months
print("finish Tavg dormant")
} # almon 9
#---------
# Avg Temperature for blossom
# Temp_val <-- Pr_Tavg_val, etc.
#
if (almonT == 10){
# almonT == 10: average temperature, blossom season (Feb/Mar).
# Per grid cell, count days with Tavg at or above 55 across the period,
# accumulating into `dum` (model) / `Ob_dum` (observations) for use
# downstream.
tot_yrs <- if (flag_obs == 1) bl_years_tot else years_tot
print("start Tavg blossom")
Ob_dum <- array(0, dim = c(ob_lon_num, ob_lat_num))
dum <- array(0, dim = c(lon_num, lat_num))
for (im in 1:12) {
  inS <- 0
  inE <- 0
  tot_yrs <- if (flag_obs == 1) bl_years_tot else years_tot
  for (iyr in 1:tot_yrs) {
    # First year only: locate the month's first/last day in the daily
    # array, using the leap-year month table when ind_yrs flags one.
    if (iyr == 1) {
      if (ind_yrs[iyr] > 0) {
        inS <- month_st_lp[im]
        inE <- inS + days_monlp[im] - 1
      } else {
        inS <- month_st[im]
        inE <- inS + days_mon[im] - 1
      }
    }
    # Blossom months only: accumulate the per-cell count of days >= 55.
    if (im == 2 || im == 3) {
      if (flag_obs == 0) {
        dum <- dum + apply(Temp_val[, , inS:inE], c(1, 2), function(x) sum(x >= 55))
      }
      if (flag_obs == 1) {
        Ob_dum <- Ob_dum + apply(Ob_temp[, , inS:inE], c(1, 2), function(x) sum(x >= 55))
      }
    }
    # Advance the day indices to the same month of the next year; the
    # first year's leap adjustment is already built into inS/inE above.
    if (iyr == 1) {
      inS <- inS + 365
      inE <- inE + 365
    } else {
      inS <- inS + 365 + ind_yrs[iyr]
      inE <- inE + 365 + ind_yrs[iyr]
    }
  } # years
} # months
print("finish Tavg blossom")
} # almon 10
|
# Examples of loading CSV data into data frames.
datacsv1 <- read.csv(file.choose(), header = TRUE)  # file.choose() opens a window to select the file
datacsv2 <- read.csv(file = "c:/Users/xyz/data.csv", header = TRUE)  # fixed path typo: "Uesrs" -> "Users"
datacsv3 <- read.table(file.choose(), header = TRUE, sep = ",")  # read.table allows a separator other than comma
datacsv4 <- read.table(file.choose(), header = TRUE, sep = ",", nrows = 10)  # read only the first 10 rows ("nrows", not "nrow")
datacsv5 <- read.table(file.choose(), header = TRUE, sep = ",", nrows = 10, skip = 5)  # skip the first 5 rows, then read 10 rows
| /LoadData.R | permissive | RamVellanki/R-Practice | R | false | false | 453 | r | datacsv1 = read.csv(file.choose(), header=T) #file.choose() opens window to select file
datacsv2 = read.csv(file="c:/Uesrs/xyz/data.csv", header = T)
datacsv3 = read.table(file.choose(), header=T, sep=",") #This allows us to use seperator other than comma
datacsv4 = read.table(file.choose(), header=T, sep=",",nrow=10) #Gets only 10 rows
datacsv5 = read.table(file.choose(), header=T, sep=",",nrow=10, skip=5) #Skip first 5 rows and gets only 10 rows
|
library(CEGO)
### Name: mutationSelfAdapt
### Title: Self-adaptive mutation operator
### Aliases: mutationSelfAdapt
### ** Examples
## Extracted package example: optimize a permutation problem with an
## evolutionary algorithm whose mutation/recombination operators and the
## mutation rate are chosen self-adaptively during the run.
seed=0
N=5
require(ParamHelpers)
# distance measure between permutations
dF <- distancePermutationHamming
# candidate mutation operators the EA can self-select among
mFs <- c(mutationPermutationSwap,mutationPermutationInterchange,
mutationPermutationInsert,mutationPermutationReversal)
# candidate recombination (crossover) operators
rFs <- c(recombinationPermutationCycleCrossover,recombinationPermutationOrderCrossover1,
recombinationPermutationPositionBased,recombinationPermutationAlternatingPosition)
mF <- mutationSelfAdapt
# strategy-parameter space explored by the self-adaption: the mutation
# rate plus the index of the mutation/recombination operator to apply
selfAdaptiveParameters <- makeParamSet(
makeNumericParam("mutationRate", lower=1/N,upper=1, default=1/N),
makeDiscreteParam("mutationOperator", values=1:4, default=expression(sample(4,1))),
#1: swap, 2: interchange, 3: insert, 4: reversal mutation
makeDiscreteParam("recombinationOperator", values=1:4, default=expression(sample(4,1)))
#1: CycleX, 2: OrderX, 3: PositionX, 4: AlternatingPosition
)
# self-adaptive recombination operator
rF <- recombinationSelfAdapt
# creation function: a random permutation of 1..N
cF <- function()sample(N)
# objective function: a distance-based landscape over permutations
# built from dF
lF <- landscapeGeneratorUNI(1:N,dF)
# start optimization (the first argument, the start point, is deliberately
# left missing)
set.seed(seed)
res <- optimEA(,lF,list(parameters=list(mutationFunctions=mFs,recombinationFunctions=rFs),
creationFunction=cF,mutationFunction=mF,recombinationFunction=rF,
popsize=15,budget=100,targetY=0,verbosity=1,selfAdaption=selfAdaptiveParameters,
vectorized=TRUE)) ##target function is "vectorized", expects list as input
res$xbest
| /data/genthat_extracted_code/CEGO/examples/mutationSelfAdapt.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,444 | r | library(CEGO)
### Name: mutationSelfAdapt
### Title: Self-adaptive mutation operator
### Aliases: mutationSelfAdapt
### ** Examples
seed=0
N=5
require(ParamHelpers)
#distance
dF <- distancePermutationHamming
#mutation
mFs <- c(mutationPermutationSwap,mutationPermutationInterchange,
mutationPermutationInsert,mutationPermutationReversal)
rFs <- c(recombinationPermutationCycleCrossover,recombinationPermutationOrderCrossover1,
recombinationPermutationPositionBased,recombinationPermutationAlternatingPosition)
mF <- mutationSelfAdapt
selfAdaptiveParameters <- makeParamSet(
makeNumericParam("mutationRate", lower=1/N,upper=1, default=1/N),
makeDiscreteParam("mutationOperator", values=1:4, default=expression(sample(4,1))),
#1: swap, 2: interchange, 3: insert, 4: reversal mutation
makeDiscreteParam("recombinationOperator", values=1:4, default=expression(sample(4,1)))
#1: CycleX, 2: OrderX, 3: PositionX, 4: AlternatingPosition
)
#recombination
rF <- recombinationSelfAdapt
#creation
cF <- function()sample(N)
#objective function
lF <- landscapeGeneratorUNI(1:N,dF)
#start optimization
set.seed(seed)
res <- optimEA(,lF,list(parameters=list(mutationFunctions=mFs,recombinationFunctions=rFs),
creationFunction=cF,mutationFunction=mF,recombinationFunction=rF,
popsize=15,budget=100,targetY=0,verbosity=1,selfAdaption=selfAdaptiveParameters,
vectorized=TRUE)) ##target function is "vectorized", expects list as input
res$xbest
|
## Monte Carlo randomization test for a risk surface, using a symmetric
## (shared) pilot density for the adaptive smoothing.
##
## The pooled case/control data are randomly relabeled ITER-1 times; for
## each relabeling the case and control adaptive kernel densities are
## rebuilt with a common pilot, the risk surface is recomputed, and a
## per-cell tally records how often a simulated value meets or exceeds
## the observed surface. Dividing the tally by ITER gives upper-tail
## Monte Carlo p-values.
##
## Args:
##   object:    observed result; must provide $f and $g components (each
##              with $data, $counts, $WIN and bandwidth entries), the
##              observed surface $rsM, and $log.
##   symchoice: "f" builds the pilot from the relabeled cases, "g" from the
##              relabeled controls; any other value uses the full pool.
##   hpsim:     pilot bandwidth per simulation -- NULL (reuse the observed
##              pilot), a numeric scalar, or a function(cases, controls)
##              returning a numeric of length 1 or 2 (first value used).
##   h0sim:     global bandwidth(s) -- NULL (reuse observed), a numeric of
##              length 1 (shared) or 2 (case, control), or a function with
##              the same return contract.
##   ITER:      total iteration count; the observed surface counts as one.
##   corrgrid:  linear indices (over t(object$rsM)) of the grid cells under
##              test; length(corrgrid) must be a perfect square.
##   comment:   logical; display a text progress bar.
##
## Returns: res x res matrix of upper-tail Monte Carlo p-values.
rsmc.sym <- function(object,symchoice,hpsim,h0sim,ITER,corrgrid,comment){
  ## Pool case (f) and control (g) coordinates, repeating each point by its
  ## multiplicity so the relabeling respects the counts.
  datapoolx <- append(as.vector(object$f$data[,1]),as.vector(object$g$data[,1]))
  datapoolx <- rep(datapoolx,append(object$f$counts,object$g$counts))
  datapooly <- append(as.vector(object$f$data[,2]),as.vector(object$g$data[,2]))
  datapooly <- rep(datapooly,append(object$f$counts,object$g$counts))
  n1 <- sum(object$f$counts)
  n2 <- sum(object$g$counts)
  datapool <- data.frame(cbind(datapoolx,datapooly))
  range.n <- 1:nrow(datapool)
  res <- sqrt(length(corrgrid))
  WIN <- object$f$WIN
  ## Observed surface restricted to the tested grid cells.
  rmat <- matrix(as.vector(t(object$rsM))[corrgrid],res,res,byrow=T)
  ## Start the tally at 1: the observed surface counts as one "success".
  mcmat_upper <- matrix(1,res,res)
  if(comment) pb <- txtProgressBar(0,ITER-1,style=3)
  for(i in seq_len(ITER-1)){  # seq_len() avoids the 1:0 trap when ITER == 1
    ## Random relabeling: n1 pooled points become cases, the rest controls.
    casetemp <- sample(range.n,n1)
    contemp <- range.n[-casetemp]
    ## Which subset drives the symmetric pilot density.
    if(symchoice=="f"){
      dat <- casetemp
    } else if(symchoice=="g"){
      dat <- contemp
    } else {
      dat <- range.n
    }
    ## Resolve the pilot bandwidth for this iteration.
    if(is.null(hpsim)){
      hp <- object$f$pdef$pilotH
    } else if(is.function(hpsim)){
      hp <- hpsim(datapool[casetemp,],datapool[contemp,])
      if(!is.numeric(hp)) stop("if a function, 'hpsim' must return a numeric vector of length 1 or 2")
      if(length(hp)!=1&&length(hp)!=2){
        ## BUG FIX: this message previously said 'h0sim' (copy-paste from
        ## the h0sim branch below) while validating hpsim.
        stop("if a function, 'hpsim' must return a numeric vector of length 1 or 2")
      } else if (length(hp)==2){
        hp <- hp[1]
        warning("'hpsim' length == 2, using first value only for symmetric pilot density bandwidth")
      }
    } else if(is.numeric(hpsim)){
      hp <- hpsim
      if(length(hp)>1){
        hp <- hp[1]
        warning("'hpsim' length > 1, using first value only for symmetric pilot density bandwidth")
      }
    } else {
      stop("invalid 'hpsim' argument")
    }
    ## Resolve the global bandwidths (case: h0f, control: h0g).
    if(is.null(h0sim)){
      h0f <- object$f$globalH
      h0g <- object$g$globalH
    } else if(is.function(h0sim)){
      h0 <- h0sim(datapool[casetemp,],datapool[contemp,])
      if(!is.numeric(h0)) stop("if a function, 'h0sim' must return a numeric vector of length 1 or 2")
      if(length(h0)==1){
        h0f <- h0g <- h0
      } else if(length(h0)==2){
        h0f <- h0[1]
        h0g <- h0[2]
      } else {
        stop("if a function, 'h0sim' must return a numeric vector of length 1 or 2")
      }
    } else if(is.numeric(h0sim)){
      if(length(h0sim)!=2 && length(h0sim)!=1) stop("if numeric, 'h0sim' must be a vector of length 1 or 2")
      if(length(h0sim)==1){
        h0f <- h0g <- h0sim
      } else {
        h0f <- h0sim[1]
        h0g <- h0sim[2]
      }
    } else {
      stop("invalid 'h0sim' argument")
    }
    ## Fixed-bandwidth pilot density on the chosen subset; when the pilot
    ## subset is not the full pool, evaluate it at the remaining points too
    ## so the geometric-mean gamma correction covers all data.
    if(length(dat)==(n1+n2)){
      pil <- bivariate.density(data=datapool[dat,],pilotH=hp,adaptive=F,res=res,WIN=WIN,comment=F)
      gam <- exp(mean(log(1/sqrt(c(pil$zSpec)))))
    } else {
      pil <- bivariate.density(data=datapool[dat,],pilotH=hp,adaptive=F,res=res,WIN=WIN,comment=F,atExtraCoords=datapool[-dat,])
      gam <- exp(mean(log(1/sqrt(c(pil$zSpec,pil$zExtra)))))
    }
    ## Adaptive densities for the relabeled cases/controls, then their
    ## (log-)relative risk surface.
    casedens <- bivariate.density(data=datapool[casetemp,],pdef=pil,globalH=h0f,adaptive=T,res=res,WIN=WIN,gamma=gam,comment=F)
    condens <- bivariate.density(data=datapool[contemp,],pdef=pil,globalH=h0g,adaptive=T,res=res,WIN=WIN,gamma=gam,comment=F)
    risktemp <-risk(casedens,condens,log=object$log,plotit=F)$rsM
    ## Per-cell tally: does the simulated surface reach the observed one?
    mcmat_upper <- mcmat_upper+(risktemp>=rmat)
    if(comment) setTxtProgressBar(pb,i)
  }
  if(comment) close(pb)
  return(mcmat_upper/ITER)
}
| /sparr/R/rsmc.sym.R | no_license | ingted/R-Examples | R | false | false | 3,315 | r | rsmc.sym <- function(object,symchoice,hpsim,h0sim,ITER,corrgrid,comment){
datapoolx <- append(as.vector(object$f$data[,1]),as.vector(object$g$data[,1]))
datapoolx <- rep(datapoolx,append(object$f$counts,object$g$counts))
datapooly <- append(as.vector(object$f$data[,2]),as.vector(object$g$data[,2]))
datapooly <- rep(datapooly,append(object$f$counts,object$g$counts))
n1 <- sum(object$f$counts)
n2 <- sum(object$g$counts)
datapool <- data.frame(cbind(datapoolx,datapooly))
range.n <- 1:nrow(datapool)
res <- sqrt(length(corrgrid))
WIN <- object$f$WIN
rmat <- matrix(as.vector(t(object$rsM))[corrgrid],res,res,byrow=T)
mcmat_upper <- matrix(1,res,res)
if(comment) pb <- txtProgressBar(0,ITER-1,style=3)
for(i in 1:(ITER-1)){
casetemp <- sample(range.n,n1)
contemp <- range.n[-casetemp]
if(symchoice=="f"){
dat <- casetemp
} else if(symchoice=="g"){
dat <- contemp
} else {
dat <- range.n
}
if(is.null(hpsim)){
hp <- object$f$pdef$pilotH
} else if(is.function(hpsim)){
hp <- hpsim(datapool[casetemp,],datapool[contemp,])
if(!is.numeric(hp)) stop("if a function, 'hpsim' must return a numeric vector of length 1 or 2")
if(length(hp)!=1&&length(hp)!=2){
stop("if a function, 'h0sim' must return a numeric vector of length 1 or 2")
} else if (length(hp)==2){
hp <- hp[1]
warning("'hpsim' length == 2, using first value only for symmetric pilot density bandwidth")
}
} else if(is.numeric(hpsim)){
hp <- hpsim
if(length(hp)>1){
hp <- hp[1]
warning("'hpsim' length > 1, using first value only for symmetric pilot density bandwidth")
}
} else {
stop("invalid 'hpsim' argument")
}
if(is.null(h0sim)){
h0f <- object$f$globalH
h0g <- object$g$globalH
} else if(is.function(h0sim)){
h0 <- h0sim(datapool[casetemp,],datapool[contemp,])
if(!is.numeric(h0)) stop("if a function, 'h0sim' must return a numeric vector of length 1 or 2")
if(length(h0)==1){
h0f <- h0g <- h0
} else if(length(h0)==2){
h0f <- h0[1]
h0g <- h0[2]
} else {
stop("if a function, 'h0sim' must return a numeric vector of length 1 or 2")
}
} else if(is.numeric(h0sim)){
if(length(h0sim)!=2 && length(h0sim)!=1) stop("if numeric, 'h0sim' must be a vector of length 1 or 2")
if(length(h0sim)==1){
h0f <- h0g <- h0sim
} else {
h0f <- h0sim[1]
h0g <- h0sim[2]
}
} else {
stop("invalid 'h0sim' argument")
}
if(length(dat)==(n1+n2)){
pil <- bivariate.density(data=datapool[dat,],pilotH=hp,adaptive=F,res=res,WIN=WIN,comment=F)
gam <- exp(mean(log(1/sqrt(c(pil$zSpec)))))
} else {
pil <- bivariate.density(data=datapool[dat,],pilotH=hp,adaptive=F,res=res,WIN=WIN,comment=F,atExtraCoords=datapool[-dat,])
gam <- exp(mean(log(1/sqrt(c(pil$zSpec,pil$zExtra)))))
}
casedens <- bivariate.density(data=datapool[casetemp,],pdef=pil,globalH=h0f,adaptive=T,res=res,WIN=WIN,gamma=gam,comment=F)
condens <- bivariate.density(data=datapool[contemp,],pdef=pil,globalH=h0g,adaptive=T,res=res,WIN=WIN,gamma=gam,comment=F)
risktemp <-risk(casedens,condens,log=object$log,plotit=F)$rsM
mcmat_upper <- mcmat_upper+(risktemp>=rmat)
if(comment) setTxtProgressBar(pb,i)
}
if(comment) close(pb)
return(mcmat_upper/ITER)
}
|
# Exploration of the matches data set followed by a kNN classification.
# Install readxl only when missing; unconditionally re-installing on every
# run is slow and needs a network connection. (Note: readxl is loaded but
# not actually used below.)
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
library(readxl)
matchknn <- read.csv("C:/Users/chandrasen.wadikar/Desktop/matches.csv")
matchknn
# mean() is not defined for a whole data.frame; use summary() for a
# per-column overview instead.
summary(matchknn)
matchknn$season  # was `matchdata$season`; `matchdata` was never defined
# Small demo of NA vs NaN handling on a toy vector.
vect <- c(1, 2, 3, NA, NaN)
is.na(vect)   # TRUE for both NA and NaN
is.nan(vect)  # TRUE only for NaN
View(matchknn)
# Starting at the first non-NA position, set the next 3 values to NA.
# (The original referenced an undefined object `z`, which crashed the
# script here; the demo is applied to `vect` instead.)
NonNAindex <- which(!is.na(vect))
firstNonNA <- min(NonNAindex)
is.na(vect) <- seq(firstNonNA, length.out = 3)
# Treat season as a categorical variable.
matchknn$season <- factor(matchknn$season)
is.factor(matchknn$season)
# Reproducible 80/20 train/test split.
set.seed(123)
matchknn.sample <- sample(2, nrow(matchknn), replace = TRUE, prob = c(.8, .2))
matchknn.train <- matchknn[matchknn.sample == 1, ]
matchknn.test <- matchknn[matchknn.sample == 2, ]
# Features = columns 1:3, label = column 4.
# NOTE(review): class::knn() requires numeric features -- confirm that
# columns 1:3 of matches.csv are numeric.
matchknn.train1 <- matchknn.train[, c(1:3)]
matchknn.test1 <- matchknn.test[, c(1:3)]
matchknn.train.lbl <- matchknn.train[, 4]
library(class)
matchknn_pred <- knn(train = matchknn.train1, test = matchknn.test1,
                     cl = matchknn.train.lbl, k = 8)
| /matchprediction.R | no_license | chandrasenwadikar/Chandrasen | R | false | false | 862 | r | install.packages('readxl')
library(readxl)
matchknn<-read.csv("C:/Users/chandrasen.wadikar/Desktop/matches.csv")
matchknn
mean(matchknn)
matchdata$season
vect<-c(1,2,3,NA,NaN)
is.na(vect)
is.nan(vect)
View(matchknn)
NonNAindex <- which(!is.na(z))
firstNonNA <- min(NonNAindex)
# set the next 3 observations to NA
is.na(z) <- seq(firstNonNA, length.out=3)
matchknn$season<-factor(matchknn$season)
is.factor(matchknn$season)
matchknn.sample<-sample(2,nrow(matchknn),replace = TRUE, prob = c(.8,.2))
matchknn.train<-matchknn[matchknn.sample==1,]
matchknn.test<-matchknn[matchknn.sample==2,]
matchknn.train1<-matchknn.train[,c(1:3)]
matchknn.test1<-matchknn.test[,c(1:3)]
matchknn.train.lbl<-matchknn.train[,4]
library(class)
matchknn_pred<-knn(train = matchknn.train1,test = matchknn.test1,cl=matchknn.train.lbl,k=8)
|
#' ggmap visualization of species occurrences
#'
#' @export
#' @template args
#' @param zoom zoom level for map. Adjust depending on how your data look.
#' @param color Default color of your points.
#' @param size point size, Default: 3
#' @param maptype (character) map theme. see `get_map` in `ggmap`
#' for options. Default: `terrain`
#' @param source (character) Google Maps ("google"), OpenStreetMap ("osm"),
#' Stamen Maps ("stamen"), or CloudMade maps ("cloudmade"). Default: `google`
#' @param point_color Default color of your points. Deprecated, use `color`
#' @param ... Ignored
#' @details Does not support adding a convex hull via [hull()]
#'
#' @note **BEWARE**: this may error for you with a message like
#' _GeomRasterAnn was built with an incompatible version of ggproto_. This
#' is fixed in the dev version of `ggmap`, but not in the CRAN version.
#' Apologies for the problem.
#'
#' @examples \dontrun{
#' # BEWARE: this may error for you with a message like
#' # "GeomRasterAnn was built with an incompatible version of ggproto".
#' # This is fixed in the dev version of `ggmap`, but not in the CRAN
#' # version. Apologies for the problem.
#'
#' ## spocc
#' library("spocc")
#' gd <- occ(query = 'Accipiter striatus', from = 'gbif', limit=75,
#' has_coords = TRUE)
#' map_ggmap(gd)
#' map_ggmap(gd$gbif)
#'
#' ## rgbif
#' library("rgbif")
#' ### occ_search() output
#' res <- occ_search(scientificName = "Puma concolor", limit = 100)
#' map_ggmap(res)
#'
#' ### occ_data() output
#' res <- occ_data(scientificName = "Puma concolor", limit = 100)
#' map_ggmap(res)
#'
#' #### many taxa
#' res <- occ_data(scientificName = c("Puma concolor", "Quercus lobata"),
#' limit = 30)
#' map_ggmap(res)
#'
#'
#' ## data.frame
#' df <- data.frame(name = c('Poa annua', 'Puma concolor', 'Foo bar'),
#' longitude = c(-120, -121, -123),
#' latitude = c(41, 42, 45), stringsAsFactors = FALSE)
#' map_ggmap(df)
#'
#' ### usage of occ2sp()
#' #### SpatialPointsDataFrame
#' spdat <- occ2sp(gd)
#' map_ggmap(spdat)
#'
#' # many species, each gets a different color
#' library("spocc")
#' spp <- c('Danaus plexippus', 'Accipiter striatus', 'Pinus contorta')
#' dat <- occ(spp, from = 'gbif', limit = 30, has_coords = TRUE,
#' gbifopts = list(country = 'US'))
#' map_ggmap(dat)
#' map_ggmap(dat, zoom = 5)
#' map_ggmap(dat, color = '#6B944D')
#' map_ggmap(dat, color = c('#976AAE', '#6B944D', '#BD5945'))
#' }
# S3 generic: dispatches on class(x) -- spocc (occdat/occdatind), rgbif
# (gbif/gbif_data), data.frame, and sp Spatial* inputs are supported.
# `point_color` is deprecated; methods use `color` instead (see roxygen
# docs above).
map_ggmap <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
                      size = 3, lon = 'longitude', lat = 'latitude',
                      maptype = "terrain", source = "google", ...) {
  UseMethod("map_ggmap")
}
#' @export
map_ggmap.occdat <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
                             size = 3, lon = 'longitude', lat = 'latitude',
                             maptype = "terrain", source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # Collapse the spocc occurrence object into one data.frame, then plot it.
  occ_df <- spocc::occ2df(x)
  map_ggmapper(occ_df, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.occdatind <- function(x, zoom = 3, point_color = "#86161f",
                                color = NULL, size = 3, lon = 'longitude',
                                lat = 'latitude', maptype = "terrain",
                                source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # Collapse the spocc occurrence object into a single data.frame.
  x <- spocc::occ2df(x)
  map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.gbif <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
                           size = 3, lon = 'longitude', lat = 'latitude',
                           maptype = "terrain", source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # A single-taxon result carries a $data slot directly; a multi-taxon
  # result is a list of such objects whose $data slots are bound together.
  x <- if ("data" %in% names(x)) x$data else bdt(lapply(x, function(z) z$data))
  # Standardize GBIF's decimalLongitude/decimalLatitude column names to
  # the longitude/latitude names the plotting helper expects.
  x <- guess_latlon(x, lon = 'decimalLongitude', lat = 'decimalLatitude')
  map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.gbif_data <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
                                size = 3, lon = 'longitude', lat = 'latitude',
                                maptype = "terrain", source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # A single-taxon result carries a $data slot directly; a multi-taxon
  # result is a list of such objects whose $data slots are bound together.
  x <- if ("data" %in% names(x)) x$data else bdt(lapply(x, function(z) z$data))
  # Standardize GBIF's decimalLongitude/decimalLatitude column names to
  # the longitude/latitude names the plotting helper expects.
  x <- guess_latlon(x, lon = 'decimalLongitude', lat = 'decimalLatitude')
  map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.data.frame <- function(x, zoom = 3, point_color = "#86161f",
                                 color = NULL, size = 3, lon = 'longitude',
                                 lat = 'latitude', maptype = "terrain",
                                 source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # Standardize the coordinate columns to latitude/longitude, then plot.
  coords <- guess_latlon(x, lat, lon)
  map_ggmapper(coords, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.SpatialPoints <- function(x, zoom = 3, point_color = "#86161f",
                                    color = NULL, size = 3, lon = 'longitude',
                                    lat = 'latitude', maptype = "terrain",
                                    source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # data.frame() on an sp object exposes the coordinates as columns.
  x <- data.frame(x)
  x <- guess_latlon(x, lat, lon)
  map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.SpatialPointsDataFrame <- function(x, zoom = 3,
                                             point_color = "#86161f",
                                             color = NULL, size = 3, lon = 'longitude', lat = 'latitude',
                                             maptype = "terrain", source = "google", ...) {
  # Validate the supplied arguments (package-internal helper).
  check_inputs(match.call())
  # data.frame() on an sp object exposes the coordinates (and attribute
  # data) as columns.
  x <- data.frame(x)
  x <- guess_latlon(x, lat, lon)
  map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.default <- function(x, zoom = 3, point_color = "#86161f",
                              color = NULL, size = 3, lon = 'longitude', lat = 'latitude',
                              maptype = "terrain", source = "google", ...) {
  # Fallback method: signal a clear error for unsupported input types.
  # class(x) can have length > 1 (e.g. c("matrix", "array") since R 4.0),
  # which would make sprintf() return a vector and garble the message --
  # collapse the classes into a single string first.
  stop(
    sprintf("map_ggmap does not support input of class '%s'",
            paste(class(x), collapse = "/")),
    call. = FALSE
  )
}
## helpers ---------------------
map_center <- function(x) {
  # Midpoint of the occurrence bounding box, as c(lon, lat), for centering
  # the basemap. NAs in the coordinate columns are ignored.
  lat_rng <- range(x$latitude, na.rm = TRUE)
  lon_rng <- range(x$longitude, na.rm = TRUE)
  c(lon = lon_rng[1] + diff(lon_rng) / 2,
    lat = lat_rng[1] + diff(lat_rng) / 2)
}
map_bbox <- function(x) {
  # Bounding box of the occurrences in left/bottom/right/top order
  # (the layout ggmap::get_map() accepts). NAs are ignored.
  lon <- range(x$longitude, na.rm = TRUE)
  lat <- range(x$latitude, na.rm = TRUE)
  c(left = lon[1], bottom = lat[1], right = lon[2], top = lat[2])
}
# Render occurrence points over a ggmap basemap.
#
# Args:
#   x: data.frame with latitude, longitude and name columns.
#   zoom, maptype, source: passed through to ggmap::get_map().
#   color: point color(s), resolved by pick_colors().
#   size: point size.
map_ggmapper <- function(x, zoom, color, size, maptype, source) {
  check4pkg("ggmap")
  # Drop rows with a missing coordinate.
  x <- x[stats::complete.cases(x$latitude, x$longitude), ]
  # Drop zero coordinates.
  # NOTE(review): this removes any row where latitude OR longitude is 0,
  # so genuine equator/prime-meridian records are dropped too -- confirm
  # whether only exact (0, 0) placeholders should be excluded.
  x <- x[!x$latitude == 0 & !x$longitude == 0, ]
  species_map <- ggmap::get_map(location = map_center(x), zoom = zoom,
                                maptype = maptype, source = source)
  # Dummy bindings so the aes() variables are defined in this scope.
  latitude <- longitude <- name <- NA
  ggmap::ggmap(species_map) +
    geom_point(data = x[, c("latitude", "longitude", "name")],
               aes(x = longitude, y = latitude, colour = name), size = size) +
    pick_colors(x, color) +
    # Collapse species names into one string: unique(x$name) has length > 1
    # when several taxa are mapped, and ggtitle() needs a single title.
    ggtitle(paste0("Distribution of ", paste(unique(x$name), collapse = ", "))) +
    labs(x = "Longitude", y = "Latitude")
}
| /R/map_ggmap.R | permissive | nemochina2008/mapr | R | false | false | 7,243 | r | #' ggpmap visualization of species occurences
#'
#' @export
#' @template args
#' @param zoom zoom level for map. Adjust depending on how your data look.
#' @param color Default color of your points.
#' @param size point size, Default: 3
#' @param maptype (character) map theme. see `get_map` in `ggmap`
#' for options. Default: none
#' @param source (character) Google Maps ("google"), OpenStreetMap ("osm"),
#' Stamen Maps ("stamen"), or CloudMade maps ("cloudmade"). Default: `osm`
#' @param point_color Default color of your points. Deprecated, use `color`
#' @param ... Ignored
#' @details Does not support adding a convex hull via [hull()]
#'
#' @note **BEWARE**: this may error for you with a message like
#' _GeomRasterAnn was built with an incompatible version of ggproto_. This
#' is fixed in the dev version of `ggmap`, but not in the CRAN version.
#' Apologies for the problem.
#'
#' @examples \dontrun{
#' # BEWARE: this may error for you with a message like
#' # "GeomRasterAnn was built with an incompatible version of ggproto".
#' # This is fixed in the dev version of `ggmap`, but not in the CRAN
#' # version. Apologies for the problem.
#'
#' ## spocc
#' library("spocc")
#' gd <- occ(query = 'Accipiter striatus', from = 'gbif', limit=75,
#' has_coords = TRUE)
#' map_ggmap(gd)
#' map_ggmap(gd$gbif)
#'
#' ## rgbif
#' library("rgbif")
#' ### occ_search() output
#' res <- occ_search(scientificName = "Puma concolor", limit = 100)
#' map_ggmap(res)
#'
#' ### occ_data() output
#' res <- occ_data(scientificName = "Puma concolor", limit = 100)
#' map_ggmap(res)
#'
#' #### many taxa
#' res <- occ_data(scientificName = c("Puma concolor", "Quercus lobata"),
#' limit = 30)
#' map_ggmap(res)
#'
#'
#' ## data.frame
#' df <- data.frame(name = c('Poa annua', 'Puma concolor', 'Foo bar'),
#' longitude = c(-120, -121, -123),
#' latitude = c(41, 42, 45), stringsAsFactors = FALSE)
#' map_ggmap(df)
#'
#' ### usage of occ2sp()
#' #### SpatialPointsDataFrame
#' spdat <- occ2sp(gd)
#' map_ggmap(spdat)
#'
#' # many species, each gets a different color
#' library("spocc")
#' spp <- c('Danaus plexippus', 'Accipiter striatus', 'Pinus contorta')
#' dat <- occ(spp, from = 'gbif', limit = 30, has_coords = TRUE,
#' gbifopts = list(country = 'US'))
#' map_ggmap(dat)
#' map_ggmap(dat, zoom = 5)
#' map_ggmap(dat, color = '#6B944D')
#' map_ggmap(dat, color = c('#976AAE', '#6B944D', '#BD5945'))
#' }
map_ggmap <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
size = 3, lon = 'longitude', lat = 'latitude',
maptype = "terrain", source = "google", ...) {
UseMethod("map_ggmap")
}
#' @export
map_ggmap.occdat <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
size = 3, lon = 'longitude', lat = 'latitude',
maptype = "terrain", source = "google", ...) {
check_inputs(match.call())
x <- spocc::occ2df(x)
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.occdatind <- function(x, zoom = 3, point_color = "#86161f",
color = NULL, size = 3, lon = 'longitude',
lat = 'latitude', maptype = "terrain",
source = "google", ...) {
check_inputs(match.call())
x <- spocc::occ2df(x)
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.gbif <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
size = 3, lon = 'longitude', lat = 'latitude',
maptype = "terrain", source = "google", ...) {
check_inputs(match.call())
x <- if ("data" %in% names(x)) x$data else bdt(lapply(x, function(z) z$data))
x <- guess_latlon(x, lon = 'decimalLongitude', lat = 'decimalLatitude')
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.gbif_data <- function(x, zoom = 3, point_color = "#86161f", color = NULL,
size = 3, lon = 'longitude', lat = 'latitude',
maptype = "terrain", source = "google", ...) {
check_inputs(match.call())
x <- if ("data" %in% names(x)) x$data else bdt(lapply(x, function(z) z$data))
x <- guess_latlon(x, lon = 'decimalLongitude', lat = 'decimalLatitude')
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.data.frame <- function(x, zoom = 3, point_color = "#86161f",
color = NULL, size = 3, lon = 'longitude',
lat = 'latitude', maptype = "terrain",
source = "google", ...) {
check_inputs(match.call())
x <- guess_latlon(x, lat, lon)
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.SpatialPoints <- function(x, zoom = 3, point_color = "#86161f",
color = NULL, size = 3, lon = 'longitude',
lat = 'latitude', maptype = "terrain",
source = "google", ...) {
check_inputs(match.call())
x <- data.frame(x)
x <- guess_latlon(x, lat, lon)
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.SpatialPointsDataFrame <- function(x, zoom = 3,
point_color = "#86161f",
color = NULL, size = 3, lon = 'longitude', lat = 'latitude',
maptype = "terrain", source = "google", ...) {
check_inputs(match.call())
x <- data.frame(x)
x <- guess_latlon(x, lat, lon)
map_ggmapper(x, zoom, color, size, maptype, source)
}
#' @export
map_ggmap.default <- function(x, zoom = 3, point_color = "#86161f",
color = NULL, size = 3, lon = 'longitude', lat = 'latitude',
maptype = "terrain", source = "google", ...) {
stop(
sprintf("map_ggmap does not support input of class '%s'", class(x)),
call. = FALSE
)
}
## helpers ---------------------
map_center <- function(x) {
min_lat <- min(x$latitude, na.rm = TRUE)
max_lat <- max(x$latitude, na.rm = TRUE)
min_long <- min(x$longitude, na.rm = TRUE)
max_long <- max(x$longitude, na.rm = TRUE)
center_lat <- min_lat + (max_lat - min_lat)/2
center_long <- min_long + (max_long - min_long)/2
c(lon = center_long, lat = center_lat)
}
map_bbox <- function(x) {
min_lat <- min(x$latitude, na.rm = TRUE)
max_lat <- max(x$latitude, na.rm = TRUE)
min_long <- min(x$longitude, na.rm = TRUE)
max_long <- max(x$longitude, na.rm = TRUE)
c(left = min_long, bottom = min_lat, right = max_long, top = max_lat)
}
map_ggmapper <- function(x, zoom, color, size, maptype, source) {
check4pkg("ggmap")
x <- x[stats::complete.cases(x$latitude, x$longitude), ]
x <- x[!x$latitude == 0 & !x$longitude == 0, ]
species_map <- ggmap::get_map(location = map_center(x), zoom = zoom,
maptype = maptype, source = source)
latitude <- longitude <- name <- NA
ggmap::ggmap(species_map) +
geom_point(data = x[, c("latitude", "longitude", "name")],
aes(x = longitude, y = latitude, colour = name), size = size) +
pick_colors(x, color) +
ggtitle(paste0("Distribution of ", unique(x$name))) +
labs(x = "Longitude", y = "Latitude")
}
|
# DID
# July 2021
#
# This file takes all the input variables and then runs a regression on them
#
# Inputs:
# Unclear
#
# Outputs:
# Unclear
#### Set up environment ----
library(tidyverse)
library(data.table)
library(did)
# NOTE(review): machine-specific absolute path; a relative path (or the
# here package) would let the script run elsewhere.
setwd("C:/Users/unnav/Dropbox/Coding/Inequality-and-Hate-Speech/d")
### Read in data and assemble final dataset ----
# Tweet-level data and state-year control variables.
tweets = fread("tweets_final.csv", stringsAsFactors = F)
controls = fread("controls.csv", stringsAsFactors = F)
# Keep tweets with a known US state (DC excluded -- it has no state.abb
# entry), attach the two-letter abbreviation, and restrict to dates from
# 2014-10-01 onward.
tweets2 = tweets %>%
filter(state != "" & !is.na(state) & state != "District of Columbia") %>%
select(id, state, date) %>%
rowwise() %>%
mutate(state_abb = state.abb[which(state.name == state)]) %>%
ungroup() %>%
filter(date >= as.Date("2014-10-01"))
# Aggregate to state-year tweet counts; 2014 only covers the last quarter,
# so its count is multiplied by 4 to approximate a full year.
tweets2 = tweets2 %>%
mutate(year = as.numeric(substr(date, 1, 4))) %>%
group_by(year, state_abb) %>%
summarise(num_tweets = n()) %>%
mutate(num_tweets = ifelse(year == 2014, num_tweets*4, num_tweets))
# Merge with the controls and derive first.treat per state: the year a
# state first switches into treatment (0 for control states), filled in
# both directions within each state.
final_data = tweets2 %>%
inner_join(controls, by = c("year", "state_abb" = "state")) %>%
mutate(state_id = as.numeric(as.factor(state_abb))) %>%
group_by(state_id) %>%
mutate(first.treat = ifelse(time-lag(time) == 1 | (year == 2014 & time == 2), year, NA),
first.treat = ifelse(control == 1, 0, first.treat)) %>%
fill(first.treat, .direction = "downup")
# Recode time to start at 0 for the OLS DiD specifications below.
final_data_ols = final_data %>%
mutate(time = time - 1)
### using DID package ----
# Group-time average treatment effects (did::att_gt) with the full set of
# state-year covariates.
# NOTE(review): `gpd` looks like a typo for `gdp` -- confirm against the
# column names in controls.csv.
temp = att_gt(yname = "num_tweets",
tname = "year",
idname = "state_id",
gname = "first.treat",
data = final_data,
xformla = ~ num_hate_groups + unemp_rate + pct_male + pct_female +
`Below 18` + `18 to 34` + `34 to 56` + `56 to 65` + `65 plus` +
bach + gpd + hs + med_inc + pct_white + pct_black + pct_AIAN +
pct_asian + pct_NHPI + pct_two_plus,
allow_unbalanced_panel = T)
### idk just running OLS
# Two-way DiD regressions (treat x time interaction) with progressively
# smaller covariate sets.
did_reg = lm(num_tweets ~ treat + time + treat*time + num_hate_groups + pct_male +
unemp_rate + `Below 18` + `18 to 34` + `34 to 56` + `56 to 65` +
bach + hs + gpd + med_inc,
data = final_data_ols)
summary(did_reg)
# Same model without pct_male.
did_reg2 = lm(num_tweets ~ treat + time + treat*time + num_hate_groups +
unemp_rate + `Below 18` + `18 to 34` + `34 to 56` + `56 to 65` +
bach + hs + gpd + med_inc,
data = final_data_ols)
summary(did_reg2)
# Same model without the unemployment rate.
did_reg3 = lm(num_tweets ~ treat + time + treat*time + num_hate_groups +
`Below 18` + `18 to 34` + `34 to 56` + `56 to 65` +
bach + hs + gpd + med_inc,
data = final_data_ols)
summary(did_reg3)
# Unadjusted DiD: interaction only, no covariates.
did_reg_no_ctrl = lm(num_tweets ~ treat + time + treat*time,
data = final_data_ols)
summary(did_reg_no_ctrl)
| /p/did.R | no_license | unnavav/Inequality-and-Hate-Speech | R | false | false | 2,821 | r | # DID
# July 2021
#
# This file takes all the input variables and then runs a regression on them
#
# Inputs:
# Unclear
#
# Outputs:
# Unclear
#### Set up environment ----
library(tidyverse)
library(data.table)
library(did)
setwd("C:/Users/unnav/Dropbox/Coding/Inequality-and-Hate-Speech/d")
### Read in data and assemble final dataset ----
tweets = fread("tweets_final.csv", stringsAsFactors = F)
controls = fread("controls.csv", stringsAsFactors = F)
tweets2 = tweets %>%
filter(state != "" & !is.na(state) & state != "District of Columbia") %>%
select(id, state, date) %>%
rowwise() %>%
mutate(state_abb = state.abb[which(state.name == state)]) %>%
ungroup() %>%
filter(date >= as.Date("2014-10-01"))
tweets2 = tweets2 %>%
mutate(year = as.numeric(substr(date, 1, 4))) %>%
group_by(year, state_abb) %>%
summarise(num_tweets = n()) %>%
mutate(num_tweets = ifelse(year == 2014, num_tweets*4, num_tweets))
final_data = tweets2 %>%
inner_join(controls, by = c("year", "state_abb" = "state")) %>%
mutate(state_id = as.numeric(as.factor(state_abb))) %>%
group_by(state_id) %>%
mutate(first.treat = ifelse(time-lag(time) == 1 | (year == 2014 & time == 2), year, NA),
first.treat = ifelse(control == 1, 0, first.treat)) %>%
fill(first.treat, .direction = "downup")
final_data_ols = final_data %>%
mutate(time = time - 1)
### using DID package ----
temp = att_gt(yname = "num_tweets",
tname = "year",
idname = "state_id",
gname = "first.treat",
data = final_data,
xformla = ~ num_hate_groups + unemp_rate + pct_male + pct_female +
`Below 18` + `18 to 34` + `34 to 56` + `56 to 65` + `65 plus` +
bach + gpd + hs + med_inc + pct_white + pct_black + pct_AIAN +
pct_asian + pct_NHPI + pct_two_plus,
allow_unbalanced_panel = T)
### idk just running OLS
did_reg = lm(num_tweets ~ treat + time + treat*time + num_hate_groups + pct_male +
unemp_rate + `Below 18` + `18 to 34` + `34 to 56` + `56 to 65` +
bach + hs + gpd + med_inc,
data = final_data_ols)
summary(did_reg)
did_reg2 = lm(num_tweets ~ treat + time + treat*time + num_hate_groups +
unemp_rate + `Below 18` + `18 to 34` + `34 to 56` + `56 to 65` +
bach + hs + gpd + med_inc,
data = final_data_ols)
summary(did_reg2)
did_reg3 = lm(num_tweets ~ treat + time + treat*time + num_hate_groups +
`Below 18` + `18 to 34` + `34 to 56` + `56 to 65` +
bach + hs + gpd + med_inc,
data = final_data_ols)
summary(did_reg3)
did_reg_no_ctrl = lm(num_tweets ~ treat + time + treat*time,
data = final_data_ols)
summary(did_reg_no_ctrl)
|
expect_error(
imf_data(database_id = "IFS",
indicator = "GG_GALM_G01_XDC",
country = "all",
freq = "A",
start = 1900, end = 2020
),
NA
)
expect_equal(
ncol(imf_data(database_id = "WHDREO", indicator = "PCPI_PCH",
freq = "A", country = c("MX"))),
3
)
| /tests/testthat/test-next.R | no_license | romainfrancois/imfr-1 | R | false | false | 346 | r | expect_error(
imf_data(database_id = "IFS",
indicator = "GG_GALM_G01_XDC",
country = "all",
freq = "A",
start = 1900, end = 2020
),
NA
)
expect_equal(
ncol(imf_data(database_id = "WHDREO", indicator = "PCPI_PCH",
freq = "A", country = c("MX"))),
3
)
|
lookup_table <- function(aa_query, aa_subject) {
# construct a look up table
index_lookup <- rep(NA, length(aa_query))
j <- 1
for(i in seq_along(index_lookup)) {
query_current <- aa_query[i]
subject_current <- aa_subject[j]
if(grepl("[X-]", query_current)) {
next
}
while(grepl("[X-]", subject_current)) {
j <- j + 1
subject_current <- aa_subject[j]
if(is.na(subject_current)) {
subject_current <- "END"
}
}
if(query_current == subject_current) {
# if matches, record
index_lookup[i] <- j
j <- j + 1
} else {
stop(query_current, " at position ", i, " and " , subject_current, " at position ", j, " do not match!")
}
}
index_lookup
} | /src/funcs.R | no_license | RabadanLab/pamler | R | false | false | 812 | r | lookup_table <- function(aa_query, aa_subject) {
# construct a look up table
index_lookup <- rep(NA, length(aa_query))
j <- 1
for(i in seq_along(index_lookup)) {
query_current <- aa_query[i]
subject_current <- aa_subject[j]
if(grepl("[X-]", query_current)) {
next
}
while(grepl("[X-]", subject_current)) {
j <- j + 1
subject_current <- aa_subject[j]
if(is.na(subject_current)) {
subject_current <- "END"
}
}
if(query_current == subject_current) {
# if matches, record
index_lookup[i] <- j
j <- j + 1
} else {
stop(query_current, " at position ", i, " and " , subject_current, " at position ", j, " do not match!")
}
}
index_lookup
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{psid_controls}
\alias{psid_controls}
\title{PSID controls (2490 observations)}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 2490 rows and 11 columns.}
\source{
\href{http://users.nber.org/~rdehejia/nswdata2.html}{Dehejia's NBER website.}
}
\usage{
psid_controls
}
\description{
Non-experimental comparison data file constructed by Lalonde
from the Population Survey of Income Dynamics.
}
\keyword{datasets}
| /man/psid_controls.Rd | no_license | jjchern/lalonde | R | false | true | 560 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{psid_controls}
\alias{psid_controls}
\title{PSID controls (2490 observations)}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 2490 rows and 11 columns.}
\source{
\href{http://users.nber.org/~rdehejia/nswdata2.html}{Dehejia's NBER website.}
}
\usage{
psid_controls
}
\description{
Non-experimental comparison data file constructed by Lalonde
from the Population Survey of Income Dynamics.
}
\keyword{datasets}
|
#######################################################################
#######################################################################
#######################################################################
# Plotting many plots:
## Default value for the 'debugMode' argument of the makeMultiPlot.* functions
## below. (Use '<-' rather than '=' for top-level assignment, per R style.)
DEFAULTDEBUGMODE <- TRUE;
## makeMultiPlot.all
## Top-level driver: writes the plain-text QC summary table and then the full
## standard battery of multi-plot figures for the QC result set 'res' — one
## figure per coloring scheme (basic, by-sample, by-group, by-lane) plus one
## highlight figure per sample (with and without lane coloring). All files are
## written under outfile.dir; every other argument is forwarded unchanged to
## the individual makeMultiPlot.* functions.
makeMultiPlot.all <- function(res, outfile.dir = "./",
                            plotter.params = list(),
                            plot.device.name = "png",
                            plotting.device.params = list(),
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots=TRUE,
                            ...){
   ## Resolve sequencing.type to one of the allowed choices (default: "RNA"):
   sequencing.type <- match.arg(sequencing.type);
   ## The text summary table is written first, alongside the figures:
   get.summary.table(res, outfile = paste0(outfile.dir,"summary.table.txt"), debugMode = debugMode);
   ## One multi-plot figure per coloring scheme, each forwarding the shared options:
   makeMultiPlot.basic(res = res, outfile.dir = outfile.dir, plotter.params = plotter.params, plot.device.name = plot.device.name, plotting.device.params = plotting.device.params, rasterize.large.plots = rasterize.large.plots, rasterize.medium.plots=rasterize.medium.plots, raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot, fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots,...);
   makeMultiPlot.colorBySample(res = res, outfile.dir = outfile.dir, plotter.params = plotter.params, plot.device.name = plot.device.name, plotting.device.params = plotting.device.params, rasterize.large.plots = rasterize.large.plots, rasterize.medium.plots=rasterize.medium.plots, raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot, fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots, ...);
   makeMultiPlot.colorByGroup(res = res, outfile.dir = outfile.dir, plotter.params = plotter.params, plot.device.name = plot.device.name, plotting.device.params = plotting.device.params, rasterize.large.plots = rasterize.large.plots, rasterize.medium.plots=rasterize.medium.plots, raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot, fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots, ...);
   makeMultiPlot.colorByLane(res = res, outfile.dir = outfile.dir, plotter.params = plotter.params, plot.device.name = plot.device.name, plotting.device.params = plotting.device.params, rasterize.large.plots = rasterize.large.plots, rasterize.medium.plots=rasterize.medium.plots, raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot, fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots, ...);
   ## Per-sample highlight figures (one file per sample):
   makeMultiPlot.highlightSample.all(res = res, outfile.dir = outfile.dir, plotter.params = plotter.params, plot.device.name = plot.device.name, plotting.device.params = plotting.device.params, rasterize.large.plots = rasterize.large.plots, rasterize.medium.plots=rasterize.medium.plots, raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot, fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots, ...);
   makeMultiPlot.highlightSample.colorByLane.all(res = res, outfile.dir = outfile.dir, plotter.params = plotter.params, plot.device.name = plot.device.name, plotting.device.params = plotting.device.params, rasterize.large.plots = rasterize.large.plots, rasterize.medium.plots=rasterize.medium.plots, raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot, fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots, ...);
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.highlightSample.all
## Generates one "highlight" multi-plot figure for every distinct sample ID
## found in res@decoder$sample.ID: in each figure that sample is emphasized
## against the rest (see makeMultiPlot.highlightSample). All other arguments
## are forwarded unchanged to the per-sample call.
makeMultiPlot.highlightSample.all <- function(res, outfile.dir = "./",
                            plotter.params = list(),
                            plot.device.name = "png",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots=TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## One figure per unique sample; per-sample verbosity is suppressed and a
   ## single "complete" message is printed here instead (when verbose):
   for(curr.sample in unique(res@decoder$sample.ID)){
     makeMultiPlot.highlightSample(res = res,
                                   curr.sample = curr.sample,
                                   outfile.dir = outfile.dir,
                                   verbose = FALSE,
                                   plotter.params = plotter.params,
                                   plot.device.name = plot.device.name,
                                   rasterize.large.plots = rasterize.large.plots,
                                   rasterize.medium.plots = rasterize.medium.plots,
                                   raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                                   fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,
                                   insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,
                                   maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots,
                                   ...);
     if(verbose) message(paste0(curr.sample," complete!"));
   }
}
## makeMultiPlot.highlightSample.colorByLane.all
## Like makeMultiPlot.highlightSample.all, but each per-sample figure also
## colors the highlighted sample's runs by lane (see
## makeMultiPlot.highlightSample.colorByLane). One figure is generated per
## distinct sample ID in res@decoder$sample.ID.
makeMultiPlot.highlightSample.colorByLane.all <- function(res, outfile.dir = "./",
                            plotter.params = list(),
                            plot.device.name = "png",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots=TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## One figure per unique sample; print a single progress message per sample:
   for(curr.sample in unique(res@decoder$sample.ID)){
     makeMultiPlot.highlightSample.colorByLane(res = res,
                                   outfile.dir = outfile.dir,
                                   curr.sample = curr.sample,
                                   verbose = FALSE,
                                   plotter.params = plotter.params,
                                   plot.device.name = plot.device.name,
                                   rasterize.large.plots = rasterize.large.plots,
                                   rasterize.medium.plots = rasterize.medium.plots,
                                   raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                                   fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,
                                   insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,
                                   maxColumns = maxColumns,maxRows=maxRows,plotList=plotList,labelPlots=labelPlots,
                                   ...);
     if(verbose) message(paste0(curr.sample," complete!"));
   }
}
#######################################################################
#######################################################################
#######################################################################
# Summary Plot:
## makeMultiPlot.withPlotter
## Generates a multi-plot figure using a caller-supplied, pre-built plotter
## object (rather than building one from 'res' with one of the
## build.plotter.* helpers). 'res' defaults to the result set stored inside
## the plotter itself. All layout/device arguments are forwarded to
## makeMultiPlot.GENERIC.v10.
makeMultiPlot.withPlotter <- function(plotter, res = plotter$res, outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = "plot-custom",
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## The generic engine expects a zero-argument builder; here it simply
   ## returns the plotter that was passed in:
   build.plotter.function <- function(){
     plotter;
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
   ## Legacy implementation (pre-v10 engine), retained for reference:
   # makeMultiPlot.GENERIC(res = res,
   #                          build.plotter.function = build.plotter.function,
   #                          outfile = outfile,
   #                          outfile.dir = outfile.dir,
   #                          outfile.prefix = outfile.prefix,
   #                          outfile.ext = outfile.ext,
   #                          plot.device.name = plot.device.name,
   #                          plotting.device.params = plotting.device.params,
   #                          debugMode = debugMode,
   #                          verbose = verbose,
   #                          cdf.bySample = FALSE,
   #                          cdf.plotIntercepts = FALSE,
   #                          rasterize.large.plots = rasterize.large.plots,
   #                          rasterize.medium.plots = rasterize.medium.plots,
   #                          raster.height = raster.height, raster.width = raster.width, chromosome.name.style = chromosome.name.style, exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
   #                          separatePlots = separatePlots,
   #                          nvc.highlight.points = nvc.mark.points,
   #                          fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,
   #                          insertSize.plot.xlim=insertSize.plot.xlim,sequencing.type=sequencing.type,
   #                          maxColumns = maxColumns,
   #                          ...);
}
## makeMultiPlot.basic
## Generates the "basic" multi-plot figure: all replicates drawn with the
## default (uncolored) plotter built by build.plotter.basic. Arguments are
## forwarded to makeMultiPlot.GENERIC.v10; outfile.prefix controls the output
## file name within outfile.dir.
makeMultiPlot.basic <- function(res,
                            outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = "plot-basic",
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Deferred builder: the plotter is constructed inside the generic engine.
   build.plotter.function <- function(){
     build.plotter.basic(res, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.colorByGroup
## Generates a multi-plot figure in which replicates are colored by their
## biological group (plotter built by build.plotter.colorByGroup). Arguments
## are forwarded to makeMultiPlot.GENERIC.v10.
makeMultiPlot.colorByGroup <- function(res,
                            outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = "plot-colorByGroup",
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Deferred builder: the plotter is constructed inside the generic engine.
   build.plotter.function <- function(){
     build.plotter.colorByGroup(res, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.colorByLane
## Generates a multi-plot figure in which replicates are colored by sequencing
## lane (plotter built by build.plotter.colorByLane). Arguments are forwarded
## to makeMultiPlot.GENERIC.v10.
makeMultiPlot.colorByLane <- function(res,
                            outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = "plot-colorByLane",
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Deferred builder: the plotter is constructed inside the generic engine.
   build.plotter.function <- function(){
     build.plotter.colorByLane(res, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.colorBySample
## Generates a multi-plot figure in which replicates are colored by sample
## (plotter built by build.plotter.colorBySample). Arguments are forwarded
## to makeMultiPlot.GENERIC.v10.
## BUGFIX: the default outfile.prefix was "plot-colorByLane" (copy-paste from
## makeMultiPlot.colorByLane), which caused the by-sample figure to be
## overwritten by the by-lane figure when both were written to the same
## directory (as makeMultiPlot.all does). It is now "plot-colorBySample".
makeMultiPlot.colorBySample <- function(res,
                            outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = "plot-colorBySample",
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Deferred builder: the plotter is constructed inside the generic engine.
   build.plotter.function <- function(){
     build.plotter.colorBySample(res, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.highlightSample
## Generates a multi-plot figure emphasizing one sample (curr.sample) against
## the other replicates (plotter built by build.plotter.highlightSample).
## The default output prefix embeds the sample ID. Arguments are forwarded
## to makeMultiPlot.GENERIC.v10.
makeMultiPlot.highlightSample <- function(res, curr.sample,
                            outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = paste0("plot-sampleHL-",curr.sample),
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Deferred builder: the plotter is constructed inside the generic engine.
   build.plotter.function <- function(){
     build.plotter.highlightSample(curr.sample,res, merge.offset.outgroup = FALSE, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.highlightSample.colorByLane
## Like makeMultiPlot.highlightSample, but the highlighted sample's runs are
## additionally colored by lane (plotter built by
## build.plotter.highlightSample.colorByLane). The default output prefix
## embeds the sample ID. Arguments are forwarded to makeMultiPlot.GENERIC.v10.
makeMultiPlot.highlightSample.colorByLane <- function(res, curr.sample,
                            outfile = NULL,
                            outfile.dir = "./",
                            outfile.prefix = paste0("plot-sampleHL-coloredByLane-",curr.sample),
                            outfile.ext = NULL,
                            plotter.params = list(),
                            plot.device.name = "curr",
                            plotting.device.params = list(),
                            verbose = TRUE,
                            debugMode = DEFAULTDEBUGMODE ,
                            rasterize.large.plots = NULL,
                            rasterize.medium.plots = NULL,
                            raster.height = 1050,
                            raster.width = 1050,
                            separatePlots = FALSE,
                            exclude.autosomes.chrom.rate.plot = TRUE,
                            chromosome.name.style = "UCSC",
                            fig.res = 150, fig.base.height.inches = 7,
                            insertSize.plot.xlim = NULL,
                            sequencing.type = c("RNA","Exome","Genome"),
                            nvc.mark.points = TRUE,
                            maxColumns = NULL,
                            maxRows = NULL,
                            plotList = NULL,
                            labelPlots = TRUE,
                            plot = TRUE,
                            ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Deferred builder: the plotter is constructed inside the generic engine.
   build.plotter.function <- function(){
     build.plotter.highlightSample.colorByLane(curr.sample,res, merge.offset.outgroup = FALSE, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res=res,
                            build.plotter.function=build.plotter.function,
                            outfile = outfile,
                            outfile.dir=outfile.dir,
                            outfile.prefix=outfile.prefix,
                            outfile.ext = outfile.ext,
                            plot.device.name=plot.device.name,
                            plotting.device.params = plotting.device.params,
                            debugMode = debugMode, verbose = verbose,
                            fig.res = fig.res,
                            fig.base.height.inches = fig.base.height.inches,
                            rasterize.large.plots = rasterize.large.plots,
                            rasterize.medium.plots = rasterize.medium.plots,
                            raster.height = raster.height,
                            raster.width = raster.width,
                            raster.res = fig.res,
                            separatePlots = separatePlots,
                            splitPlots = FALSE,
                            nvc.highlight.points = nvc.mark.points,
                            exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                            chromosome.name.style = chromosome.name.style,
                            insertSize.plot.xlim=insertSize.plot.xlim,
                            cdf.bySample = FALSE,
                            cdf.plotIntercepts = FALSE,
                            makePlots = plotList,
                            sequencing.type = sequencing.type,
                            labelPlots = labelPlots,
                            maxColumns = maxColumns,
                            maxRows = maxRows,
                            plot = plot,
                            ... )
}
############################################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
## Graphics-device names recognized by the plotting wrappers
## ("curr" = draw on the currently-open device):
supportedDevices <- c("png","CairoPNG","tiff","jpeg","CairoPDF","pdf","svg","curr");
## Subset of the above that produce vector (resolution-independent) output:
supportedVectorDevices <- c("pdf","CairoPDF","svg");
## Subset intended for print-quality output:
forPrintDevices <- c("pdf","CairoPDF");
## getBestLayout
## Computes a layout matrix that packs a sequence of plots (plot i spanning
## plotWidths[i] columns) greedily left-to-right, top-to-bottom.
##
## Args:
##   plotWidths: integer vector; the column-span of each plot, in order.
##   maxColumns: maximum columns per row; defaults to
##               ceiling(sqrt(sum(plotWidths))) for a roughly square layout.
##   maxRows:    maximum number of rows to fill; plots that do not fit within
##               maxRows rows are silently dropped.
##   nrow:       if set, pad the matrix with empty (all-zero) rows up to this
##               height; also used as maxRows when maxRows is NULL.
##               NOTE: this parameter shadows base::nrow, but calls like
##               nrow(mat) still resolve to the base function because R skips
##               non-function bindings when looking up a name in call position.
##
## Returns: list(ht = number of filled rows, wd = number of columns,
##               initLayoutFunct = zero-arg function calling layout(mat),
##               mat = the layout matrix; 0 cells are empty).
getBestLayout <- function(plotWidths, maxColumns = NULL, maxRows = NULL, nrow = NULL){
   plotCt <- length(plotWidths);
   if(is.null(maxColumns)){
     maxColumns <- ceiling(sqrt(sum(plotWidths)));
   }
   if(is.null(maxRows)){
     if(! is.null(nrow)){
       maxRows <- nrow;
     } else {
       maxRows <- Inf;
     }
   }
   soFar <- 1;
   mv <- c();
   ## Greedily fill each row: keep appending consecutive plots while the next
   ## one still fits in maxColumns, then pad the remainder of the row with 0s.
   ## (mv starts as NULL, so is.null() must be checked before nrow().)
   while(soFar <= plotCt && (is.null(mv) || nrow(mv) < maxRows)){
     v <- rep(soFar,plotWidths[soFar]);
     soFar <- soFar + 1;
     while(soFar <= plotCt && length(v) + plotWidths[soFar] <= maxColumns){
       v <- c(v,rep(soFar,plotWidths[soFar]));
       soFar <- soFar + 1;
     }
     if(length(v) < maxColumns){
       v <- c(v, rep(0, maxColumns - length(v) ) )
     }
     mv <- rbind(mv,v);
   }
   ht <- nrow(mv);
   wd <- ncol(mv);
   mat <- mv;
   ## Pad with empty rows so that multiple pages can share the same height:
   if( (! is.null(nrow)) ){
     while(nrow(mat) < nrow){
       mat <- rbind(mat,rep(0,ncol(mat)));
     }
   }
   initLayoutFunct <- function(){
     layout(mat);
   };
   return(list(ht = ht, wd = wd,initLayoutFunct=initLayoutFunct,mat=mat));
}
############################################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
## QoRTs.open.plotting.device
## Opens a bitmap graphics device writing to 'filename' and returns the
## function the caller must invoke to close it.
##
## Args:
##   filename:         path of the image file to create.
##   plot.device.name: "png", "CairoPNG", "pdf", "CairoPDF", or NULL. For
##                     pdf/CairoPDF (and NULL) no device is opened here —
##                     presumably the pdf device is managed by the caller
##                     (confirm against the calling code) — and a no-op
##                     closer is returned. Any other value is a fatal error.
##   device.params:    extra named arguments passed through to the device
##                     function (e.g. height/width/res).
##
## Returns: a zero-argument close function: dev.off for an opened device,
##          or a do-nothing function when no device was opened.
QoRTs.open.plotting.device <- function(filename, plot.device.name = "png", device.params = list()){
   device.params.final <- device.params;
   device.params.final[["filename"]] <- filename;
   funct.do.nothing <- function(){
     #do nothing!
   }
   if(is.null(plot.device.name)){
     return(funct.do.nothing);
   } else if(plot.device.name == "png"){
     do.call(grDevices::png,device.params.final);
     return(dev.off)
   } else if(plot.device.name == "CairoPNG"){
     ## Cairo is an optional dependency; load its namespace on demand.
     requireNamespace("Cairo");
     do.call(Cairo::CairoPNG,device.params.final);
     return(dev.off)
   } else if(plot.device.name == "CairoPDF" || plot.device.name == "pdf"){
     ## BUGFIX: use scalar '||' (was vectorized '|') in this if() condition.
     ## pdf devices are not opened here; nothing to do.
     return(funct.do.nothing);
   } else {
     stop(paste0("FATAL ERROR: QoRTs.open.plotting.device: Unrecognized device type: ",plot.device.name));
   }
}
#######################################################################
#######################################################################
#######################################################################
#######################################################################
######### INTERNAL FUNCTIONS:
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#Plot names (keys of PLOTTING.FUNCTION.COMMAND.LIST) skipped by default for
#  RNA-Seq data: on-target plots only apply to targeted (e.g. exome) capture.
#NOTE(review): "overlap.mismatch.byAvgQual" does not match any current key in
#  PLOTTING.FUNCTION.COMMAND.LIST (the closest key, "overlapMismatch.byQual.avg",
#  is commented out there) -- confirm whether this entry is stale.
SKIP.FOR.RNA.DATA <- c("onTarget.rates","onTarget.counts","overlap.mismatch.byAvgQual");
#Plot names skipped by default for exome capture data: gene-body coverage
#  breakdowns, splice-junction plots, and normalization plots are
#  RNA-Seq-specific.
SKIP.FOR.EXOME.DATA <- c(
"genebody.coverage.umquartileExpressionGenes","genebody.coverage.lowExpressionGenes",
"sj.locus.ct","sj.event.proportionByType","sj.event.rate",
"norm.factors","norm.vs.TC","SpliceProfile","overlapMismatch.byQual.avg"
);
#PLOTTING.FUNCTION.COMMAND.LIST:
#  Master registry of all summary plots, keyed by plot name. Each entry holds:
#    wd:  the plot's width in layout "columns" (most plots are 1; the raw and
#         aligned NVC plots are double-width).
#    FUN: a wrapper closure with the uniform signature
#         (plotter, debugMode, rast, params, plot, ...) that dispatches to the
#         underlying makePlot.* function. "rast" carries the rasterization
#         settings (big/med flags plus raster ht/wd/res) and "params" carries
#         the user-configurable plot options.
#  Calling FUN with plot=FALSE is used by the callers in this file as a
#  data-availability check (a FALSE-y return means the required data is
#  missing) without actually drawing anything.
PLOTTING.FUNCTION.COMMAND.LIST <- list(
	legend = list(wd=1,FUN=function(plotter,debugMode,rast,params,...){ makePlot.legend.box(plotter, debugMode = debugMode, ...) }),
	#BUGFIX: qual.pair.min previously passed rast[["ht"]] as raster.width
	#  (copy-paste error); every sibling entry passes rast[["wd"]].
	qual.pair.min =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"min", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	qual.pair.lowerQuartile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"lowerQuartile", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	qual.pair.median =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"median", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
	qual.pair.upperQuartile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"upperQuartile", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
	qual.pair.max =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"max", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
	clippingProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.clipping(plotter, debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	DeletionProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarOp.byCycle(plotter,"Del", debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	InsertionProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarOp.byCycle(plotter,"Ins", debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	SpliceProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarOp.byCycle(plotter,"Splice", debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	InsertionLengthHisto =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarLength.distribution(plotter,"Ins", log.y = TRUE, debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	DeletionLengthHisto =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarLength.distribution(plotter,"Del", log.y = TRUE, debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	gc =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.gc(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	missingness.rate =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.missingness.rate(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
	dropped.rate =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.dropped.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
	insert.size =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.insert.size(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]], xlim = params[["insertSize.plot.xlim"]],plot=plot, ...)}),
	overlap.coverage =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlap.coverage(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	readLengthDist =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.readLengthDist(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	overlapMismatch.byCycle =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byCycle(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	#overlapMismatch.byQual.avg =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byQual.avg(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	overlapMismatch.byQual.min =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byQual.min( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	overlapMismatch.byQual.read =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byQual.read( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	overlapMismatch.byBase =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byBase(plotter, debugMode = debugMode,plot=plot, ...)}),
	overlapMismatch.size =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.size( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	overlapMismatch.byBase.atScore=list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byBase.atScore( plotter,atScore=41, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	referenceMismatch.byCycle =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byCycle( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	referenceMismatch.byScore =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byScore( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	referenceMismatch.byBase =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byBase( plotter, debugMode = debugMode,plot=plot, ...)}),
	referenceMismatch.byBase.atScore.R1=list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byBase.atScore( plotter,atScore=41,forRead="R1", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	referenceMismatch.byBase.atScore.R2=list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byBase.atScore( plotter,atScore=41,forRead="R2", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	gene.diversity =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.gene.cdf(plotter, sampleWise = params[["cdf.bySample"]], plot.intercepts = params[["cdf.plotIntercepts"]],
	                                            debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	genebody.coverage.allGenes =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.genebody(plotter, geneset="Overall", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	genebody.coverage.umquartileExpressionGenes =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.genebody(plotter, geneset="50-75", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	genebody.coverage.lowExpressionGenes =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.genebody(plotter, geneset="0-50", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	geneAssignmentRates =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.gene.assignment.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
	sj.locus.ct =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.splice.junction.loci.counts(plotter, debugMode = debugMode,plot=plot, ...)}),
	sj.event.proportionByType =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.splice.junction.event.proportionsByType(plotter, debugMode = debugMode,plot=plot, ...)}),
	sj.event.rate =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.splice.junction.event.ratesPerRead(plotter, debugMode = debugMode,plot=plot, ...)}),
	mapping.rates =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.mapping.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
	chrom.rates =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.chrom.type.rates(plotter, chromosome.name.style = params[["chromosome.name.style"]], exclude.autosomes = params[["exclude.autosomes.chrom.rate.plot"]], debugMode = debugMode,plot=plot, ...)}),
	norm.factors =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.norm.factors(plotter, debugMode = debugMode,plot=plot, ...)}),
	norm.vs.TC =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.norm.factors.vs.TC(plotter, debugMode = debugMode,plot=plot, ...)}),
	strandedness.test =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.strandedness.test(plotter, debugMode = debugMode,plot=plot, ...)}),
	onTarget.counts =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.onTarget.counts(plotter, debugMode = debugMode,plot=plot, ...)}),
	onTarget.rates =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.onTarget.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
	NVC.lead.clip =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.NVC.lead.clip(plotter, clip.amt = params[["clip.amt"]], points.highlighted = params[["nvc.highlight.points"]],
	                                            debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	NVC.tail.clip =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.NVC.tail.clip(plotter, clip.amt = 12, points.highlighted = params[["nvc.highlight.points"]],
	                                            debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
	#The raw/aligned NVC plots are double-width (wd=2), and rasterize at
	#  double the raster width to match:
	NVC.raw =list(wd=2,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.raw.NVC(plotter, points.highlighted = params[["nvc.highlight.points"]],
	                                            debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = 2* rast[["wd"]],plot=plot, ...)}),
	NVC.aligned =list(wd=2,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.minus.clipping.NVC(plotter, points.highlighted = params[["nvc.highlight.points"]],
	                                            debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = 2* rast[["wd"]],plot=plot, ...)})
);
#makeMultiPlot.GENERIC.v10:
#  Internal workhorse for the makeMultiPlot.* family. Selects which plots to
#  draw (based on sequencing.type, makePlots/skipPlots, and data availability),
#  configures rasterization defaults, sets up the output graphics device, and
#  delegates the actual drawing to INTERNAL.plot.v10.
#Args (selected):
#  res:                    the QC results object to plot.
#  build.plotter.function: zero-arg function returning the plotter object.
#  outfile / outfile.dir / outfile.prefix / outfile.ext: output file naming.
#  plot.device.name:       "png","tiff","jpeg","CairoPNG","svg","pdf",
#                          "CairoPDF", or "curr" (plot to the current device).
#  rasterize.large.plots / rasterize.medium.plots: if NULL, defaults are
#                          chosen based on the device type.
#  makePlots / skipPlots:  explicit plot-name whitelist/blacklist; if NULL,
#                          defaults are derived from sequencing.type.
#  sequencing.type:        "RNA", "Exome", or "Genome".
#  plot:                   if FALSE, draw named placeholder panels instead of
#                          the real plots (layout dry-run).
#Returns: invisibly, whatever INTERNAL.plot.v10 returns; called for its
#  side effect of writing plot file(s).
makeMultiPlot.GENERIC.v10 <- function(res,
                          build.plotter.function,
                          outfile = NULL,
                          outfile.dir,
                          outfile.prefix,
                          outfile.ext = NULL,
                          plot.device.name,
                          plotting.device.params = list(),
                          debugMode = DEFAULTDEBUGMODE, verbose = TRUE,
                          fig.res = 150,
                          fig.base.height.inches = 7,
                          rasterize.large.plots = NULL,
                          rasterize.medium.plots = NULL,
                          raster.height = fig.res*fig.base.height.inches,
                          raster.width = raster.height,
                          raster.res = fig.res,
                          separatePlots = FALSE,
                          splitPlots = FALSE,
                          nvc.highlight.points = TRUE,
                          exclude.autosomes.chrom.rate.plot = TRUE,
                          chromosome.name.style = "UCSC",
                          insertSize.plot.xlim=NULL,
                          cdf.bySample = TRUE,
                          cdf.plotIntercepts = FALSE,
                          clip.amt = 12,
                          makePlots = NULL,
                          skipPlots = NULL,
                          sequencing.type = c("RNA","Exome","Genome"),
                          skipMissingDataPlots = TRUE,
                          labelPlots = TRUE,
                          maxColumns = NULL,
                          maxRows = NULL,
                          plot = TRUE,
                          ... ){
   sequencing.type <- match.arg(sequencing.type);
   #isExome <- sequencing.type == "Exome";
   plotter <- build.plotter.function();
   #Derive the default skip-list from the sequencing type:
   if(is.null(skipPlots)){
     if(sequencing.type == "Exome"){
       skipPlots <- SKIP.FOR.EXOME.DATA;
     } else if(sequencing.type == "RNA"){
       skipPlots <- SKIP.FOR.RNA.DATA;
     } else {
       #"Genome" sequencing type: no default skip-list (not yet supported).
       skipPlots <- character(0);
     }
     if(debugMode) message("Skipping: \"",paste0(skipPlots,collapse="\",\""),"\"");
   }
   #BUGFIX: compute the default makePlots whenever it is NULL. Previously it
   #  was only computed inside the is.null(skipPlots) branch, so passing
   #  skipPlots without makePlots resulted in NO plots at all, and passing
   #  makePlots with the default skipPlots silently overwrote the user's list.
   if(is.null(makePlots)){
     makePlots <- names(PLOTTING.FUNCTION.COMMAND.LIST)[! names(PLOTTING.FUNCTION.COMMAND.LIST) %in% skipPlots]
   }
   #Figure dimensions, in pixels (raster devices) and inches (vector devices):
   height.per.px <- fig.res * fig.base.height.inches;
   width.per.px <- fig.res * fig.base.height.inches;
   height.per.inches <- fig.base.height.inches;
   width.per.inches <- fig.base.height.inches;
   #By default, rasterize the big/medium plots only when they would otherwise
   #  bloat a vector (or print-targeted) output file:
   if(is.null(rasterize.large.plots)){
     if(plot.device.name %in% supportedVectorDevices){
       if(check.rasterize.or.warn("rasterize.large.plots")){
         message("Default: rasterizing large plots")
         rasterize.large.plots = TRUE;
       } else {
         rasterize.large.plots = FALSE;
       }
     } else {
       rasterize.large.plots = FALSE;
     }
   }
   if(is.null(rasterize.medium.plots)){
     if(plot.device.name %in% forPrintDevices){
       if(check.rasterize.or.warn("rasterize.medium.plots")){
         message("Default: rasterizing medium plots")
         rasterize.medium.plots = TRUE;
       } else {
         rasterize.medium.plots = FALSE;
       }
     } else {
       rasterize.medium.plots = FALSE;
     }
   }
   if(rasterize.large.plots || rasterize.medium.plots){
     #NOTE(review): this always checks the "rasterize.large.plots" capability,
     #  even when only medium plots are being rasterized -- confirm intended.
     check.rasterize.or.die("rasterize.large.plots");
     if((! plot.device.name %in% supportedVectorDevices) & (plot.device.name != "curr")){
       warning("rasterize.large.plots = TRUE should not be used with raster file formats (ie png, tiff, jpeg, etc). This will result in image degradation.");
     }
   }
   if(debugMode) message("Rasterize large plots: ", rasterize.large.plots);
   if(debugMode) message("Rasterize medium plots: ", rasterize.medium.plots);
   #Optionally drop plots whose underlying data is missing (detected by calling
   #  each plotting wrapper with plot=FALSE):
   if(skipMissingDataPlots){
     runParams <- list( nvc.highlight.points=nvc.highlight.points,
                   exclude.autosomes.chrom.rate.plot=exclude.autosomes.chrom.rate.plot,
                   rasterize.large.plots=rasterize.large.plots,
                   rasterize.medium.plots=rasterize.medium.plots,
                   chromosome.name.style=chromosome.name.style,
                   insertSize.plot.xlim=insertSize.plot.xlim,
                   cdf.plotIntercepts=cdf.plotIntercepts,
                   cdf.bySample=cdf.bySample,
                   clip.amt=clip.amt);
     rast <- list(big = rasterize.large.plots, med = rasterize.medium.plots,ht = raster.height, wd = raster.width, res = raster.res);
     dataMissingPlots <- sapply(makePlots,function(p){
        ! PLOTTING.FUNCTION.COMMAND.LIST[[p]]$FUN(plotter=plotter,debugMode=debugMode,rast=rast,params=runParams,plot=FALSE)
     });
     if(debugMode) message("Skipping due to missing data: \"",paste0(makePlots[dataMissingPlots],collapse="\",\""),"\"");
     makePlots <- makePlots[! dataMissingPlots]
   }
   #Set up the device open/close functions for the requested device.
   #  devOpenFunct(f,w,height.mult,width.mult) opens a device writing to file f,
   #  scaled by the given height/width multipliers (w is the plot's layout
   #  width); devCloseFunct() closes it.
   #  (BUGFIX: a second, unreachable duplicate "tiff" branch was removed.)
   if(plot.device.name == "curr"){
     if(debugMode) message("Plotting to the currently-open device...");
     default.params <- list();
     dev.params <- list();
     devOpenFunct <- function(f,w,height.mult,width.mult){};
     devCloseFunct <- function(){};
   } else if(plot.device.name == "png"){
     if(is.null(outfile.ext)) outfile.ext = ".png";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       default.params <- list(filename = f, height = height.per.px * height.mult, width = width.per.px * width.mult * w, pointsize = 36, units = "px");
       dev.params <- overmerge.list(default.params,plotting.device.params);
       do.call(png,dev.params)
     };
     devCloseFunct <- function(){dev.off()};
   } else if(plot.device.name == "tiff"){
     if(is.null(outfile.ext)) outfile.ext = ".tiff";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       default.params <- list(filename = f, height = height.per.px * height.mult, width = width.per.px * width.mult * w, pointsize = 36, units = "px");
       dev.params <- overmerge.list(default.params,plotting.device.params);
       do.call(tiff,dev.params)
     };
     devCloseFunct <- function(){dev.off()}
   } else if(plot.device.name == "jpeg"){
     if(is.null(outfile.ext)) outfile.ext = ".jpg";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       default.params <- list(filename = f, height = height.per.px * height.mult, width = width.per.px * width.mult * w, pointsize = 36, units = "px");
       dev.params <- overmerge.list(default.params,plotting.device.params);
       do.call(jpeg,dev.params)
     };
     devCloseFunct <- function(){dev.off()}
   } else if(plot.device.name == "CairoPNG"){
     if(! requireNamespace("Cairo", quietly=TRUE)) stop("Error: package Cairo not found. Install package Cairo or set plot.device.name to something other than CairoPNG or CairoPDF.");
     if(is.null(outfile.ext)) outfile.ext = ".png";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       default.params <- list(filename = f, height = height.per.px * height.mult, width = width.per.px * width.mult * w, pointsize = 36, units = "px");
       dev.params <- overmerge.list(default.params,plotting.device.params);
       requireNamespace("Cairo", quietly=TRUE)
       do.call(Cairo::CairoPNG,dev.params)
     };
     devCloseFunct <- function(){dev.off()}
   } else if(plot.device.name == "svg"){
     if(is.null(outfile.ext)) outfile.ext = ".svg";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       #Vector devices are sized in inches rather than pixels:
       default.params <- list(filename = f, height = height.per.inches * height.mult, width = width.per.inches * width.mult * w, pointsize = 24);
       dev.params <- overmerge.list(default.params,plotting.device.params);
       do.call(svg,dev.params)
     };
     devCloseFunct <- function(){dev.off()}
   } else if(plot.device.name == "pdf"){
     if(is.null(outfile.ext)) outfile.ext = ".pdf";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       default.params <- list(file = f, height = 0, width = 0, pointsize = 10, paper = "letter");
       dev.params <- overmerge.list(default.params,plotting.device.params);
       do.call(pdf,dev.params)
     };
     devCloseFunct <- function(){dev.off()}
   } else if(plot.device.name == "CairoPDF"){
     if(! requireNamespace("Cairo",quietly=TRUE)) stop("Error: package Cairo not found. Install package Cairo or set plot.device.name to something other than CairoPNG or CairoPDF.");
     if(is.null(outfile.ext)) outfile.ext = ".pdf";
     if(is.null(outfile)) outfile <- paste0(outfile.dir, outfile.prefix,outfile.ext);
     devOpenFunct <- function(f,w,height.mult,width.mult){
       default.params <- list(file = f, height = 0, width = 0, pointsize = 10, paper = "letter");
       dev.params <- overmerge.list(default.params,plotting.device.params);
       requireNamespace("Cairo", quietly=TRUE)
       do.call(Cairo::CairoPDF,dev.params)
     };
     devCloseFunct <- function(){dev.off()}
   } else {
     stop(paste0("Error: graphics device \"",plot.device.name,"\" not supported! Legal options are: [",paste0(supportedDevices,collapse=","),"] Set plot.device.name to \"curr\" to plot to the currently-open and/or default device."));
   }
   #Default arrangement for multipage PDF files:
   multiPage <- (plot.device.name == "CairoPDF" || plot.device.name == "pdf");
   if(is.null(maxColumns) && multiPage){
     maxColumns <- 2;
   }
   if(is.null(maxRows) && multiPage){
     maxRows <- 3;
   }
   if(multiPage){
     nrow = maxRows;
   } else {
     nrow = NULL
   }
   #Draw everything; on failure make sure the device is closed so the output
   #  file is not left locked/truncated:
   tryCatch({
     if(debugMode) message("Plotting extended...");
     INTERNAL.plot.v10( res = res, plotter = plotter,
                 devOpenFunct = devOpenFunct,
                 devCloseFunct = devCloseFunct,
                 outfile = outfile,
                 outfile.dir=outfile.dir,
                 outfile.prefix=outfile.prefix,
                 outfile.ext = outfile.ext,
                 skipPlots = skipPlots,
                 makePlots = makePlots,
                 verbose = verbose, debugMode = debugMode,
                 separatePlots = separatePlots,
                 labelPlots = labelPlots,
                 maxColumns = maxColumns,
                 maxRows = maxRows,
                 nrow=nrow,
                 multiPage = multiPage,
                 cdf.bySample = cdf.bySample,
                 cdf.plotIntercepts = cdf.plotIntercepts,
                 nvc.highlight.points = nvc.highlight.points,
                 exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                 chromosome.name.style = chromosome.name.style,
                 insertSize.plot.xlim=insertSize.plot.xlim,
                 clip.amt = clip.amt,
                 rasterize.large.plots = rasterize.large.plots,
                 rasterize.medium.plots = rasterize.medium.plots,
                 raster.height = raster.height,
                 raster.width = raster.width,
                 raster.res = raster.res,
                 plot=plot,
                 ...);
   }, error = function(e){
     message(paste0("PLOTTING ERROR: ", e));
     devCloseFunct();
   }, finally = {
     if(debugMode) message("Done with plot.");
   });
}
#INTERNAL.plot.v10:
#  Shared plotting driver used by makeMultiPlot.GENERIC.v10. Iterates over the
#  selected plotting wrappers (from PLOTTING.FUNCTION.COMMAND.LIST) and draws
#  them in one of four modes:
#    - one combined figure (maxRows is NULL),
#    - a multi-page document (pdf/CairoPDF, multiPage=TRUE),
#    - a series of numbered per-page files (".p1", ".p2", ...), or
#    - one file per plot (separatePlots=TRUE).
#  devOpenFunct(f,w,height.mult,width.mult) / devCloseFunct() are supplied by
#  the caller and encapsulate the chosen graphics device.
#  If plot=FALSE, each panel is drawn as a placeholder containing the plot
#  name (layout dry-run). If labelPlots=TRUE, each panel is labeled with a
#  letter ("a","b",...,"z","aa",...) in its upper-left corner.
INTERNAL.plot.v10 <- function( res,
                          plotter,
                          devOpenFunct,
                          devCloseFunct,
                          outfile = NULL,
                          outfile.dir,
                          outfile.prefix,
                          outfile.ext = NULL,
                          verbose = TRUE,
                          debugMode,
                          separatePlots = FALSE,
                          labelPlots = TRUE,
                          maxColumns = NULL,
                          maxRows = NULL,
                          nrow = NULL,
                          multiPage = FALSE,
                          makePlots = names(PLOTTING.FUNCTION.COMMAND.LIST),
                          skipPlots = c(),
                          skipMissingDataPlots = FALSE,
                          cex.corner.label = 2,
                          #makeplot params:
                          cdf.bySample = FALSE,
                          nvc.highlight.points = TRUE,
                          cdf.plotIntercepts = TRUE,
                          exclude.autosomes.chrom.rate.plot = TRUE,
                          chromosome.name.style = "UCSC",
                          insertSize.plot.xlim=NULL,
                          clip.amt = 12,
                          #Rasterization params:
                          rasterize.large.plots = FALSE,
                          rasterize.medium.plots = FALSE,
                          raster.height = 1050,
                          raster.width = 1050,
                          raster.res = 150,
                          plot = TRUE,
                          #graphics par:
                          ...){
   #Bundle the rasterization settings and plot options into the two list
   #  arguments expected by the PLOTTING.FUNCTION.COMMAND.LIST wrappers:
   rast <- list(big = rasterize.large.plots, med = rasterize.medium.plots,ht = raster.height, wd = raster.width, res = raster.res);
   params <- list( nvc.highlight.points=nvc.highlight.points,
                   exclude.autosomes.chrom.rate.plot=exclude.autosomes.chrom.rate.plot,
                   rasterize.large.plots=rasterize.large.plots,
                   rasterize.medium.plots=rasterize.medium.plots,
                   chromosome.name.style=chromosome.name.style,
                   insertSize.plot.xlim=insertSize.plot.xlim,
                   cdf.plotIntercepts=cdf.plotIntercepts,
                   cdf.bySample=cdf.bySample,
                   clip.amt=clip.amt);
   if(verbose) {message(paste0("Starting compiled plot..."));}
   ts <- timestamp();
   #Corner labels: "a".."z","aa".."az","ba".."bz","ca".."cz" (104 panels max).
   corner.labels <- c(letters, paste0(letters[1],letters), paste0(letters[2],letters), paste0(letters[3],letters));
   plot.corner.label <- function(i){
     if(labelPlots){
       devlim <- device.limits();
       #xpd=TRUE so the label is not clipped at the plot-region boundary:
       text(devlim[1],devlim[4], corner.labels[i] , cex = cex.corner.label, adj = c(-0.1,1.1), xpd=TRUE);
     }
   }
   #Select and (optionally) availability-filter the plots to draw:
   plotList <- PLOTTING.FUNCTION.COMMAND.LIST[makePlots];
   plotList <- plotList[! names(plotList) %in% skipPlots];
   if(skipMissingDataPlots){
     plotList <- plotList[ sapply(plotList,function(pf){
       pf$FUN(plotter=plotter,debugMode=debugMode,rast=rast,params=params,plot=FALSE)
     })];
   }
   plotWidths <- sapply(plotList,FUN=function(pf){ pf$wd })
   #(Debug hooks: the open/close functions are wrapped so that tracing
   # messages can easily be re-enabled here.)
   devOpenFunctSimple <- devOpenFunct;
   devOpenFunct <- function(...){
     devOpenFunctSimple(...);
   }
   devCloseFunctSimple <- devCloseFunct;
   devCloseFunct <- function(...){
     devCloseFunctSimple(...);
   }
   if(separatePlots){
     #One device per plot; opened/closed inside the loop below.
   } else if(is.null(maxRows)){
     #Single combined figure holding all plots:
     blo <- getBestLayout(plotWidths,maxColumns=maxColumns);
     devOpenFunct(outfile,1,blo$ht,blo$wd);
     blo$initLayoutFunct();
   } else {
     #Paginated output:
     blo <- getBestLayout(plotWidths,maxColumns=maxColumns,maxRows=maxRows,nrow=nrow);
     layoutCap <- max(blo$mat);   #number of plots that fit on the current page
     layoutStart <- 1;            #index of the first plot on the current page
     pageNum <- 1;                #current page number, used for per-page filenames
     if(multiPage){
       devOpenFunct(outfile,1,blo$ht,blo$wd);
     } else {
       devOpenFunct(paste0(outfile.dir,"/",outfile.prefix,".p",pageNum,outfile.ext),1,blo$ht,blo$wd);
     }
     blo$initLayoutFunct();
   }
   plotCt = 0;
   for(i in seq_along(plotList)){
     plotFUN <- plotList[[i]]$FUN;
     plotName <- names(plotList)[[i]];
     plotWd <- plotWidths[[i]];
     if(separatePlots){
       devOpenFunct(paste0(outfile.dir,"/",outfile.prefix,".",plotName,outfile.ext),plotWd,1,1);
     } else if(is.null(maxRows)){
       #combined figure: nothing to do per-plot.
     } else {
       if(layoutStart + layoutCap - 1 < i){
         #The current page is full: lay out the remaining plots on a new page.
         blo <- getBestLayout(plotWidths[i:length(plotWidths)],maxColumns=maxColumns,maxRows=maxRows,nrow=nrow);
         layoutCap <- max(blo$mat);
         layoutStart <- i;
         if(debugMode){
           #(These diagnostics were previously emitted unconditionally.)
           message(" layoutCap = ",layoutCap);
           message(" layoutStart = ",layoutStart);
           message(" i = ",i);
         }
         if(multiPage){
           #pdf-style devices start a new page automatically when layout() runs.
         } else {
           #BUGFIX: close the finished page and open the NEXT numbered page
           #  file. Previously the old device was left open and every page was
           #  written to the ".p1" file, overwriting all earlier pages.
           devCloseFunct();
           pageNum <- pageNum + 1;
           devOpenFunct(paste0(outfile.dir,"/",outfile.prefix,".p",pageNum,outfile.ext),1,blo$ht,blo$wd);
         }
         blo$initLayoutFunct();
       }
     }
     plotCt=plotCt+1;
     if(plot){
       plotFUN(plotter=plotter,debugMode=debugMode,rast=rast,params=params,plot=TRUE,...)
     } else {
       #Dry-run mode: draw a placeholder panel containing just the plot name.
       plot.new(); plot.window(xlim=c(0,1),ylim=c(0,1));
       text(0.5,0.5,labels=plotName);
     }
     plot.corner.label(i);
     if(separatePlots){
       devCloseFunct();
     }
   }
   if(! separatePlots){
     devCloseFunct();
   }
   if(debugMode) message("Finished Multiplot",getTimeAndDiff(ts))
}
| /src/QoRTs/R/compiled.plotting.R | permissive | pythseq/QoRTs | R | false | false | 75,938 | r |
#######################################################################
#######################################################################
#######################################################################
# Plotting many plots:
## Default for the 'debugMode' argument used throughout the multiplot wrappers.
## When TRUE, the plotting code emits progress/timing messages (see the
## "Finished Multiplot" message in the multiplot engine).
## NOTE: switched from '=' to '<-' for consistency with the rest of the file.
DEFAULTDEBUGMODE <- TRUE;
## makeMultiPlot.all: top-level convenience wrapper. Writes the summary table
## and then generates every standard multiplot variant (basic, color-by-sample,
## color-by-group, color-by-lane, and the two per-sample highlight series)
## into outfile.dir. All arguments after 'res' are forwarded unchanged to each
## sub-plotter; '...' is passed through as well.
makeMultiPlot.all <- function(res, outfile.dir = "./",
                    plotter.params = list(),
                    plot.device.name = "png",
                    plotting.device.params = list(),
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots=TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Write the plain-text summary table alongside the plots:
   get.summary.table(res, outfile = paste0(outfile.dir,"summary.table.txt"), debugMode = debugMode);
   ## The six standard variants, invoked in the same order as before.
   ## Looping over them replaces six near-identical hand-written calls.
   variantFunctions <- list(makeMultiPlot.basic,
                            makeMultiPlot.colorBySample,
                            makeMultiPlot.colorByGroup,
                            makeMultiPlot.colorByLane,
                            makeMultiPlot.highlightSample.all,
                            makeMultiPlot.highlightSample.colorByLane.all);
   for(variantFunction in variantFunctions){
      ## BUGFIX: debugMode is now forwarded explicitly. Previously it was
      ## silently dropped here, so the sub-plotters always fell back to
      ## DEFAULTDEBUGMODE regardless of what the caller requested.
      variantFunction(res = res, outfile.dir = outfile.dir,
                      plotter.params = plotter.params,
                      plot.device.name = plot.device.name,
                      plotting.device.params = plotting.device.params,
                      debugMode = debugMode,
                      rasterize.large.plots = rasterize.large.plots,
                      rasterize.medium.plots = rasterize.medium.plots,
                      raster.height = raster.height, raster.width = raster.width,
                      chromosome.name.style = chromosome.name.style,
                      exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                      fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,
                      insertSize.plot.xlim = insertSize.plot.xlim,
                      sequencing.type = sequencing.type,
                      maxColumns = maxColumns, maxRows = maxRows,
                      plotList = plotList, labelPlots = labelPlots,
                      ...);
   }
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.highlightSample.all: generates one sample-highlight multiplot
## (via makeMultiPlot.highlightSample) for every distinct sample ID found in
## res@decoder$sample.ID. Per-sample verbosity is suppressed; a single
## "<sample> complete!" message is emitted per sample when verbose is TRUE.
makeMultiPlot.highlightSample.all <- function(res, outfile.dir = "./",
                    plotter.params = list(),
                    plot.device.name = "png",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots=TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   for(curr.sample in unique(res@decoder$sample.ID)){
     makeMultiPlot.highlightSample(res = res,
                    curr.sample = curr.sample,
                    outfile.dir = outfile.dir,
                    verbose = FALSE,
                    plotter.params = plotter.params,
                    plot.device.name = plot.device.name,
                    ## BUGFIX: plotting.device.params and debugMode were
                    ## accepted by this function but never forwarded; the
                    ## per-sample calls silently used their own defaults.
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height, raster.width = raster.width,
                    chromosome.name.style = chromosome.name.style,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    sequencing.type = sequencing.type,
                    maxColumns = maxColumns, maxRows = maxRows,
                    plotList = plotList, labelPlots = labelPlots,
                    ...);
     if(verbose) message(paste0(curr.sample," complete!"));
   }
}
## makeMultiPlot.highlightSample.colorByLane.all: like
## makeMultiPlot.highlightSample.all, but each per-sample multiplot is colored
## by lane (via makeMultiPlot.highlightSample.colorByLane). One plot set is
## produced per distinct sample ID in res@decoder$sample.ID.
makeMultiPlot.highlightSample.colorByLane.all <- function(res, outfile.dir = "./",
                    plotter.params = list(),
                    plot.device.name = "png",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots=TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   for(curr.sample in unique(res@decoder$sample.ID)){
     makeMultiPlot.highlightSample.colorByLane(res = res,
                    curr.sample = curr.sample,
                    outfile.dir = outfile.dir,
                    verbose = FALSE,
                    plotter.params = plotter.params,
                    plot.device.name = plot.device.name,
                    ## BUGFIX: plotting.device.params and debugMode were
                    ## accepted by this function but never forwarded; the
                    ## per-sample calls silently used their own defaults.
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height, raster.width = raster.width,
                    chromosome.name.style = chromosome.name.style,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    fig.res = fig.res, fig.base.height.inches = fig.base.height.inches,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    sequencing.type = sequencing.type,
                    maxColumns = maxColumns, maxRows = maxRows,
                    plotList = plotList, labelPlots = labelPlots,
                    ...);
     if(verbose) message(paste0(curr.sample," complete!"));
   }
}
#######################################################################
#######################################################################
#######################################################################
# Summary Plot:
## makeMultiPlot.withPlotter: run the multiplot engine with a pre-built
## plotter object supplied by the caller (rather than constructing one from
## 'res'). All layout/device/raster options are forwarded unchanged to
## makeMultiPlot.GENERIC.v10.
## NOTE(review): plotter.params is accepted for signature consistency with
## the sibling wrappers but is not used in this function -- confirm intended.
makeMultiPlot.withPlotter <- function(plotter, res = plotter$res, outfile = NULL,
                    outfile.dir = "./",
                    outfile.prefix = "plot-custom",
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## The engine expects a builder function; the caller already supplied a
   ## finished plotter, so the builder simply returns it.
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = function(){ plotter },
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
## makeMultiPlot.basic: generate the standard ("basic") multiplot, with all
## series drawn in the default plotter style. Builds the plotter via
## build.plotter.basic and forwards every layout/device/raster option to
## makeMultiPlot.GENERIC.v10.
makeMultiPlot.basic <- function(res,
                    outfile = NULL,
                    outfile.dir = "./",
                    outfile.prefix = "plot-basic",
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Builder is deferred: the engine calls it when it needs the plotter.
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = function(){
                       build.plotter.basic(res, plotter.params = plotter.params)
                    },
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.colorByGroup: generate the standard multiplot with the series
## colored by group. Builds the plotter via build.plotter.colorByGroup and
## forwards every layout/device/raster option to makeMultiPlot.GENERIC.v10.
makeMultiPlot.colorByGroup <- function(res,
                    outfile = NULL,
                    outfile.dir = "./",
                    outfile.prefix = "plot-colorByGroup",
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Builder is deferred: the engine calls it when it needs the plotter.
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = function(){
                       build.plotter.colorByGroup(res, plotter.params = plotter.params)
                    },
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.colorByLane: generate the standard multiplot with the series
## colored by lane. Builds the plotter via build.plotter.colorByLane and
## forwards every layout/device/raster option to makeMultiPlot.GENERIC.v10.
makeMultiPlot.colorByLane <- function(res,
                    outfile = NULL,
                    outfile.dir = "./",
                    outfile.prefix = "plot-colorByLane",
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Builder is deferred: the engine calls it when it needs the plotter.
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = function(){
                       build.plotter.colorByLane(res, plotter.params = plotter.params)
                    },
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.colorBySample: generate the standard multiplot with the
## series colored by sample. Builds the plotter via build.plotter.colorBySample
## and forwards every layout/device/raster option to makeMultiPlot.GENERIC.v10.
makeMultiPlot.colorBySample <- function(res,
                    outfile = NULL,
                    outfile.dir = "./",
                    ## BUGFIX: the default prefix was "plot-colorByLane" (a
                    ## copy-paste from makeMultiPlot.colorByLane), so the two
                    ## variants wrote to the same filenames and clobbered each
                    ## other when run into the same directory (as
                    ## makeMultiPlot.all does).
                    outfile.prefix = "plot-colorBySample",
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Builder is deferred: the engine calls it when it needs the plotter.
   build.plotter.function <- function(){
      build.plotter.colorBySample(res, plotter.params = plotter.params);
   }
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = build.plotter.function,
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.highlightSample: generate a multiplot in which the given
## sample (curr.sample) is visually highlighted against the rest. Builds the
## plotter via build.plotter.highlightSample and forwards every
## layout/device/raster option to makeMultiPlot.GENERIC.v10.
makeMultiPlot.highlightSample <- function(res, curr.sample,
                    outfile = NULL,
                    outfile.dir = "./",
                    outfile.prefix = paste0("plot-sampleHL-",curr.sample),
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Builder is deferred: the engine calls it when it needs the plotter.
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = function(){
                       build.plotter.highlightSample(curr.sample, res,
                                                     merge.offset.outgroup = FALSE,
                                                     plotter.params = plotter.params)
                    },
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
#######################################################################
#######################################################################
#######################################################################
## makeMultiPlot.highlightSample.colorByLane: generate a multiplot in which
## the given sample (curr.sample) is highlighted and its series are colored by
## lane. Builds the plotter via build.plotter.highlightSample.colorByLane and
## forwards every layout/device/raster option to makeMultiPlot.GENERIC.v10.
makeMultiPlot.highlightSample.colorByLane <- function(res, curr.sample,
                    outfile = NULL,
                    outfile.dir = "./",
                    outfile.prefix = paste0("plot-sampleHL-coloredByLane-",curr.sample),
                    outfile.ext = NULL,
                    plotter.params = list(),
                    plot.device.name = "curr",
                    plotting.device.params = list(),
                    verbose = TRUE,
                    debugMode = DEFAULTDEBUGMODE ,
                    rasterize.large.plots = NULL,
                    rasterize.medium.plots = NULL,
                    raster.height = 1050,
                    raster.width = 1050,
                    separatePlots = FALSE,
                    exclude.autosomes.chrom.rate.plot = TRUE,
                    chromosome.name.style = "UCSC",
                    fig.res = 150, fig.base.height.inches = 7,
                    insertSize.plot.xlim = NULL,
                    sequencing.type = c("RNA","Exome","Genome"),
                    nvc.mark.points = TRUE,
                    maxColumns = NULL,
                    maxRows = NULL,
                    plotList = NULL,
                    labelPlots = TRUE,
                    plot = TRUE,
                    ...){
   sequencing.type <- match.arg(sequencing.type);
   ## Builder is deferred: the engine calls it when it needs the plotter.
   makeMultiPlot.GENERIC.v10(res = res,
                    build.plotter.function = function(){
                       build.plotter.highlightSample.colorByLane(curr.sample, res,
                                                     merge.offset.outgroup = FALSE,
                                                     plotter.params = plotter.params)
                    },
                    outfile = outfile,
                    outfile.dir = outfile.dir,
                    outfile.prefix = outfile.prefix,
                    outfile.ext = outfile.ext,
                    plot.device.name = plot.device.name,
                    plotting.device.params = plotting.device.params,
                    debugMode = debugMode, verbose = verbose,
                    fig.res = fig.res,
                    fig.base.height.inches = fig.base.height.inches,
                    rasterize.large.plots = rasterize.large.plots,
                    rasterize.medium.plots = rasterize.medium.plots,
                    raster.height = raster.height,
                    raster.width = raster.width,
                    raster.res = fig.res,
                    separatePlots = separatePlots,
                    splitPlots = FALSE,
                    nvc.highlight.points = nvc.mark.points,
                    exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                    chromosome.name.style = chromosome.name.style,
                    insertSize.plot.xlim = insertSize.plot.xlim,
                    cdf.bySample = FALSE,
                    cdf.plotIntercepts = FALSE,
                    makePlots = plotList,
                    sequencing.type = sequencing.type,
                    labelPlots = labelPlots,
                    maxColumns = maxColumns,
                    maxRows = maxRows,
                    plot = plot,
                    ...)
}
############################################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
## Graphics-device names recognized by the plotting wrappers in this file
## ("curr" means "draw on the currently open device"; see
## QoRTs.open.plotting.device for which of these it actually opens).
supportedDevices <- c("png", "CairoPNG", "tiff", "jpeg", "CairoPDF", "pdf", "svg", "curr");
## Subset of the above that produce vector (resolution-independent) output:
supportedVectorDevices <- c("pdf", "CairoPDF", "svg");
## Devices flagged "for print" -- usage not visible in this file; presumably
## selects print-quality defaults elsewhere (TODO: confirm against callers).
forPrintDevices <- c("pdf", "CairoPDF");
## getBestLayout: compute a panel-layout matrix for graphics::layout().
##
## Args:
##   plotWidths: integer vector; plotWidths[i] is how many layout cells
##               (columns) plot i should span.
##   maxColumns: maximum cells per row; defaults to ceiling(sqrt(total width)).
##   maxRows:    cap on the number of rows built. Plots that do not fit within
##               maxRows are simply left out of the matrix (the caller pages
##               through the remainder -- see the layoutCap logic upstream).
##   nrow:       if non-NULL, (a) used as maxRows when maxRows is NULL, and
##               (b) the returned matrix is padded with all-zero rows up to
##               'nrow' rows (zero cells are left blank by layout()).
##
## Returns a list with:
##   ht, wd          -- row/column counts of the matrix BEFORE zero-row padding
##   initLayoutFunct -- closure that applies layout(mat) to the current device
##   mat             -- the (possibly padded) layout matrix itself
getBestLayout <- function(plotWidths, maxColumns = NULL, maxRows = NULL, nrow = NULL){
   ## NOTE(review): 'ct' is computed but never used afterwards.
   ct <- sum(plotWidths);
   plotCt <- length(plotWidths);
   #message("plotWidths = ",plotWidths,", maxColumns = ",maxColumns,", maxRows = ",maxRows,", nrow = ",nrow);
   if(is.null(maxColumns)){
     maxColumns <- ceiling(sqrt(sum(plotWidths)));
   }
   if(is.null(maxRows)){
     if(! is.null(nrow)){
        maxRows <- nrow;
     } else {
        maxRows <- Inf;
     }
   }
   ## Greedy row packing: each row takes as many consecutive plots as fit
   ## within maxColumns cells; a plot wider than the remaining space starts
   ## a new row. 'mv' starts as NULL (c() is NULL), so the is.null() guard
   ## below protects the nrow(mv) test on the first pass.
   soFar <- 1;
   mv <- c();
   while(soFar <= plotCt && (is.null(mv) || nrow(mv) < maxRows)){
     v <- rep(soFar,plotWidths[soFar]);
     soFar = soFar + 1;
     while(soFar <= plotCt && length(v) + plotWidths[soFar] <= maxColumns){
       v <- c(v,rep(soFar,plotWidths[soFar]));
       soFar = soFar + 1;
     }
     ## Right-pad a short row with zeros so every row has maxColumns cells.
     if(length(v) < maxColumns){
       v <- c(v, rep(0, maxColumns - length(v) ) )
     }
     mv <- rbind(mv,v);
   }
   ## ht/wd are captured before any zero-row padding below.
   ht <- nrow(mv);
   wd <- ncol(mv);
   mat <- mv;
   ## Pad with all-zero rows up to 'nrow' rows when requested. Although the
   ## parameter 'nrow' shadows base::nrow, the call nrow(mat) still resolves
   ## to the base function: R skips non-function bindings when a symbol is
   ## used in call position.
   if( (! is.null(nrow)) ){
     while(nrow(mat) < nrow){
       mat <- rbind(mat,rep(0,ncol(mat)));
     }
   }
   ## Deferred so the caller can (re)apply the layout after opening a device.
   initLayoutFunct <- function(){
      #message("initialized layout!");
      layout(mat);
   };
   #message("ht = ",ht,"wd = ",wd);
   #print(mat);
   return(list(ht = ht, wd = wd,initLayoutFunct=initLayoutFunct,mat=mat));
}
############################################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
## QoRTs.open.plotting.device: open a graphics device of the requested type
## for 'filename' and return the matching "close" function the caller should
## invoke when the plot is finished.
##
## Args:
##   filename:         output file path (injected into device.params).
##   plot.device.name: one of "png", "CairoPNG", "CairoPDF", "pdf", or NULL.
##                     For pdf-type devices (and NULL) nothing is opened here
##                     -- the caller manages the device -- and a no-op close
##                     function is returned.
##   device.params:    extra arguments passed through to the device function.
##
## Returns: a zero-argument function that closes (or no-ops) the device.
QoRTs.open.plotting.device <- function(filename, plot.device.name = "png", device.params = list()){
   device.params.final <- device.params;
   device.params.final[["filename"]] <- filename;
   funct.do.nothing <- function(){
     #do nothing!
   }
   if(is.null(plot.device.name)){
     return(funct.do.nothing);
   } else if(plot.device.name == "png"){
      do.call(grDevices::png,device.params.final);
      return(dev.off)
   } else if(plot.device.name == "CairoPNG"){
      ## Fail fast with a clear message if Cairo is not installed, instead of
      ## the confusing downstream error from Cairo::CairoPNG. (The original
      ## ignored requireNamespace()'s FALSE return value.)
      if(! requireNamespace("Cairo", quietly = TRUE)){
         stop("FATAL ERROR: QoRTs.open.plotting.device: the 'Cairo' package is required for device type CairoPNG but is not installed.");
      }
      do.call(Cairo::CairoPNG,device.params.final);
      return(dev.off)
   ## BUGFIX (idiom): use scalar '||' rather than elementwise '|' in a
   ## single-condition if(); behavior is unchanged for length-1 inputs.
   } else if(plot.device.name == "CairoPDF" || plot.device.name == "pdf"){
      #do nothing!
      return(funct.do.nothing);
   } else {
      stop(paste0("FATAL ERROR: QoRTs.open.plotting.device: Unrecognized device type: ",plot.device.name));
   }
}
#######################################################################
#######################################################################
#######################################################################
#######################################################################
######### INTERNAL FUNCTIONS:
#######################################################################
#######################################################################
#######################################################################
#######################################################################
## Plot names to skip when the data are RNA sequencing (on-target metrics
## only apply to capture-based protocols).
## NOTE(review): "overlap.mismatch.byAvgQual" does not match the
## "overlapMismatch.byQual.avg" spelling used below / in the plot command
## list -- confirm which key is intended.
SKIP.FOR.RNA.DATA <- c("onTarget.rates", "onTarget.counts", "overlap.mismatch.byAvgQual");
## Plot names to skip when the data are exome sequencing (splice-junction and
## gene-body metrics only apply to RNA data).
SKIP.FOR.EXOME.DATA <- c(
      "genebody.coverage.umquartileExpressionGenes", "genebody.coverage.lowExpressionGenes",
      "sj.locus.ct", "sj.event.proportionByType", "sj.event.rate",
      "norm.factors", "norm.vs.TC", "SpliceProfile", "overlapMismatch.byQual.avg"
);
PLOTTING.FUNCTION.COMMAND.LIST <- list(
legend = list(wd=1,FUN=function(plotter,debugMode,rast,params,...){ makePlot.legend.box(plotter, debugMode = debugMode, ...) }),
qual.pair.min =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"min", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["ht"]],plot=plot, ...)}),
qual.pair.lowerQuartile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"lowerQuartile", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
qual.pair.median =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"median", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
qual.pair.upperQuartile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"upperQuartile", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
qual.pair.max =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.qual.pair(plotter,"max", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
clippingProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.clipping(plotter, debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
DeletionProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarOp.byCycle(plotter,"Del", debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
InsertionProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarOp.byCycle(plotter,"Ins", debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
SpliceProfile =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarOp.byCycle(plotter,"Splice", debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
InsertionLengthHisto =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarLength.distribution(plotter,"Ins", log.y = TRUE, debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
DeletionLengthHisto =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.cigarLength.distribution(plotter,"Del", log.y = TRUE, debugMode = debugMode,rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
gc =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.gc(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
missingness.rate =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.missingness.rate(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot,...)}),
dropped.rate =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.dropped.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
insert.size =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.insert.size(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]], xlim = params[["insertSize.plot.xlim"]],plot=plot, ...)}),
overlap.coverage =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlap.coverage(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
readLengthDist =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.readLengthDist(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
overlapMismatch.byCycle =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byCycle(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
#overlapMismatch.byQual.avg =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byQual.avg(plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
overlapMismatch.byQual.min =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byQual.min( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
overlapMismatch.byQual.read =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byQual.read( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
overlapMismatch.byBase =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byBase(plotter, debugMode = debugMode,plot=plot, ...)}),
overlapMismatch.size =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.size( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
overlapMismatch.byBase.atScore=list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.overlapMismatch.byBase.atScore( plotter,atScore=41, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
referenceMismatch.byCycle =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byCycle( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
referenceMismatch.byScore =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byScore( plotter, debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
referenceMismatch.byBase =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byBase( plotter, debugMode = debugMode,plot=plot, ...)}),
referenceMismatch.byBase.atScore.R1=list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byBase.atScore( plotter,atScore=41,forRead="R1", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
referenceMismatch.byBase.atScore.R2=list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.referenceMismatch.byBase.atScore( plotter,atScore=41,forRead="R2", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
gene.diversity =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.gene.cdf(plotter, sampleWise = params[["cdf.bySample"]], plot.intercepts = params[["cdf.plotIntercepts"]],
debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
genebody.coverage.allGenes =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.genebody(plotter, geneset="Overall", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
genebody.coverage.umquartileExpressionGenes =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.genebody(plotter, geneset="50-75", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
genebody.coverage.lowExpressionGenes =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.genebody(plotter, geneset="0-50", debugMode = debugMode, rasterize.plotting.area = rast[["med"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
geneAssignmentRates =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.gene.assignment.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
sj.locus.ct =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.splice.junction.loci.counts(plotter, debugMode = debugMode,plot=plot, ...)}),
sj.event.proportionByType =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.splice.junction.event.proportionsByType(plotter, debugMode = debugMode,plot=plot, ...)}),
sj.event.rate =list(wd=1,FUN=function(plotter,plot,debugMode,rast,params,...){makePlot.splice.junction.event.ratesPerRead(plotter, debugMode = debugMode,plot=plot, ...)}),
mapping.rates =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.mapping.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
chrom.rates =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.chrom.type.rates(plotter, chromosome.name.style = params[["chromosome.name.style"]], exclude.autosomes = params[["exclude.autosomes.chrom.rate.plot"]], debugMode = debugMode,plot=plot, ...)}),
norm.factors =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.norm.factors(plotter, debugMode = debugMode,plot=plot, ...)}),
norm.vs.TC =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.norm.factors.vs.TC(plotter, debugMode = debugMode,plot=plot, ...)}),
strandedness.test =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.strandedness.test(plotter, debugMode = debugMode,plot=plot, ...)}),
onTarget.counts =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.onTarget.counts(plotter, debugMode = debugMode,plot=plot, ...)}),
onTarget.rates =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.onTarget.rates(plotter, debugMode = debugMode,plot=plot, ...)}),
NVC.lead.clip =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.NVC.lead.clip(plotter, clip.amt = params[["clip.amt"]], points.highlighted = params[["nvc.highlight.points"]],
debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
NVC.tail.clip =list(wd=1,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.NVC.tail.clip(plotter, clip.amt = 12, points.highlighted = params[["nvc.highlight.points"]],
debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = rast[["wd"]],plot=plot, ...)}),
NVC.raw =list(wd=2,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.raw.NVC(plotter, points.highlighted = params[["nvc.highlight.points"]],
debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = 2* rast[["wd"]],plot=plot, ...)}),
NVC.aligned =list(wd=2,FUN=function(plotter,debugMode,rast,params,plot,...){makePlot.minus.clipping.NVC(plotter, points.highlighted = params[["nvc.highlight.points"]],
debugMode = debugMode, rasterize.plotting.area = rast[["big"]], raster.height = rast[["ht"]], raster.width = 2* rast[["wd"]],plot=plot, ...)})
);
## makeMultiPlot.GENERIC.v10
##
## Top-level driver for the composite QC multi-plot. Decides which plots to
## draw (honoring makePlots / skipPlots and the missing-data pre-check),
## resolves rasterization defaults for the chosen graphics device, builds the
## device open/close closures, and delegates the actual drawing to
## INTERNAL.plot.v10().
##
## Args:
##   res:                     QC results object, forwarded to INTERNAL.plot.v10().
##   build.plotter.function:  zero-argument factory returning the plotter object.
##   outfile/outfile.dir/outfile.prefix/outfile.ext: output file naming; when
##                            outfile is NULL it is assembled from the parts.
##   plot.device.name:        "png", "tiff", "jpeg", "CairoPNG", "svg", "pdf",
##                            "CairoPDF", or "curr" (draw on the open device).
##   plotting.device.params:  extra args merged over the device defaults.
##   rasterize.large.plots / rasterize.medium.plots: NULL means "decide from
##                            the device type" (rasterizing only helps vector
##                            output).
##   sequencing.type:         selects the default skip-list for the data type.
##   plot:                    FALSE draws placeholder panels only.
##   ...:                     forwarded to INTERNAL.plot.v10() / plot functions.
## Returns: invisibly, the result of INTERNAL.plot.v10(); called for its side
##   effect of writing plot files.
makeMultiPlot.GENERIC.v10 <- function(res,
                     build.plotter.function,
                     outfile = NULL,
                     outfile.dir,
                     outfile.prefix,
                     outfile.ext = NULL,
                     plot.device.name,
                     plotting.device.params = list(),
                     debugMode = DEFAULTDEBUGMODE, verbose = TRUE,
                     fig.res = 150,
                     fig.base.height.inches = 7,
                     rasterize.large.plots = NULL,
                     rasterize.medium.plots = NULL,
                     raster.height = fig.res*fig.base.height.inches,
                     raster.width = raster.height,
                     raster.res = fig.res,
                     separatePlots = FALSE,
                     splitPlots = FALSE,
                     nvc.highlight.points = TRUE,
                     exclude.autosomes.chrom.rate.plot = TRUE,
                     chromosome.name.style = "UCSC",
                     insertSize.plot.xlim=NULL,
                     cdf.bySample = TRUE,
                     cdf.plotIntercepts = FALSE,
                     clip.amt = 12,
                     makePlots = NULL,
                     skipPlots = NULL,
                     sequencing.type = c("RNA","Exome","Genome"),
                     skipMissingDataPlots = TRUE,
                     labelPlots = TRUE,
                     maxColumns = NULL,
                     maxRows = NULL,
                     plot = TRUE,
                     ... ){
   sequencing.type <- match.arg(sequencing.type);
   plotter <- build.plotter.function();
   ## Default plot set per sequencing type, unless the caller gave skipPlots.
   if(is.null(skipPlots)){
      if(sequencing.type == "Exome"){
         skipPlots <- SKIP.FOR.EXOME.DATA;
      } else if(sequencing.type == "RNA"){
         skipPlots <- SKIP.FOR.RNA.DATA;
      } else {
         ## "Genome" has no curated skip-list yet; skip nothing.
         ## (Equivalent to the old behavior of leaving skipPlots NULL.)
         skipPlots <- character(0);
      }
      makePlots <- names(PLOTTING.FUNCTION.COMMAND.LIST)[! names(PLOTTING.FUNCTION.COMMAND.LIST) %in% skipPlots]
      if(debugMode) message("Skipping: \"",paste0(skipPlots,collapse="\",\""),"\"");
   }
   ## Device geometry: raster devices are sized in pixels, vector in inches.
   height.per.px <- fig.res * fig.base.height.inches;
   width.per.px <- fig.res * fig.base.height.inches;
   height.per.inches <- fig.base.height.inches;
   width.per.inches <- fig.base.height.inches;
   ## Default rasterization policy: rasterizing plot interiors is only useful
   ## (and non-degrading) when the output device itself is a vector format.
   if(is.null(rasterize.large.plots)){
      if(plot.device.name %in% supportedVectorDevices){
         if(check.rasterize.or.warn("rasterize.large.plots")){
            message("Default: rasterizing large plots")
            rasterize.large.plots = TRUE;
         } else {
            rasterize.large.plots = FALSE;
         }
      } else {
         rasterize.large.plots = FALSE;
      }
   }
   if(is.null(rasterize.medium.plots)){
      if(plot.device.name %in% forPrintDevices){
         if(check.rasterize.or.warn("rasterize.medium.plots")){
            message("Default: rasterizing medium plots")
            rasterize.medium.plots = TRUE;
         } else {
            rasterize.medium.plots = FALSE;
         }
      } else {
         rasterize.medium.plots = FALSE;
      }
   }
   if(rasterize.large.plots || rasterize.medium.plots){
      ## NOTE(review): the die-check names "rasterize.large.plots" even when
      ## only medium plots are rasterized -- presumably both share the same
      ## package dependency; confirm against check.rasterize.or.die().
      check.rasterize.or.die("rasterize.large.plots");
      if((! plot.device.name %in% supportedVectorDevices) & (plot.device.name != "curr")){
         warning("rasterize.large.plots = TRUE should not be used with raster file formats (ie png, tiff, jpeg, etc). This will result in image degradation.");
      }
   }
   if(debugMode) message("Rasterize large plots: ", rasterize.large.plots);
   if(debugMode) message("Rasterize medium plots: ", rasterize.medium.plots);
   ## Pre-run every requested plot with plot=FALSE: each plot function then
   ## returns FALSE when its underlying data is absent, and those are dropped.
   if(skipMissingDataPlots){
      runParams <- list( nvc.highlight.points=nvc.highlight.points,
                   exclude.autosomes.chrom.rate.plot=exclude.autosomes.chrom.rate.plot,
                   rasterize.large.plots=rasterize.large.plots,
                   rasterize.medium.plots=rasterize.medium.plots,
                   chromosome.name.style=chromosome.name.style,
                   insertSize.plot.xlim=insertSize.plot.xlim,
                   cdf.plotIntercepts=cdf.plotIntercepts,
                   cdf.bySample=cdf.bySample,
                   clip.amt=clip.amt);
      rast <- list(big = rasterize.large.plots, med = rasterize.medium.plots, ht = raster.height, wd = raster.width, res = raster.res);
      dataMissingPlots <- sapply(makePlots,function(p){
         ! PLOTTING.FUNCTION.COMMAND.LIST[[p]]$FUN(plotter=plotter,debugMode=debugMode,rast=rast,params=runParams,plot=FALSE)
      });
      if(debugMode) message("Skipping due to missing data: \"",paste0(makePlots[dataMissingPlots],collapse="\",\""),"\"");
      makePlots <- makePlots[! dataMissingPlots]
   }
   ## Device open/close closures. The three raster formats (png/tiff/jpeg,
   ## plus CairoPNG) share identical parameter handling, as do pdf/CairoPDF;
   ## factoring the builders removes the copy-paste that previously left a
   ## duplicated, unreachable "tiff" branch in this function.
   makeRasterDevOpen <- function(devFunct){
      function(f,w,height.mult,width.mult){
         default.params <- list(filename = f, height = height.per.px * height.mult, width = width.per.px * width.mult * w, pointsize = 36, units = "px");
         dev.params <- overmerge.list(default.params,plotting.device.params);
         do.call(devFunct,dev.params)
      }
   }
   makePdfDevOpen <- function(devFunct){
      function(f,w,height.mult,width.mult){
         ## height/width 0 with paper="letter" lets the device fill the page.
         default.params <- list(file = f, height = 0, width = 0, pointsize = 10, paper = "letter");
         dev.params <- overmerge.list(default.params,plotting.device.params);
         do.call(devFunct,dev.params)
      }
   }
   devCloseFunct <- function(){dev.off()};
   if(plot.device.name == "curr"){
      if(debugMode) message("Plotting to the currently-open device...");
      devOpenFunct <- function(f,w,height.mult,width.mult){};
      devCloseFunct <- function(){};
   } else if(plot.device.name == "png"){
      if(is.null(outfile.ext)) outfile.ext = ".png";
      devOpenFunct <- makeRasterDevOpen(png);
   } else if(plot.device.name == "tiff"){
      if(is.null(outfile.ext)) outfile.ext = ".tiff";
      devOpenFunct <- makeRasterDevOpen(tiff);
   } else if(plot.device.name == "jpeg"){
      if(is.null(outfile.ext)) outfile.ext = ".jpg";
      devOpenFunct <- makeRasterDevOpen(jpeg);
   } else if(plot.device.name == "CairoPNG"){
      if(! requireNamespace("Cairo", quietly=TRUE)) stop("Error: package Cairo not found. Install package Cairo or set plot.device.name to something other than CairoPNG or CairoPDF.");
      if(is.null(outfile.ext)) outfile.ext = ".png";
      devOpenFunct <- makeRasterDevOpen(Cairo::CairoPNG);
   } else if(plot.device.name == "svg"){
      if(is.null(outfile.ext)) outfile.ext = ".svg";
      devOpenFunct <- function(f,w,height.mult,width.mult){
         ## svg() is sized in inches, not pixels.
         default.params <- list(filename = f, height = height.per.inches * height.mult, width = width.per.inches * width.mult * w, pointsize = 24);
         dev.params <- overmerge.list(default.params,plotting.device.params);
         do.call(svg,dev.params)
      };
   } else if(plot.device.name == "pdf"){
      if(is.null(outfile.ext)) outfile.ext = ".pdf";
      devOpenFunct <- makePdfDevOpen(pdf);
   } else if(plot.device.name == "CairoPDF"){
      if(! requireNamespace("Cairo",quietly=TRUE)) stop("Error: package Cairo not found. Install package Cairo or set plot.device.name to something other than CairoPNG or CairoPDF.");
      if(is.null(outfile.ext)) outfile.ext = ".pdf";
      devOpenFunct <- makePdfDevOpen(Cairo::CairoPDF);
   } else {
      stop(paste0("Error: graphics device \"",plot.device.name,"\" not supported! Legal options are: [",paste0(supportedDevices,collapse=","),"] Set plot.device.name to \"curr\" to plot to the currently-open and/or default device."));
   }
   if(is.null(outfile) && plot.device.name != "curr"){
      outfile <- paste0(outfile.dir, outfile.prefix, outfile.ext);
   }
   ## Default grid arrangement for multipage PDF output.
   multiPage <- (plot.device.name == "CairoPDF" || plot.device.name == "pdf");
   if(is.null(maxColumns) && multiPage){
      maxColumns <- 2;
   }
   if(is.null(maxRows) && multiPage){
      maxRows <- 3;
   }
   if(multiPage){
      nrow = maxRows;
   } else {
      nrow = NULL
   }
   tryCatch({
      if(debugMode) message("Plotting extended...");
      INTERNAL.plot.v10( res = res, plotter = plotter,
                 devOpenFunct = devOpenFunct,
                 devCloseFunct = devCloseFunct,
                 outfile = outfile,
                 outfile.dir=outfile.dir,
                 outfile.prefix=outfile.prefix,
                 outfile.ext = outfile.ext,
                 skipPlots = skipPlots,
                 makePlots = makePlots,
                 verbose = verbose, debugMode = debugMode,
                 separatePlots = separatePlots,
                 labelPlots = labelPlots,
                 maxColumns = maxColumns,
                 maxRows = maxRows,
                 nrow=nrow,
                 multiPage = multiPage,
                 cdf.bySample = cdf.bySample,
                 cdf.plotIntercepts = cdf.plotIntercepts,
                 nvc.highlight.points = nvc.highlight.points,
                 exclude.autosomes.chrom.rate.plot = exclude.autosomes.chrom.rate.plot,
                 chromosome.name.style = chromosome.name.style,
                 insertSize.plot.xlim=insertSize.plot.xlim,
                 clip.amt = clip.amt,
                 rasterize.large.plots = rasterize.large.plots,
                 rasterize.medium.plots = rasterize.medium.plots,
                 raster.height = raster.height,
                 raster.width = raster.width,
                 raster.res = raster.res,
                 plot=plot,
                 ...);
   }, error = function(e){
      ## Make sure a failed plot does not leave a graphics device open.
      message(paste0("PLOTTING ERROR: ", e));
      devCloseFunct();
   }, finally = {
      if(debugMode) message("Done with plot.");
   });
}
## INTERNAL.plot.v10
##
## Worker that actually draws the multi-plot. devOpenFunct(file, wd,
## height.mult, width.mult) / devCloseFunct() are the device closures built
## by makeMultiPlot.GENERIC.v10(). Output modes:
##   * separatePlots=TRUE : one device/file per plot.
##   * maxRows=NULL       : everything on one page via getBestLayout().
##   * maxRows set        : paginated; either pages of one multipage file
##                          (multiPage=TRUE, pdf devices) or numbered
##                          "<prefix>.pN<ext>" files otherwise.
## When plot=FALSE, each panel is a placeholder showing only the plot name.
INTERNAL.plot.v10 <- function( res,
                     plotter,
                     devOpenFunct,
                     devCloseFunct,
                     outfile = NULL,
                     outfile.dir,
                     outfile.prefix,
                     outfile.ext = NULL,
                     verbose = TRUE,
                     debugMode,
                     separatePlots = FALSE,
                     labelPlots = TRUE,
                     maxColumns = NULL,
                     maxRows = NULL,
                     nrow = NULL,
                     multiPage = FALSE,
                     makePlots = names(PLOTTING.FUNCTION.COMMAND.LIST),
                     skipPlots = c(),
                     skipMissingDataPlots = FALSE,
                     cex.corner.label = 2,
                     #makeplot params:
                     cdf.bySample = FALSE,
                     nvc.highlight.points = TRUE,
                     cdf.plotIntercepts = TRUE,
                     exclude.autosomes.chrom.rate.plot = TRUE,
                     chromosome.name.style = "UCSC",
                     insertSize.plot.xlim=NULL,
                     clip.amt = 12,
                     #Rasterization params:
                     rasterize.large.plots = FALSE,
                     rasterize.medium.plots = FALSE,
                     raster.height = 1050,
                     raster.width = 1050,
                     raster.res = 150,
                     plot = TRUE,
                     #graphics par:
                     ...){
   ## Bundled options handed to every plot function.
   rast <- list(big = rasterize.large.plots, med = rasterize.medium.plots, ht = raster.height, wd = raster.width, res = raster.res);
   params <- list( nvc.highlight.points=nvc.highlight.points,
                   exclude.autosomes.chrom.rate.plot=exclude.autosomes.chrom.rate.plot,
                   rasterize.large.plots=rasterize.large.plots,
                   rasterize.medium.plots=rasterize.medium.plots,
                   chromosome.name.style=chromosome.name.style,
                   insertSize.plot.xlim=insertSize.plot.xlim,
                   cdf.plotIntercepts=cdf.plotIntercepts,
                   cdf.bySample=cdf.bySample,
                   clip.amt=clip.amt);
   if(verbose) {message(paste0("Starting compiled plot..."));}
   ts <- timestamp();
   ## Panel corner labels: a..z, aa..az, ba..bz, ca..cz (104 in total).
   corner.labels <- c(letters, paste0(letters[1],letters), paste0(letters[2],letters), paste0(letters[3],letters));
   plot.corner.label <- function(i){
      if(labelPlots){
         devlim <- device.limits();
         text(devlim[1],devlim[4], corner.labels[i] , cex = cex.corner.label, adj = c(-0.1,1.1), xpd=TRUE);
      }
   }
   ## Select and order the plots to draw.
   plotList <- PLOTTING.FUNCTION.COMMAND.LIST[makePlots];
   plotList <- plotList[! names(plotList) %in% skipPlots];
   if(skipMissingDataPlots){
      ## plot=FALSE probes each function for data availability.
      plotList <- plotList[ sapply(plotList,function(pf){
         pf$FUN(plotter=plotter,debugMode=debugMode,rast=rast,params=params,plot=FALSE)
      })];
   }
   plotWidths <- sapply(plotList,FUN=function(pf){ pf$wd })
   pageNum <- 1;
   if(separatePlots){
      ## Devices are opened/closed per plot inside the main loop.
   } else if(is.null(maxRows)){
      ## Single page holding every plot.
      blo <- getBestLayout(plotWidths,maxColumns=maxColumns);
      devOpenFunct(outfile,1,blo$ht,blo$wd);
      blo$initLayoutFunct();
   } else {
      ## Paginated output: lay out as many plots as fit on the first page.
      blo <- getBestLayout(plotWidths,maxColumns=maxColumns,maxRows=maxRows,nrow=nrow);
      layoutCap <- max(blo$mat);     # number of plot slots on the current page
      layoutStart <- 1;              # index of the first plot on the current page
      if(multiPage){
         devOpenFunct(outfile,1,blo$ht,blo$wd);
      } else {
         devOpenFunct(paste0(outfile.dir,"/",outfile.prefix,".p",pageNum,outfile.ext),1,blo$ht,blo$wd);
      }
      blo$initLayoutFunct();
   }
   plotCt = 0;
   for(i in seq_along(plotList)){
      plotFUN <- plotList[[i]]$FUN;
      plotName <- names(plotList)[[i]];
      plotWd <- plotWidths[[i]];
      if(separatePlots){
         devOpenFunct(paste0(outfile.dir,"/",outfile.prefix,".",plotName,outfile.ext),plotWd,1,1);
      } else if(is.null(maxRows)){
         ## Single-page layout is already active; nothing to do.
      } else if(layoutStart + layoutCap - 1 < i){
         ## The current page is full: compute a layout for the remaining
         ## plots and start a new page.
         blo <- getBestLayout(plotWidths[i:length(plotWidths)],maxColumns=maxColumns,maxRows=maxRows,nrow=nrow);
         layoutCap <- max(blo$mat);
         layoutStart <- i;
         if(debugMode){
            message(" layoutCap = ",layoutCap);
            message(" layoutStart = ",layoutStart);
            message(" i = ",i);
         }
         if(multiPage){
            ## Multipage devices (pdf) advance pages automatically when the
            ## new layout is initialized; keep the same device open.
         } else {
            ## BUGFIX: previously each additional page was written to the
            ## hard-coded ".p1" file (overwriting page 1) and the finished
            ## device was never closed. Close it and open the next numbered
            ## page file.
            devCloseFunct();
            pageNum <- pageNum + 1;
            devOpenFunct(paste0(outfile.dir,"/",outfile.prefix,".p",pageNum,outfile.ext),1,blo$ht,blo$wd);
         }
         blo$initLayoutFunct();
      }
      plotCt=plotCt+1;
      if(plot){
         plotFUN(plotter=plotter,debugMode=debugMode,rast=rast,params=params,plot=TRUE,...)
      } else {
         ## Dry run: placeholder panel carrying only the plot's name.
         plot.new(); plot.window(xlim=c(0,1),ylim=c(0,1));
         text(0.5,0.5,label=plotName);
      }
      plot.corner.label(i);
      if(separatePlots){
         devCloseFunct();
      }
   }
   if(! separatePlots){
      devCloseFunct();
   }
   if(debugMode) message("Finished Multiplot",getTimeAndDiff(ts))
}
|
####Log
#
#module wrapper
## Module UI for the DataTables demo: three tabs (diamonds / mtcars / iris),
## each with its own sidebar controls shown via conditionalPanel.
##
## Args:
##   id: module namespace id (pair with callModule(tableTest, id)).
## Returns: a fluidPage UI definition.
tableTestUI <- function(id) {
  ns <- NS(id)
  fluidPage(
    title = "Examples of DataTables",
    sidebarLayout(
      sidebarPanel(
        ## Column picker, visible only while the "diamonds" tab is active.
        ## NOTE(review): 'input.dataset' is a client-side JS condition and the
        ## tabsetPanel id below is deliberately NOT wrapped in ns() -- the two
        ## must stay in sync; confirm this is intentional for this module.
        conditionalPanel(
          'input.dataset === "diamonds"',
          checkboxGroupInput(ns("show_vars"), "Columns in diamonds to show:",
                             names(diamonds), selected = names(diamonds))
        ),
        conditionalPanel(
          'input.dataset === "mtcars"',
          helpText("Click the column header to sort a column.")
        ),
        conditionalPanel(
          'input.dataset === "iris"',
          helpText("Display 5 records by default.")
          ,
          ## Inline UI fragment from columns.R; $value extracts the evaluated
          ## result (source() otherwise returns a list with a "visible" flag).
          source("columns.R", local = TRUE)$value
        )
      ),
      mainPanel(
        tabsetPanel(
          id = 'dataset',
          tabPanel("diamonds", DT::dataTableOutput(ns("mytable1"))),
          tabPanel("mtcars", DT::dataTableOutput(ns("mytable2"))),
          tabPanel("iris", DT::dataTableOutput(ns("mytable3")))
        )
      )
    )
  )
}
## Module server for the DataTables demo; renders one table per tab.
## Pair with tableTestUI() via callModule(tableTest, id).
tableTest <- function(input, output, session) {
  ## Random 1000-row subsample of diamonds, drawn once per session.
  ## NOTE(review): no set.seed(), so the subsample differs across sessions --
  ## confirm that nondeterminism is acceptable here.
  diamonds2 <- diamonds[sample(nrow(diamonds), 1000), ]
  ## Show only the user-selected columns; drop = FALSE keeps a data frame
  ## even when a single column is selected.
  output$mytable1 <- DT::renderDataTable({
    DT::datatable(diamonds2[, input$show_vars, drop = FALSE])
  })
  ## orderClasses = TRUE attaches CSS classes to sorted columns so they are
  ## visibly highlighted.
  output$mytable2 <- DT::renderDataTable({
    DT::datatable(mtcars, options = list(orderClasses = TRUE))
  })
  ## Custom page-length menu; show 5 rows per page by default.
  output$mytable3 <- DT::renderDataTable({
    DT::datatable(iris, options = list(lengthMenu = c(5, 30, 50), pageLength = 5))
  })
}
#shinyApp(ui, server) | /Portfolio_Analytics/Shiny_prototype/Shinydashboard/modules/tableTest.R | no_license | DannyJRa/Portfolio_Analytics | R | false | false | 1,957 | r | ####Log
#
#module wrapper
## Module UI for the DataTables demo: three tabs (diamonds / mtcars / iris),
## each with its own sidebar controls shown via conditionalPanel.
##
## Args:
##   id: module namespace id (pair with callModule(tableTest, id)).
## Returns: a fluidPage UI definition.
tableTestUI <- function(id) {
  ns <- NS(id)
  fluidPage(
    title = "Examples of DataTables",
    sidebarLayout(
      sidebarPanel(
        ## Column picker, visible only while the "diamonds" tab is active.
        ## NOTE(review): 'input.dataset' is a client-side JS condition and the
        ## tabsetPanel id below is deliberately NOT wrapped in ns() -- the two
        ## must stay in sync; confirm this is intentional for this module.
        conditionalPanel(
          'input.dataset === "diamonds"',
          checkboxGroupInput(ns("show_vars"), "Columns in diamonds to show:",
                             names(diamonds), selected = names(diamonds))
        ),
        conditionalPanel(
          'input.dataset === "mtcars"',
          helpText("Click the column header to sort a column.")
        ),
        conditionalPanel(
          'input.dataset === "iris"',
          helpText("Display 5 records by default.")
          ,
          ## Inline UI fragment from columns.R; $value extracts the evaluated
          ## result (source() otherwise returns a list with a "visible" flag).
          source("columns.R", local = TRUE)$value
        )
      ),
      mainPanel(
        tabsetPanel(
          id = 'dataset',
          tabPanel("diamonds", DT::dataTableOutput(ns("mytable1"))),
          tabPanel("mtcars", DT::dataTableOutput(ns("mytable2"))),
          tabPanel("iris", DT::dataTableOutput(ns("mytable3")))
        )
      )
    )
  )
}
## Module server for the DataTables demo; renders one table per tab.
## Pair with tableTestUI() via callModule(tableTest, id).
tableTest <- function(input, output, session) {
  ## Random 1000-row subsample of diamonds, drawn once per session.
  ## NOTE(review): no set.seed(), so the subsample differs across sessions --
  ## confirm that nondeterminism is acceptable here.
  diamonds2 <- diamonds[sample(nrow(diamonds), 1000), ]
  ## Show only the user-selected columns; drop = FALSE keeps a data frame
  ## even when a single column is selected.
  output$mytable1 <- DT::renderDataTable({
    DT::datatable(diamonds2[, input$show_vars, drop = FALSE])
  })
  ## orderClasses = TRUE attaches CSS classes to sorted columns so they are
  ## visibly highlighted.
  output$mytable2 <- DT::renderDataTable({
    DT::datatable(mtcars, options = list(orderClasses = TRUE))
  })
  ## Custom page-length menu; show 5 rows per page by default.
  output$mytable3 <- DT::renderDataTable({
    DT::datatable(iris, options = list(lengthMenu = c(5, 30, 50), pageLength = 5))
  })
}
#shinyApp(ui, server) |
## Benchmark script: random forest (via mlr) on OpenML dataset
## "fri_c3_250_5", audited with 5-fold cross-validation.
## The "#:#" markers look machine-parsed by downstream tooling -- leave them
## exactly as they are.
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
## Fixed seed so the CV folds (and thus the scores below) are reproducible.
set.seed(1)
#:# data
## Downloads the dataset from the OpenML server (network access required).
dataset <- getOMLDataSet(data.name = "fri_c3_250_5")
head(dataset$data)
#:# preprocessing
## No preprocessing is applied; the data is used as downloaded.
head(dataset$data)
#:# model
## Binary classification task with probability predictions.
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.randomForest", par.vals = list(), predict.type = "prob")
#:# hash
## Model fingerprint: the marker line below records the precomputed digest of
## (task, lrn); `hash` recomputes it for verification.
#:# 0f89dd3e167c3328df1a7eba1316530e
hash <- digest(list(task, lrn))
hash
#:# audit
## 5-fold CV with a panel of classification measures; ACC holds the
## aggregated scores.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
## Write sessionInfo() to a file for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_fri_c3_250_5/classification_binaryClass/0f89dd3e167c3328df1a7eba1316530e/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 698 | r | #:# libraries
## (Duplicate copy of the benchmark script above, repeated by the dataset
## dump.) Random forest via mlr on OpenML dataset "fri_c3_250_5", audited
## with 5-fold CV. The "#:#" markers look machine-parsed -- leave them as-is.
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
## Fixed seed so the CV folds (and thus the scores below) are reproducible.
set.seed(1)
#:# data
## Downloads the dataset from the OpenML server (network access required).
dataset <- getOMLDataSet(data.name = "fri_c3_250_5")
head(dataset$data)
#:# preprocessing
## No preprocessing is applied; the data is used as downloaded.
head(dataset$data)
#:# model
## Binary classification task with probability predictions.
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.randomForest", par.vals = list(), predict.type = "prob")
#:# hash
## Model fingerprint: the marker line below records the precomputed digest of
## (task, lrn); `hash` recomputes it for verification.
#:# 0f89dd3e167c3328df1a7eba1316530e
hash <- digest(list(task, lrn))
hash
#:# audit
## 5-fold CV with a panel of classification measures; ACC holds the
## aggregated scores.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
## Write sessionInfo() to a file for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
## Ensemble averaging, stage 1. All chunks evaluate inside `env`, which is
## expected to already hold: testX1 (4 groups: origin/repaired/removed/
## relabeled), predX1 (per-group ensemble predictions), and `type`
## (column labels). `numEns` is set here and reused by later chunks.
#--bestNN----------------------------------------
## For each group k and each column j, rank the ensemble members by their
## InputTrainScore and keep the indices of the best 2*numEns+1 = 7 members.
evalq({
  numEns <- 3L
  k <- 1L
  while (k <= 4) {
    foreach(j = 1:4, .combine = "cbind") %do% {
      testX1[[k]]$InputTrainScore[ ,j] %>% order(decreasing = TRUE) %>% head(2*numEns + 1)
    } -> testX1[[k]]$bestNN
    dimnames(testX1[[k]]$bestNN) <- list(NULL, type)
    k <- k + 1
  }
}, env)
## Print the selected member indices for each group (interactive inspection).
env$testX1$origin$bestNN
env$testX1$repaired$bestNN
env$testX1$removed$bestNN
env$testX1$relabeled$bestNN
#--Averaging--train------------------------
## Average the train-set predictions of the selected best members, per group
## and per selection type (row-wise mean over the 2*numEns+1 columns).
evalq({
  k <- 1L
  while (k <= 4) {# group
    foreach(j = 1:4, .combine = "cbind") %do% {# type
      bestNN <- testX1[[k]]$bestNN[ ,j]
      predX1[[k]]$pred$InputTrain[ ,bestNN] %>%
        apply(1, function(x) sum(x)) %>%
        divide_by((2*numEns + 1))
    } -> testX1[[k]]$TrainYpred
    dimnames(testX1[[k]]$TrainYpred) <- list(NULL, paste0("Y.aver_", type))
    k <- k + 1
  }
}, env)
#--Averaging--test------------------------
## Same averaging, applied to the test-set predictions.
evalq({
  k <- 1L
  while (k <= 4) {# group
    foreach(j = 1:4, .combine = "cbind") %do% {# type
      bestNN <- testX1[[k]]$bestNN[ ,j]
      predX1[[k]]$pred$InputTest[ ,bestNN] %>%
        apply(1, function(x) sum(x)) %>%
        divide_by((2*numEns + 1))
    } -> testX1[[k]]$TestYpred
    dimnames(testX1[[k]]$TestYpred) <- list(NULL, paste0("Y.aver_", type))
    k <- k + 1
  }
}, env)
#--Averaging--test1------------------------
## Same averaging, applied to the second hold-out set (test1).
evalq({
  k <- 1L
  while (k <= 4) {# group
    foreach(j = 1:4, .combine = "cbind") %do% {# type
      bestNN <- testX1[[k]]$bestNN[ ,j]
      predX1[[k]]$pred$InputTest1[ ,bestNN] %>%
        apply(1, function(x) sum(x)) %>%
        divide_by((2*numEns + 1))
    } -> testX1[[k]]$Test1Ypred
    dimnames(testX1[[k]]$Test1Ypred) <- list(NULL, paste0("Y.aver_", type))
    k <- k + 1
  }
}, env)
## Ensemble averaging, stage 2: classification thresholds and F1 scoring.
#-th_aver------------------------------
## For each group, compute a decision threshold for every combination of
## averaged column (j) and threshold type (half/med/mce/both) from the TRAIN
## predictions. Ytest/Ytest1/Ytest2 hold the true labels of the three sets
## and are reused by the Metrics chunks below.
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  Ytest = X1$train$y
  Ytest1 = X1$test$y
  Ytest2 = X1$test1$y
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        GetThreshold(testX1[[k]]$TrainYpred[ ,j], Ytest, type[i])
      }
    } -> testX1[[k]]$th_aver
    dimnames(testX1[[k]]$th_aver) <- list(type, colnames(testX1[[k]]$TrainYpred))
    k <- k + 1
  }
}, env)
#---Metrics--train-------------------------------------
## Binarize the averaged TRAIN predictions with each threshold and score the
## result with the mean F1 (rounded to 3 digits).
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        ifelse(testX1[[k]]$TrainYpred[ ,j] > testX1[[k]]$th_aver[i,j], 1, 0) -> clAver
        Evaluate(actual = Ytest, predicted = clAver)$Metrics$F1 %>%
          mean() %>% round(3)
      }
    } -> testX1[[k]]$TrainScore
    dimnames(testX1[[k]]$TrainScore) <- list(type, colnames(testX1[[k]]$TrainYpred))
    k <- k + 1
  }
}, env)
#---Metrics--test-------------------------------------
## Same scoring on the TEST predictions (thresholds stay those fit on train).
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        ifelse(testX1[[k]]$TestYpred[ ,j] > testX1[[k]]$th_aver[i,j], 1, 0) -> clAver
        Evaluate(actual = Ytest1, predicted = clAver)$Metrics$F1 %>%
          mean() %>% round(3)
      }
    } -> testX1[[k]]$TestScore
    dimnames(testX1[[k]]$TestScore) <- list(type, colnames(testX1[[k]]$TestYpred))
    k <- k + 1
  }
}, env)
#---Metrics--test1-------------------------------------
## Same scoring on the second hold-out set (test1).
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        ifelse(testX1[[k]]$Test1Ypred[ ,j] > testX1[[k]]$th_aver[i,j], 1, 0) -> clAver
        Evaluate(actual = Ytest2, predicted = clAver)$Metrics$F1 %>%
          mean() %>% round(3)
      }
    } -> testX1[[k]]$Test1Score
    dimnames(testX1[[k]]$Test1Score) <- list(type, colnames(testX1[[k]]$Test1Ypred))
    k <- k + 1
  }
}, env)
##===========================================================
| /PartVIII/Averaging.R | no_license | hunglviet/darch12 | R | false | false | 4,565 | r | #--bestNN----------------------------------------
evalq({
numEns <- 3L
k <- 1L
while (k <= 4) {
foreach(j = 1:4, .combine = "cbind") %do% {
testX1[[k]]$InputTrainScore[ ,j] %>% order(decreasing = TRUE) %>% head(2*numEns + 1)
} -> testX1[[k]]$bestNN
dimnames(testX1[[k]]$bestNN) <- list(NULL, type)
k <- k + 1
}
}, env)
env$testX1$origin$bestNN
env$testX1$repaired$bestNN
env$testX1$removed$bestNN
env$testX1$relabeled$bestNN
#--Averaging--train------------------------
evalq({
k <- 1L
while (k <= 4) {# group
foreach(j = 1:4, .combine = "cbind") %do% {# type
bestNN <- testX1[[k]]$bestNN[ ,j]
predX1[[k]]$pred$InputTrain[ ,bestNN] %>%
apply(1, function(x) sum(x)) %>%
divide_by((2*numEns + 1))
} -> testX1[[k]]$TrainYpred
dimnames(testX1[[k]]$TrainYpred) <- list(NULL, paste0("Y.aver_", type))
k <- k + 1
}
}, env)
#--Averaging--test------------------------
evalq({
k <- 1L
while (k <= 4) {# group
foreach(j = 1:4, .combine = "cbind") %do% {# type
bestNN <- testX1[[k]]$bestNN[ ,j]
predX1[[k]]$pred$InputTest[ ,bestNN] %>%
apply(1, function(x) sum(x)) %>%
divide_by((2*numEns + 1))
} -> testX1[[k]]$TestYpred
dimnames(testX1[[k]]$TestYpred) <- list(NULL, paste0("Y.aver_", type))
k <- k + 1
}
}, env)
#--Averaging--test1------------------------
evalq({
k <- 1L
while (k <= 4) {# group
foreach(j = 1:4, .combine = "cbind") %do% {# type
bestNN <- testX1[[k]]$bestNN[ ,j]
predX1[[k]]$pred$InputTest1[ ,bestNN] %>%
apply(1, function(x) sum(x)) %>%
divide_by((2*numEns + 1))
} -> testX1[[k]]$Test1Ypred
dimnames(testX1[[k]]$Test1Ypred) <- list(NULL, paste0("Y.aver_", type))
k <- k + 1
}
}, env)
#-th_aver------------------------------
# Compute classification thresholds for the averaged *train* predictions:
# for each variant k and each subset type j, GetThreshold() is evaluated for
# all 4 threshold types, producing the 4x4 matrix testX1[[k]]$th_aver
# (rows = threshold type, columns = subset type).
# NOTE(review): GetThreshold() and qc() are defined elsewhere in the project;
# their exact semantics cannot be confirmed from this file.
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  Ytest = X1$train$y       # true labels: training set
  Ytest1 = X1$test$y       # true labels: first test set (used by later blocks)
  Ytest2 = X1$test1$y      # true labels: second test set (used by later blocks)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        GetThreshold(testX1[[k]]$TrainYpred[ ,j], Ytest, type[i])
      }
    } -> testX1[[k]]$th_aver
    dimnames(testX1[[k]]$th_aver) <- list(type, colnames(testX1[[k]]$TrainYpred))
    k <- k + 1
  }
}, env)
#---Metrics--train-------------------------------------
# Score the averaged ensemble on the training set: binarize the averaged
# prediction with each threshold type i, then record the mean F1 score
# (rounded to 3 digits) reported by Evaluate().  Result: the 4x4 matrix
# testX1[[k]]$TrainScore (rows = threshold type, columns = subset type).
# NOTE(review): Evaluate() is defined elsewhere in the project; Ytest/Ytest1/
# Ytest2 come from the th_aver block above.
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        ifelse(testX1[[k]]$TrainYpred[ ,j] > testX1[[k]]$th_aver[i,j], 1, 0) -> clAver
        Evaluate(actual = Ytest, predicted = clAver)$Metrics$F1 %>%
          mean() %>% round(3)
      }
    } -> testX1[[k]]$TrainScore
    dimnames(testX1[[k]]$TrainScore) <- list(type, colnames(testX1[[k]]$TrainYpred))
    k <- k + 1
  }
}, env)
#---Metrics--test-------------------------------------
# Same scoring on the first test set (true labels: Ytest1).
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        ifelse(testX1[[k]]$TestYpred[ ,j] > testX1[[k]]$th_aver[i,j], 1, 0) -> clAver
        Evaluate(actual = Ytest1, predicted = clAver)$Metrics$F1 %>%
          mean() %>% round(3)
      }
    } -> testX1[[k]]$TestScore
    dimnames(testX1[[k]]$TestScore) <- list(type, colnames(testX1[[k]]$TestYpred))
    k <- k + 1
  }
}, env)
#---Metrics--test1-------------------------------------
# Same scoring on the second test set (true labels: Ytest2).
evalq({
  k <- 1L #origin
  #k <- 2L #repaired
  #k <- 3L #removed
  #k <- 4L #relabeling
  type <- qc(half, med, mce, both)
  while (k <= 4) { # group
    foreach(j = 1:4, .combine = "cbind") %do% {# type subset
      foreach(i = 1:4, .combine = "c") %do% {# type threshold
        ifelse(testX1[[k]]$Test1Ypred[ ,j] > testX1[[k]]$th_aver[i,j], 1, 0) -> clAver
        Evaluate(actual = Ytest2, predicted = clAver)$Metrics$F1 %>%
          mean() %>% round(3)
      }
    } -> testX1[[k]]$Test1Score
    dimnames(testX1[[k]]$Test1Score) <- list(type, colnames(testX1[[k]]$Test1Ypred))
    k <- k + 1
  }
}, env)
##===========================================================
|
setwd("~/titani R") | /titanic.R | no_license | oni-00/titanic-R | R | false | false | 19 | r | setwd("~/titani R") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RunScenarios.R
\name{processRunningTasks}
\alias{processRunningTasks}
\title{Function that processes the tasks running currently}
\usage{
processRunningTasks(asyncTasksRunning = vector(mode = "list"),
wait = FALSE, catchErrors = TRUE, debug = FALSE,
maximumTasksToResolve = NULL)
}
\arguments{
\item{asyncTasksRunning}{A list of asynchronous tasks running currently.}
\item{wait}{A logical. TRUE if the processor needs to wait for all the results.}
\item{catchErrors}{A logical. Set to TRUE if want to catch the errors resulted
in futures.}
\item{debug}{A logical. Set to TRUE if need to print intermediate results.}
\item{maximumTasksToResolve}{An integer for the maximum number of tasks to resolve.}
}
\value{
An integer indicating the number of tasks currently running
}
\description{
\code{processRunningTasks} processes the tasks running
currently.
}
\details{
This function is called periodically, this will check all running asyncTasks
for completion. Returns number of remaining tasks so could be used as a boolean
}
| /sources/modules/VEScenario/man/processRunningTasks.Rd | permissive | rickdonnelly/VisionEval-Dev | R | false | true | 1,110 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RunScenarios.R
\name{processRunningTasks}
\alias{processRunningTasks}
\title{Function that processes the tasks running currently}
\usage{
processRunningTasks(asyncTasksRunning = vector(mode = "list"),
wait = FALSE, catchErrors = TRUE, debug = FALSE,
maximumTasksToResolve = NULL)
}
\arguments{
\item{asyncTasksRunning}{A list of asynchronous tasks running currently.}
\item{wait}{A logical. TRUE if the processor needs to wait for all the results.}
\item{catchErrors}{A logical. Set to TRUE if want to catch the errors resulted
in futures.}
\item{debug}{A logical. Set to TRUE if need to print intermediate results.}
\item{maximumTasksToResolve}{An integer for the maximum number of tasks to resolve.}
}
\value{
An integer indicating the number of tasks currently running
}
\description{
\code{processRunningTasks} processes the tasks running
currently.
}
\details{
This function is called periodically, this will check all running asyncTasks
for completion. Returns number of remaining tasks so could be used as a boolean
}
|
## R introduction demo: load the iris data from the UCI repository and
## compute a few simple summaries.
library(dplyr)
library(ggplot2)
library(data.table)
library(reshape)
library(cowplot)
library(devtools)
library(rpart)
library(rpart.plot)
install_github("vqv/ggbiplot")   # NOTE: installs from GitHub as a side effect

## Download the raw iris data (the file has no header row).
uri <- 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris <- read.csv(uri, header=FALSE)
colnames(iris) <- c('sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species')

## Quick looks at the structure and the first rows.
str(iris)
head(iris)
head(iris, 2)

print("iris is a table")
print(dim(iris))
shape <- dim(iris)               # use <- for assignment (was: shape=dim(iris))
shape[1]
shape[2]
print(sprintf("Shape of the iris data set: [%s,%s]", shape[1], shape[2]))
print(table(iris[,'species']))

## Mean sepal length — overall, then restricted to the setosa subset.
## BUGFIX: the UCI file encodes the species as "Iris-setosa" (hyphen); the
## old comparison against 'Iris.setosa' matched no rows (mean of empty -> NaN).
mean(iris$sepal_length)
mean(iris[,"sepal_length"])
mean(iris[which(iris$species == 'Iris-setosa'), "sepal_length"])
## BUGFIX: iris$sepal_length is a plain vector, so it takes a single index;
## the old `[idx, ]` form raised "incorrect number of dimensions".
mean(iris$sepal_length[which(iris$species == 'Iris-setosa')])
#mean
| /R_Introduction.R | no_license | JHumeau/TestGit | R | false | false | 825 | r | library(dplyr)
library(ggplot2)
library(data.table)
library(reshape)
library(cowplot)
library(devtools)
library(rpart)
library(rpart.plot)
install_github("vqv/ggbiplot")
uri <- 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris <- read.csv(uri, header=FALSE)
colnames(iris) <- c('sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species')
str(iris)
head(iris)
head(iris, 2)
print("iris is a table")
print(dim(iris))
shape=dim(iris)
shape[1]
shape[2]
print(sprintf("Shape of the iris data set: [%s,%s]",shape[1], shape[2] ))
print(table(iris[,'species']))
mean(iris$sepal_length)
mean(iris[,"sepal_length"])
mean(iris[which(iris$species == 'Iris.setosa'),"sepal_length"])
mean(iris$sepal_length[which(iris$species == 'Iris.setosa'),])
#mean
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getnlurl.R
\name{getNlUrlOLS}
\alias{getNlUrlOLS}
\title{Function to return the url of the OLS tile to download}
\usage{
getNlUrlOLS(nlPeriod, configName = pkgOptions("configName_OLS.Y"))
}
\arguments{
\item{nlPeriod}{The nlPeriod of the tile for which to return the tile download URL}
\item{configName}{character the type of raster being processed}
}
\value{
character string Url of the OLS tile file
}
\description{
Function to return the url of the OLS tile to download given the year
}
\examples{
\dontrun{
tileUrl <- Rnightlights:::getNlUrlOLS("1999")
}
}
| /man/getNlUrlOLS.Rd | no_license | mjdhasan/Rnightlights | R | false | true | 641 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getnlurl.R
\name{getNlUrlOLS}
\alias{getNlUrlOLS}
\title{Function to return the url of the OLS tile to download}
\usage{
getNlUrlOLS(nlPeriod, configName = pkgOptions("configName_OLS.Y"))
}
\arguments{
\item{nlPeriod}{The nlPeriod of the tile for which to return the tile download URL}
\item{configName}{character the type of raster being processed}
}
\value{
character string Url of the OLS tile file
}
\description{
Function to return the url of the OLS tile to download given the year
}
\examples{
\dontrun{
tileUrl <- Rnightlights:::getNlUrlOLS("1999")
}
}
|
testlist <- list(Rs = c(-1.9577272327571e+276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925480414416e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615862397-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 473 | r | testlist <- list(Rs = c(-1.9577272327571e+276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), atmp = numeric(0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925480414416e-68, 2.08343441298214e-168, 1.39098956557385e-309 ), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
## Experimental research in evolutionary computation
## author: thomas.bartz-beielstein@fh-koeln.de
## http://www.springer.com/3-540-32026-1
##
## Copyright (C) 2003-2010 T. Bartz-Beielstein and C. Lasarczyk
## This program is free software;
## you can redistribute it and/or modify it under the terms of the
## GNU General Public License as published by the Free Software Foundation;
## either version 3 of the License,
## or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
## You should have received a copy of the GNU General Public License along
## with this program; if not, see <http://www.gnu.org/licenses/>.
##
# Package Description for Roxygen:
#' Sequential Parameter Optimization Toolbox in R
#'
#' SPOT is a package for R, using statistic models to find
#' optimal parameters for optimization algorithms. SPOT is a very flexible and
#' user oriented tool box for parameter optimization. The flexibility has its
#' price: to fully use all the possibilities of flexibility the user is requested
#' to look at a large number of spot-parameters to change. The good news is, that
#' some defaults are given that already work perfectly well for 90 percent of the users.
#'
#' \tabular{ll}{
#' Package: \tab SPOT\cr
#' Type: \tab Package\cr
#' Version: \tab 1.0.5543\cr
#' Date: \tab 2015-04-24\cr
#' License: \tab GPL (>= 2)\cr
#' LazyLoad: \tab yes\cr
#' }
#'
#' @name SPOT-package
#' @aliases SPOT
#' @docType package
#' @title Sequential Parameter Optimization Toolbox in R
#' @author Thomas Bartz-Beielstein \email{thomas.bartz-beielstein@@fh-koeln.de} with contributions from: J. Ziegenhirt, W.
#' Konen, O. Flasch, M. Friese, P. Koch, M. Zaefferer, B. Naujoks, M. Friese
#' @references
#' \url{http://www.springer.com/3-540-32026-1}
#' @keywords package
#' @seealso Main interface functions are \code{\link{spot}} and \code{\link{spotOptim}}.
#' Also, a graphical interface can be used with \code{\link{spotGui}}
#' @import emoa
#' @import rpart
#' @import twiddler
#' @import rgl
#' @import AlgDesign
#' @import randomForest
#' @import mco
#' @import rsm
#' @import MASS
#'
#' @section Acknowledgments:
#' This work has been partially supported by the Federal Ministry of Education
#' and Research (BMBF) under the grants CIMO (FKZ 17002X11) and
#' MCIOP (FKZ 17N0311).
#'
#' @section Maintainer:
#' Martin Zaefferer \email{martin.zaefferer@@gmx.de}
#End of Package Description
NA #NULL, ends description without hiding first function
###################################################################################
## spot.R consists of three parts:
## - PART ONE: some help functions
## - PART TWO: the steps implemented as functions too
## - PART THREE: the main SPO algorithm
###################################################################################
###################################################################################
###################################################################################
# PART ONE: help functions
###################################################################################
###################################################################################
###################################################################################
# SPOT Prepare System - loads all required packages for SPOT
#
# installs and loads all packages that are needed for the core functionality of SPOT
# (hard coded in the function). All user defined Plugins needs to call
# \code{\link{spotInstAndLoadPackages}} to add their dependencies properly\cr
# This function is only provided for use in non-packaged version, for all packages
# are listed in the "Depends line" of DESCRIPTION
#
#
# @export
# @keywords internal
###################################################################################
#spotPrepareSystem <- function(){
### check whether necessary packages are installed and install missing packages
# see also: depends and suggests in DESCRIPTION file.
#necessaryPackages = c('rpart', 'emoa' );
#spotInstAndLoadPackages(necessaryPackages)
###### default packages with various use
# 'emoa' - used in various functions for multi objective optimization purpose, but mainly in spotGenerateSequentialDesign
###### default packages that are specified to be used in:
# spotPreditTree AND spotPlotBst: 'rpart'
###### deleted because use was not found:
# 'colorspace',Color Space Manipulation
# 'vcd' Visualizing Categorical Data - used in ???
# 'stats' - R statistical functions used in ???
# 'DoE.base' used in ???
# 'car' used in ???
######
## deleted from list and moved to the calling functions:
## rsm, tgp, randomForest, mlegp, FrF2, DoE.wrapper, AlgDesign, lhs, fields
# spotPredictLm: 'rsm'
# spotPredictTgp: 'tgp',
# spotPredictRandomForest: 'randomForest',
# spotPredictMlegp: 'mlegp',
# spotCreateDesignFrF2 : 'FrF2', 'DoE.wrapper',
# spotPredictDiceKriging: ,'DiceKriging' # depreciated
# spotCreateDesignBasicDoe: 'AlgDesign'
#}#end spotPrepareSystem
###################################################################################
## SPOT Prepare
###################################################################################
#' Prepares the configuration (spotConfig) for SPOT
#'
#' Set some globally important parameters for SPOT to run, creating the
#' parameter list (spotConfig) for SPOT from given usersConfigFile (.conf -file)
#'
#' @note For developers: this function also manages the include of all functions needed -
#' in the packaged version this is already done when package is installed.
#'
#' @param srcPath the absolute path to the SPOT sources
#' @param configFile the absolute path including file-specifier
#' @param spotConfigUser a list of parameters used to configure spot, usually spotConfigUser=NA will be passed to this function, which means the configuration will only be read from the \code{configFile}, not given by manual user input.
#' Notice that parameters given in spotConfigUser will overwrite both default values assigned by SPOT, AND values defined in the config file
#' However, values not passed by spotConfigUser will still be used as defaults. If you want to see those defaults, look at \code{\link{spotGetOptions}}
#' @return list \code{spotConfig} \cr
#' - \code{spotConfig} is the list of spot parameters created by this function
#'
#' @seealso \code{\link{SPOT}} \code{\link{spotGetOptions}} \code{\link{spot}}
#' @export
#' @keywords internal
###################################################################################
spotPrepare <- function(srcPath,configFile,spotConfigUser){
  ## Close all graphic windows left over from earlier runs.
  graphics.off()
  ######################################
  ### Load sources
  ######################################
  ## All file I/O happens relative to the user's configuration file, so
  ## switch the working directory to its folder (when the file exists).
  if(file.exists(configFile)){
    setwd(dirname(configFile))
  }
  ## Build the configuration list: SPOT defaults merged with the .conf file.
  spotConfig <- spotGetOptions(srcPath=srcPath,configFile)
  ## MZ 04.09.2010: options passed programmatically (spotConfigUser) override
  ## both the defaults and the config-file values; entries not supplied by
  ## the user keep their config-file/default values.
  if(is.list(spotConfigUser)){
    spotConfig <- append(spotConfigUser,spotConfig)
    spotConfig <- spotConfig[!duplicated(names(spotConfig))]#Commandline Input from user will overwrite configfile/default parameters here !!
    if(file.exists(spotConfig$io.roiFileName)){ #Read in the roi, just in case that spotConfigUser contained a new roi file name
      spotConfig$alg.roi <- spotReadRoi(spotConfig$io.roiFileName,spotConfig$io.columnSep,spotConfig$io.verbosity)
      spotConfig$alg.aroi <- spotConfig$alg.roi
    }
    colnames(spotConfig$alg.roi) <- c("lower","upper","type")
    colnames(spotConfig$alg.aroi) <- c("lower","upper","type")
  }
  ## The target may be given as a function object; wrap it so the rest of
  ## SPOT can always call it by character name via spotOptimInterface.
  if(is.function(spotConfig$alg.func)){
    spotConfig$alg.tar.func<-spotConfig$alg.func
    spotConfig$alg.func<-"spotOptimInterface"
  }
  else if(!is.character(spotConfig$alg.func)){
    stop("The optimization target function is neither a character string, nor a function handle")
  }
  ## MZ 30.08.2012: Continue stopped or broken SPOT runs (broken runs only continued if file mode enabled)
  if(spotConfig$spot.continue){
    if(spotConfig$spot.fileMode && file.exists(spotConfig$io.resFileName)) {
      spotConfig$alg.currentResult <- spotGetRawResData(spotConfig)$rawD
    }
  }
  ## Legacy OCBA repeat-count bugfix, kept for reference:
  # if (spotConfig$spot.ocba == TRUE){#Bugfix: If ocba is chosen, makes sure that max repeats, and initial repeats are more than 1. However this will still crash with noise=0
  # 	if (!is.na(spotConfig$init.design.repeats)){
  # 		if (spotConfig$init.design.repeats <= 1){
  # 			spotConfig$init.design.repeats=2
  # 		}
  # 	}
  # 	if (!is.na(spotConfig$seq.design.maxRepeats)){
  # 		if (spotConfig$seq.design.maxRepeats <= 1){
  # 			spotConfig$seq.design.maxRepeats=2
  # 		}
  # 	}
  # }
  class(spotConfig)<-"spotConfig" #TODO class might yield slow-down!
  spotConfig   # return the prepared configuration list
} # end spotPrepare()
###################################################################################
###################################################################################
## PART TWO: The SPO Steps
###################################################################################
###################################################################################
###################################################################################
#' SPOT Step: Initialize (First SPOT- Step)
#'
#' Creates a sequential design based on the results derived so far. Therefor it is
#' essential to have another design evaluated before and have a .res file to use.
#' afterwards the design is extended by 4 columns: CONFIG, REPEATS,STEP, SEED
#'
#' uses the functions \code{spotConfig$init.design.func} and \code{link{spotWriteDes}}
#' that writes a design to the file <xxx>.des
#'
#' @param spotConfig the list of all parameters is given, but the used ones are: \cr
#' \code{spotConfig$init.design.func} holds the spotCreateDesign<XXX> function to be used
#' for building an initial design. \cr
#' \code{spotConfig$init.design.size} number of points that should be created for the initial design \cr
#' \code{spotConfig$init.design.retries} gives the number of trials to find a design with the greatest distance between points, (default is 1)\cr
#' \code{spotConfig$init.design.repeats} number of repeats for one initial design-point\cr
#' \code{spotConfig$alg.seed} seed value for reproducible runs\cr
#' \code{spotConfig$srcPath} source path as given when spot() is called (or uses default)\cr
#' \code{spotConfig$io.verbosity} verbosity for command window output, which is passed to the output function
#' @export
###################################################################################
spotStepInitial <- function(spotConfig) {
  ## Seed all random number generators so SPOT runs are reproducible.
  set.seed(spotConfig$spot.seed)
  ## Clear results of any previous run that are still held in the config list.
  spotConfig$alg.currentResult<-NULL
  spotConfig$alg.currentBest<-NULL
  spotWriteLines(spotConfig$io.verbosity,2,"Create Inital Design", con=stderr());
  ## Fail early if the configured design-generator function is not loaded.
  if(!exists(spotConfig$init.design.func))stop(paste("The design function name", spotConfig$init.design.func, "is not found in the workspace \n
Please make sure to load the design function in the workspace, or specify the correct function in spotConfig$init.design.func" ))
  ##
  ## Write the actual region of interest (aroi) file; initially it holds the
  ## same data as the roi file.
  ## TODO: Add type information to aroi file
  A <- spotConfig$alg.roi
  A <- cbind(row.names(A), A)          # prepend a column with the parameter names
  colnames(A) <- c("name", "lower", "upper", "type")
  if(spotConfig$spot.fileMode){
    spotWriteAroi(A,spotConfig$io.verbosity,spotConfig$io.columnSep,spotConfig$io.aroiFileName)
  }
  spotConfig$alg.aroi<-spotConfig$alg.roi
  ## Generate the initial design by calling the configured generator
  ## (function name is a character string, hence eval(call(...))).
  if(spotConfig$init.design.size>0){
    initDes<-eval(call(spotConfig$init.design.func,
                       spotConfig,
                       spotConfig$init.design.size,
                       spotConfig$init.design.retries))
  }else{
    initDes <- NULL
  }
  ## Append manually specified design points, if any.
  if(!is.null(spotConfig$init.design.man)){
    colnames(spotConfig$init.design.man) = rownames(spotConfig$alg.roi)
    initDes <- rbind(initDes,spotConfig$init.design.man)
  }
  if(is.null(initDes)){
    stop("Initial Design for SPOT is empty. Set spotConfig$init.design.size to a value larger than zero, or specify design points manually in spotConfig$init.design.man.")
  }
  ## FIRST COLUMN ADDED: "CONFIG" - running number of each configuration.
  ## NOTE(review): 1:nrow(initDes) is safe here because initDes is non-NULL,
  ## but would misbehave for a zero-row design — confirm generators never
  ## return an empty matrix.
  configNumber<-1:nrow(initDes)
  initDes <- cbind(initDes,configNumber)
  colnames(initDes)[ncol(initDes)] <- "CONFIG"
  ## SECOND COLUMN ADDED: "REPEATS" - number of repeats per design point.
  initDes <- cbind(initDes,spotConfig$init.design.repeats)
  colnames(initDes)[ncol(initDes)] <- "REPEATS"
  ## THIRD COLUMN ADDED: "STEP" - sequential step counter; the initial design
  ## is step 0 (refers to auto.loop.steps).
  initDes <- cbind(initDes,0)
  colnames(initDes)[ncol(initDes)] <- "STEP"
  ## FOURTH COLUMN ADDED: "SEED" - RNG seed per run (same seed provides
  ## reproducible runs).  The seed stored in a previous .res file is not
  ## considered here.
  seed <- spotConfig$alg.seed
  initDes <- cbind(initDes,seed)
  colnames(initDes)[ncol(initDes)] <- "SEED"
  if (spotConfig$spot.fileMode){
    if (file.exists(spotConfig$io.desFileName)){
      file.remove(spotConfig$io.desFileName)
    }
    ## Write the design to a NEW .des file ...
    spotWriteDes(initDes,spotConfig$io.verbosity,spotConfig$io.columnSep,spotConfig$io.desFileName)
    ## ... and delete stale .bst / .res files from earlier runs if requested.
    if (spotConfig$init.delete.previous.files & file.exists(spotConfig$io.bstFileName)){
      file.remove(spotConfig$io.bstFileName)
    }
    if (spotConfig$init.delete.previous.files & file.exists(spotConfig$io.resFileName)){
      file.remove(spotConfig$io.resFileName)
    }
  }
  spotConfig$alg.currentDesign<-initDes   # keep the design in memory as well
  spotConfig                              # return the updated configuration
}
###################################################################################
## Second Step: Algorithm Call
###################################################################################
#' SPOT Step Algorithm Call
#'
#' This is the second SPOT Step after step "initial" - but also needed
#' after each step "sequential", and is a call frame for the algorithm-call.
#'
#' The algorithm is the heart of what the user must provide, but SPOT should be
#' able to handle them in the most flexible manner. This function is an interface to the algorithm,
#' given as a R-function.
#'
#' @param spotConfig the list of all configuration parameters, but most important ones are:\cr
#' \code{spotConfig$alg.func} the name of the R target function \cr
#' \code{spotConfig$io.apdFileName} filename for the problem definition of the algorithm,
#' first parameter of the generically defined R-function spotConfig$alg.func \cr
#' \code{spotConfig$io.desFileName} filename for the input of the algorithm,
#' second parameter of the generically defined R-function spotConfig$alg.func \cr
#' \code{spotConfig$io.resFileName} filename for the output of the algorithm
#' third parameter of the generically defined R-function spotConfig$alg.func\cr
#' \code{spotConfig$io.verbosity} verbosity for command window output, which is passed to the output function
#' @param ... additional parameters to be passed on to target function which is called inside alg.func
#'
#' @seealso \code{\link{SPOT}} \code{\link{spot}} \code{\link{spotStepInitial}}
#' \code{\link{spotStepSequential}}
#' @export
####################################################################################
spotStepRunAlg <- function(spotConfig,...){
  ## Log which target function is about to be executed.
  spotWriteLines(spotConfig$io.verbosity,2,paste("spotStepRunAlg started with ",spotConfig$alg.func,sep=""))
  ## Fail early if the configured target function is not loaded in the workspace.
  if(!exists(spotConfig$alg.func)) {
    stop(paste("The target function name", spotConfig$alg.func, "is not found in the workspace \n
Please make sure to load the target function in the workspace, or specify the correct function in spotConfig$alg.func"))
  }
  ## Call the target by its (character) name; do.call forwards any extra
  ## arguments in ... to the target function.
  spotConfig <- do.call(spotConfig$alg.func, args=list(spotConfig,...))
  ## BUGFIX: return the updated configuration visibly. Previously the last
  ## expression was the assignment itself, whose value R returns *invisibly*,
  ## so interactive callers saw no result.
  spotConfig
}
###################################################################################
## Third Step: Sequential
#' SPOT Step Sequential
#'
#' Third SPOT Step to generate a sequential new design, this
#' is mainly a call of \code{\link{spotGenerateSequentialDesign}}
#'
#' Creates a sequential design based on the results derived so far. Therefor it is
#' essential to have another design evaluated before and have a .res file to use.
#' It uses the functions \code{\link{spotGenerateSequentialDesign}} and \code{\link{spotWriteDes}}
#' writes a sequential design to the file <xxx>.des
#'
#' @param spotConfig the list of all parameters is given, but the used ones are: \cr
#' \code{spotConfig$io.resFileName} is checked for existence is not, function fails with error\cr
#' \code{spotConfig$algSourceSrcPath} needed for the error message \cr
#' \code{spotConfig$userConfFileName} needed for the error message\cr
#' @export
###################################################################################
spotStepSequential <- function(spotConfig) {
  spotWriteLines(spotConfig$io.verbosity,2,"Create Sequential Design", con=stderr())
  ## Guard: a sequential design needs results from a previous algorithm run,
  ## either on disk (file mode) or in memory (spotConfig$alg.currentResult).
  if(spotConfig$spot.fileMode){
    if (!file.exists(spotConfig$io.resFileName)){
      stop("Error in spot.R::spotStepSequential:
.res file not found, spotStepAlgRun() has to be executed before.")
    }
  }else{
    if(!nrow(spotConfig$alg.currentResult)>0){
      stop("Error in spot.R::spotStepSequential:
result data not found, spotStepAlgRun() has to be executed before.")
    }
  }
  ##NOTE: the following code was moved to spotGenerateSequentialDesign, when merging with ocba
  ########MZ: Now first check for var of the y-values. Only if y varies (i.e. function is noisy and evaluations are repeated) use ocba
  #	varies=TRUE;
  #	if(spotConfig$spot.ocba == TRUE){ # only needs to be checked for ocba=TRUE
  #		if(spotConfig$spot.fileMode){
  #			res<-spotGetRawResData(spotConfig)
  #			spotConfig<-res$conf
  #			rawData<-res$rawD
  #			res<-NULL
  #		}else{
  #			rawData=spotConfig$alg.currentResult;
  #		}
  #		z <- split(rawData[,spotConfig$alg.resultColumn], rawData$CONFIG);
  #########varY <- sapply(as.data.frame(z),var);
  #		varY <- sapply(z,var);
  #		for (i in 1:length(varY)){
  #			if (is.na(varY[i])||is.nan(varY[i])||is.null(varY[i])||(varY[i]==0)){
  #				varies=FALSE;
  #			}
  #		}
  #	}
  ############Now call sequential design function
  #	if ((spotConfig$spot.ocba == TRUE)&(varies == TRUE)){
  #		spotConfig <- spotGenerateSequentialDesignOcba(spotConfig);
  #	}
  #	else if (spotConfig$spot.ocba == FALSE) {
  #		spotConfig <- spotGenerateSequentialDesign(spotConfig);
  #	}
  #	else{
  #		stop("
  #	There is no variance for some point(s) in the current design.
  #	Therefore OCBA cannot be used. Possible reasons are a target
  #	function without noise, or the design points are not repeated.
  #	SPOT with OCBA makes only sense if the target function is noisy.
  #	If a non noisy function is used, the default settings should
  #	be adopted, as described in the help of spot() or spotOptim().
  #	That means: either use spot.ocba=FALSE, or set the repeats
  #	(init.design.repeats) to values larger
  #	than 1.
  #	The current variance vector is for the used design points is:
  #	",paste(varY," "))
  #	}
  ## Delegate the actual design generation (including the OCBA handling that
  ## used to live in the commented block above).
  ## NOTE(review): the value is returned invisibly because the last
  ## expression is an assignment — callers assign the result anyway.
  spotConfig <- spotGenerateSequentialDesign(spotConfig)
}
###################################################################################
## Forth Step Report
###################################################################################
#' SPOT Step Report
#'
#' Forth and last step for SPOT, that is by default a call of \link{spotReportDefault}
#'
#' This step provides a very basic report about the .res-file, based on settings in the \code{spotConfig}
#' The mainly used parameters of \code{spotConfig} is \code{spotConfig$report.func},
#' specifying which report shall be called. The user can specify his own report and should set the
#' value {report.func} in the configuration file according to the specification rules
#' given. If nothing is set, the default report is used.
#'
#' @param spotConfig the list of all parameters is given, it is forwarded to the call of the report-function
#' @seealso \code{\link{SPOT}} \code{\link{spot}} \link{spotReportDefault} \code{\link{spotGetOptions}}
#'
#' @export
###################################################################################
spotStepReport <- function(spotConfig) {
  ## Fail early if the configured report function is not loaded.
  if(!exists(spotConfig$report.func)) {
    stop(paste("The report function name", spotConfig$report.func, "is not found in the workspace \n
Please make sure to load the report function in the workspace, or specify the correct function in spotConfig$report.func" ))
  }
  ## Lazily load the raw result data from the .res file when it is not
  ## already held in memory.
  if(is.null(spotConfig$alg.currentResult)) {
    spotConfig$alg.currentResult <- spotGetRawResData(spotConfig)$rawD
  }
  ## Call the report function by its character name.  do.call() replaces the
  ## former eval(call(...)) idiom (consistent with spotStepRunAlg).
  spotConfig <- do.call(spotConfig$report.func, list(spotConfig))
  ## BUGFIX: return the updated configuration visibly; previously the last
  ## expression was an assignment, which R returns invisibly.
  spotConfig
}
###################################################################################
## Step Auto
###################################################################################
#' SPOT Step Auto Opt
#'
#' spotStepAutoOpt is the default task called, when spot is started.
#'
#' The \code{auto} task calls the tasks \code{init} and \code{run} once
#' and loops \code{auto.loop.steps} times over the steps \code{seq} and \code{run}
#' finalising the function with a call of the report function. Instead of \code{auto.loop.steps}
#' also \code{auto.loop.nevals} can be used as a stopping criterion.
#'
#' @param spotConfig the list of all parameters is given, it is forwarded to the call of the report-function
#' the used parameters of spotConfig are just spotConfig$auto.loop.steps
#' specifying the number of meta models that should be calculated
#' @param ... additional parameters to be passed on to target function which is called inside alg.func
#'
#' @seealso \code{\link{SPOT}} \code{\link{spot}} \code{\link{spotStepInitial}}
#' \code{\link{spotStepSequential}} \code{\link{spotStepRunAlg}} \code{\link{spotStepReport}}
#' \code{\link{spotGetOptions}}
#' @export
###################################################################################
spotStepAutoOpt <- function(spotConfig,...){
  ## Unless continuing an earlier run, create and evaluate the initial design.
  if(!spotConfig$spot.continue || is.null(spotConfig$alg.currentResult)){
    spotConfig=spotStepInitial(spotConfig);
    spotConfig=spotStepRunAlg(spotConfig,...)
  }
  j <- max(spotConfig$alg.currentResult$STEP)       # sequential steps done so far
  k <- nrow(spotGetRawDataMatrixB(spotConfig));     # target evaluations done so far
  ## Main loop: alternate sequential design generation and algorithm runs
  ## until auto.loop.steps or auto.loop.nevals is reached.  The two branches
  ## below are identical except that the first additionally installs an
  ## error handler (active when spot.catch.error is set).
  ## NOTE(review): the duplicated tryCatch bodies should eventually be
  ## consolidated into one expression with conditionally attached handlers.
  if(!is.null(spotConfig$spot.catch.error)){
    res<-tryCatch({ #This function will catch crashes and interrupts, but still return the last valid spotConfig list, to recover any available results and settings.
      while (j <= spotConfig$auto.loop.steps && k < spotConfig$auto.loop.nevals){
        spotWriteLines(spotConfig$io.verbosity,2,paste("SPOT Step:", j), con=stderr());
        spotConfig=spotStepSequential(spotConfig);
        spotConfig=spotStepRunAlg(spotConfig,...);
        k <- nrow(spotGetRawDataMatrixB(spotConfig));
        j <- j+1;}
    }, interrupt = function(ex) {
      cat("An interrupt was detected in spotStepAutoOpt.\n");
      print(ex);
    }, error = function(ex) {
      cat("An error was detected in spotStepAutoOpt.\n");
      print(ex);
    }, finally = {}
    ) #tryCatch end.
  }else{
    res<-tryCatch(
      { #This function will catch crashes and interrupts, but still return the last valid spotConfig list, to recover any available results and settings.
        while (j <= spotConfig$auto.loop.steps && k < spotConfig$auto.loop.nevals){
          spotWriteLines(spotConfig$io.verbosity,2,paste("SPOT Step:", j), con=stderr())
          spotConfig=spotStepSequential(spotConfig)
          spotConfig=spotStepRunAlg(spotConfig,...)
          k <- nrow(spotGetRawDataMatrixB(spotConfig))
          j <- j+1
        }
      }, interrupt = function(ex) {
        cat("An interrupt was detected in spotStepAutoOpt.\n")
        print(ex)
      }, finally = {}
    ) #tryCatch end.
  }
  ## res is NULL on normal completion (a while loop evaluates to NULL); a
  ## non-NULL value means a handler ran, so hand back the last valid config
  ## to allow recovery of partial results.
  if(!is.null(res)){#&& any(class(res)=="interrupt")){
    cat("A crash or interrupt ocurred, most recent spotConfig list is returned, to allow recovery of results.\n")
    return(spotConfig)
  }
  ## Optional progress output: merge results, update the best-so-far data
  ## and plot it.
  if(spotConfig$io.verbosity>2){
    mergedData <- spotPrepareData(spotConfig)
    spotConfig=spotWriteBest(mergedData, spotConfig)
    spotPlotBst(spotConfig)
  }
  ## Final report step; returns the updated configuration.
  spotConfig=spotStepReport(spotConfig)
  spotConfig
}
## Step Meta
###################################################################################
#' SPOT Step Meta
#'
#' Attention: This feature is work in progress, documentation is not up to date.
#'
#' The \code{meta} task calls spotStepMetaOpt which itself calls \code{\link{spot}}
#' with several different fixed
#' parameters to provide a mixed optimization mechanism: analyse a fully qualified
#' test of some parameters and the intelligent optimization of other parameters.
#' e.g. the number of the dimension of a problem etc.
#'
#' To start this step you could for example do this:\cr
#' \code{spot("configFileName.conf","meta")}\cr
#'
#' @param spotConfig the list of all parameters is given
#'
#' @seealso \code{\link{spotGetOptions}}
#' @export
###################################################################################
spotStepMetaOpt <- function(spotConfig) {
	## Runs a full factorial "meta" experiment: every combination of the
	## levels in spotConfig$meta.list is turned into a fixed setting and
	## spot() is started once per combination. The best results of all runs
	## are merged into the FBS file and stored in spotConfig$meta.fbs.result;
	## finally the meta report function is invoked on the collected data.
	#spotInstAndLoadPackages("AlgDesign")
	if(is.null(spotConfig$report.meta.func)) spotConfig$report.meta.func <- "spotReportMetaDefault"
	### Delete old FBS file; a fresh one is built up during this run.
	if(file.exists(spotConfig$io.fbsFileName)) {
		unlink(spotConfig$io.fbsFileName)
	}
	myList <- spotConfig$meta.list          # parameters varied factorially
	mySetList <- spotConfig$meta.conf.list  # settings coupled to each level
	nVars <- length(myList)
	## Vector "x" holds the number of levels of each meta parameter.
	x <- as.numeric(lapply(myList, length))
	if (nVars == 1){ # full factorial design with indices for all combinations:
		dat <- matrix(seq_len(x), byrow = TRUE)
	}else{
		dat <- gen.factorial(x, varNames = names(myList), factors = "all")
	}
	for (j in seq_len(nrow(dat))) { ## Loop over full factorial combinations of all parameters specified in .meta
		graphics.off() ## close all remaining graphic devices - from old spotStepAutoOpt runs
		myFbs <- list()
		newConf <- list()
		newConfSet <- list()
		for (k in seq_len(nVars)) {
			## The factorial level of the kth variable for this dat[j,]-row
			## becomes a fixed configuration entry:
			newConf[[names(myList[k])]] <- myList[[k]][[dat[j,k]]]
			## seq_along() is empty-safe: zero iterations when no coupled
			## settings exist (1:length() would have iterated over c(1, 0)).
			for (ii in seq_along(mySetList[[k]])){
				newConfSet[[names(mySetList[[k]][ii])]] <- mySetList[[k]][[ii]][dat[j,k]]
			}
		}
		myFbs <- c(myFbs, newConf)
		newConf <- append(newConf, newConfSet)
		## Temporary spotConfig for this single meta configuration; entries
		## from newConf take precedence over the inherited spotConfig values.
		newSpotConfig <- append(newConf, spotConfig)
		newSpotConfig$spot.fileMode <- FALSE
		newSpotConfig <- newSpotConfig[!duplicated(names(newSpotConfig))] ## delete unneeded entries
		newSpotConfig <- spot(spotConfig=newSpotConfig) ## THIS calls spot for ONE configuration of the meta run
		tmpBst <- newSpotConfig$alg.currentBest
		design <- as.list(dat[j,])
		names(design) <- paste(names(design), "NUM", sep="")
		myFbsFlattened <- spotMetaFlattenFbsRow(append(myFbs, design))
		## Last row of the best-history = final best of this run.
		dataTHIS <- as.data.frame(cbind(tmpBst[nrow(tmpBst),], myFbsFlattened))
		if(file.exists(spotConfig$io.fbsFileName)) {
			dataLAST <- as.data.frame(read.table(file=spotConfig$io.fbsFileName, header=TRUE))
			data <- merge(dataLAST, dataTHIS, all=TRUE, sort=FALSE)
		}else{
			data <- dataTHIS
		}
		## Rewrite the whole FBS file with the merged results so far.
		write.table(file=spotConfig$io.fbsFileName,
				data,
				row.names = FALSE,
				col.names = TRUE,
				sep = " ",
				append = FALSE,
				quote = FALSE)
	} # for (j in seq_len(nrow(dat)))... (loop over full factorial design)
	spotConfig$meta.fbs.result <- data
	if(!exists(spotConfig$report.meta.func)) stop(paste("The meta report function name", spotConfig$report.meta.func, "is not found in the workspace \n
	Please make sure to load the meta report function in the workspace, or specify the correct function in spotConfig$report.meta.func"))
	## The meta report function receives and returns the full spotConfig list.
	spotConfig <- eval(call(spotConfig$report.meta.func, spotConfig))
}
############# end function definitions ############################################################
###################################################################################################
## PART THREE: SPOT: The Program
###################################################################################################
#' Main function for the use of SPOT
#'
#' Sequential Parameter Optimization Toolbox (SPOT) provides a toolbox for the
#' sequential optimization of parameter driven tasks.
#' Use \code{\link{spotOptim}} for a \code{\link{optim}} like interface
#'
#' The path given with the \code{userConfigFile} also fixes the working directory used
#' throughout the run of all SPOT functions. All files that are needed for input/output
#' can and will be given relative to the path of the userConfigFile (this also holds for
#' the binary of the algorithm). This refers to files that are specified in the configFile
#' by the user.
#'
#' It is of major importance to understand that spot by default expects to optimize noisy functions. That means, the default settings of spot,
#' which are also used in spotOptim, include repeats of the initial and sequentially created design points. Also, as a default OCBA
#' is used to spread the design points for optimal usage of the function evaluation budget. OCBA will not work when there is no variance in the data.
#' So if the user wants to optimize non-noisy functions, the following settings should be used:\cr
#' \code{spotConfig$spot.ocba <- FALSE}\cr
#' \code{spotConfig$seq.design.maxRepeats <- 1}\cr
#' \code{spotConfig$init.design.repeats <- 1}\cr
#'
#' @param configFile the absolute path including file-specifier, there is no default, this value should always be given
#' @param spotTask [init|seq|run|auto|rep] the switch for the tool used, default is "auto"
#' @param srcPath the absolute path to user written sources that extend SPOT, the default(NA) will search for sources in the path <.libPath()>/SPOT/R
#' @param spotConfig a list of parameters used to configure spot, default is spotConfig=NA, which means the configuration will only be read from the \code{configFile}, not given by manual user input.
#' Notice that parameters given in spotConfig will overwrite both default values assigned by SPOT, AND values defined in the Config file
#' However, values not passed by spotConfig will still be used as defaults. If you want to see those defaults, look at \code{\link{spotGetOptions}}
#' @param ... additional parameters to be passed on to target function which is called inside alg.func. Only relevant for spotTask "auto" and "run".
#' @note \code{spot()} expects char vectors as input, e.g. \code{spot("c:/configfile.conf","auto")}
#' @seealso \code{\link{SPOT}}, \code{\link{spotOptim}}, \code{\link{spotStepAutoOpt}}, \code{\link{spotStepInitial}},
#' \code{\link{spotStepSequential}}, \code{\link{spotStepRunAlg}}, \code{\link{spotStepReport}}
#' @export
###################################################################################################
spot <- function(configFile="NULL",spotTask="auto",srcPath=NA,spotConfig=NA,...){
	## Main SPOT entry point: prepares the spotConfig list (defaults <
	## config file < user-supplied spotConfig) and dispatches to the step
	## selected by spotTask. Restores the caller's working directory before
	## returning the step's result.
	writeLines("spot.R::spot started ") #bugfix MZ: spotWriteLines will not allways work here, since spotConfig could be NA
	callingDirectory<-getwd()
	## The string "NULL" means: no config file, use defaults + spotConfig list.
	## && (not &): this is a scalar condition, so short-circuit evaluation
	## skips the file.exists() call when no config file was given.
	if(!(configFile=="NULL") && !file.exists(configFile)){
		stop("Error, configFile not found (or not \"NULL\")")
	}
	## Locate the installed SPOT sources when no source path was given.
	if(is.na(srcPath)){
		for(k in seq_along(.libPaths())){
			if(file.exists(paste(.libPaths()[k],"SPOT","R",sep="/"))){
				srcPath<-(paste(.libPaths()[k],"SPOT","R",sep="/"))
				break
			}
		}
	}
	## PRELIMINARIES 1: load all functions belonging to SPOT - not necessary if provided SPOT is installed as package - useful for developers...
	spotConfig<-spotPrepare(srcPath,configFile,spotConfig)
	## SWITCH task according to the task extracted from the command line
	resSwitch <- switch(spotTask
			, init=, initial=spotStepInitial(spotConfig)        # First Step
			, seq=, sequential=spotStepSequential(spotConfig)   # Second Step
			, run=, runalg=spotStepRunAlg(spotConfig,...)       # Third Step
			, rep=, report=spotStepReport(spotConfig)           # Fourth Step
			, auto=, automatic=spotStepAutoOpt(spotConfig,...)  # Automatically call First to Fourth Step
			, meta=spotStepMetaOpt(spotConfig)  # Automatically call several spotStepAutoOpt runs to provide a systematic testing tool on fixed parameters in .apd file
			, "invalid switch"  # switch() falls through to this for an unknown task
	)
	## ERROR handling
	## A valid switch returns the step's result; the sentinel string above
	## signals an unknown task and triggers the short usage help.
	if (is.character(resSwitch) && resSwitch == "invalid switch") {
		stop(paste("ERROR, unknown task:", spotTask, "\nValid tasks are:\
	auto        - run tuning in automated mode\
	initial     - to create an initial design\
	run         - start the program, algorithm, simulator\
	sequential  - to create further design points\
	report      - to generate a report from your results" ))
	}
	# go back to - well where ever you came from
	setwd(callingDirectory)
	resSwitch
}
###################################################################################################
#' Print function for spotConfig class
#'
#' Print function to summarize a spotConfig.
#'
#' @rdname print
#' @method print spotConfig
# @S3method print spotConfig
#' @param x spotConfig
#' @param ... additional parameters
#' @export
#' @keywords internal
#####################################################################################
print.spotConfig <- function(x, ...) {
	## S3 print method for class "spotConfig" (the class is assigned exactly
	## once, in spotPrepare). Prints only the element names plus usage hints
	## instead of dumping every value of the (large) configuration list.
	writeLines(c("This is a spotConfig list",
	             "Current list content:"))
	print(names(x))
	writeLines(c("Use \"listname[]\" to print all list values, e.g. spotConfig[].",
	             "See the help of spotGetOptions for more information."))
}
## ---- dataset metadata row (not R code): /SPOT/R/spot.R | no_license | ingted/R-Examples | R | false | false | 35631 | r ----
## author: thomas.bartz-beielstein@fh-koeln.de
## http://www.springer.com/3-540-32026-1
##
## Copyright (C) 2003-2010 T. Bartz-Beielstein and C. Lasarczyk
## This program is free software;
## you can redistribute it and/or modify it under the terms of the
## GNU General Public License as published by the Free Software Foundation;
## either version 3 of the License,
## or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
## You should have received a copy of the GNU General Public License along
## with this program; if not, see <http://www.gnu.org/licenses/>.
##
# Package Description for Roxygen:
#' Sequential Parameter Optimization Toolbox in R
#'
#' SPOT is a package for R, using statistic models to find
#' optimal parameters for optimization algorithms. SPOT is a very flexible and
#' user oriented tool box for parameter optimization. The flexibility has its
#' price: to fully use all the possibilities of flexibility the user is requested
#' to look at a large number of spot-parameters to change. The good news is, that
#' some defaults are given that already work perfectly well for 90 percent of the users.
#'
#' \tabular{ll}{
#' Package: \tab SPOT\cr
#' Type: \tab Package\cr
#' Version: \tab 1.0.5543\cr
#' Date: \tab 2015-04-24\cr
#' License: \tab GPL (>= 2)\cr
#' LazyLoad: \tab yes\cr
#' }
#'
#' @name SPOT-package
#' @aliases SPOT
#' @docType package
#' @title Sequential Parameter Optimization Toolbox in R
#' @author Thomas Bartz-Beielstein \email{thomas.bartz-beielstein@@fh-koeln.de} with contributions from: J. Ziegenhirt, W.
#' Konen, O. Flasch, M. Friese, P. Koch, M. Zaefferer, B. Naujoks, M. Friese
#' @references
#' \url{http://www.springer.com/3-540-32026-1}
#' @keywords package
#' @seealso Main interface functions are \code{\link{spot}} and \code{\link{spotOptim}}.
#' Also, a graphical interface can be used with \code{\link{spotGui}}
#' @import emoa
#' @import rpart
#' @import twiddler
#' @import rgl
#' @import AlgDesign
#' @import randomForest
#' @import mco
#' @import rsm
#' @import MASS
#'
#' @section Acknowledgments:
#' This work has been partially supported by the Federal Ministry of Education
#' and Research (BMBF) under the grants CIMO (FKZ 17002X11) and
#' MCIOP (FKZ 17N0311).
#'
#' @section Maintainer:
#' Martin Zaefferer \email{martin.zaefferer@@gmx.de}
#End of Package Description
NA #NULL, ends description without hiding first function; harmless top-level value so the roxygen package block above attaches to it rather than to the next function
###################################################################################
## spot.R consists of three parts:
## - PART ONE: some help functions
## - PART TWO: the steps implemented as functions too
## - PART THREE: the main SPO algorithm
###################################################################################
###################################################################################
###################################################################################
# PART ONE: help functions
###################################################################################
###################################################################################
###################################################################################
# SPOT Prepare System - loads all required packages for SPOT
#
# installs and loads all packages that are needed for the core functionality of SPOT
# (hard coded in the function). All user defined Plugins needs to call
# \code{\link{spotInstAndLoadPackages}} to add their dependencies properly\cr
# This function is only provided for use in non-packaged version, for all packages
# are listed in the "Depends line" of DESCRIPTION
#
#
# @export
# @keywords internal
###################################################################################
#spotPrepareSystem <- function(){
### check whether necessary packages are installed and install missing packages
# see also: depends and suggests in DESCRIPTION file.
#necessaryPackages = c('rpart', 'emoa' );
#spotInstAndLoadPackages(necessaryPackages)
###### default packages with various use
# 'emoa' - used in various functions for multi objective optimization purpose, but mainly in spotGenerateSequentialDesign
###### default packages that are specified to be used in:
# spotPreditTree AND spotPlotBst: 'rpart'
###### deleted because use was not found:
# 'colorspace',Color Space Manipulation
# 'vcd' Visualizing Categorical Data - used in ???
# 'stats' - R statistical functions used in ???
# 'DoE.base' used in ???
# 'car' used in ???
######
## deleted from list and moved to the calling functions:
## rsm, tgp, randomForest, mlegp, FrF2, DoE.wrapper, AlgDesign, lhs, fields
# spotPredictLm: 'rsm'
# spotPredictTgp: 'tgp',
# spotPredictRandomForest: 'randomForest',
# spotPredictMlegp: 'mlegp',
# spotCreateDesignFrF2 : 'FrF2', 'DoE.wrapper',
# spotPredictDiceKriging: ,'DiceKriging' # depreciated
# spotCreateDesignBasicDoe: 'AlgDesign'
#}#end spotPrepareSystem
###################################################################################
## SPOT Prepare
###################################################################################
#' Prepares the configuration (spotConfig) for SPOT
#'
#' Set some globally important parameters for SPOT to run, creating the
#' parameter list (spotConfig) for SPOT from given usersConfigFile (.conf -file)
#'
#' @note For developers: this function also manages the include of all functions needed -
#' in the packaged version this is already done when package is installed.
#'
#' @param srcPath the absolute path to the SPOT sources
#' @param configFile the absolute path including file-specifier
#' @param spotConfigUser a list of parameters used to configure spot, usually spotConfigUser=NA will be passed to this function, which means the configuration will only be read from the \code{configFile}, not given by manual user input.
#' Notice that parameters given in spotConfigUser will overwrite both default values assigned by SPOT, AND values defined in the config file
#' However, values not passed by spotConfigUser will still be used as defaults. If you want to see those defaults, look at \code{\link{spotGetOptions}}
#' @return list \code{spotConfig} \cr
#' - \code{spotConfig} is the list of spot parameters created by this function
#'
#' @seealso \code{\link{SPOT}} \code{\link{spotGetOptions}} \code{\link{spot}}
#' @export
#' @keywords internal
###################################################################################
spotPrepare <- function(srcPath,configFile,spotConfigUser){
	## Builds the final spotConfig list for a SPOT run. Precedence (highest
	## first): user-supplied spotConfigUser > config file > spotGetOptions
	## defaults. Also normalizes alg.func to a character name, optionally
	## reloads previous results (spot.continue) and tags the result with
	## class "spotConfig" so print.spotConfig is dispatched.
	# Close graphic windows
	graphics.off()
	######################################
	### Load sources
	######################################
	## Add path to files
	## everything happens relative to users configuration file
	if(file.exists(configFile)){
		setwd(dirname(configFile))
	}
	## Call configuration program that extracts infos from userconf
	spotConfig <- spotGetOptions(srcPath=srcPath,configFile)
	## MZ 04.09.2010: New feature implemented, so user can set options in commandline when calling spot()
	if(is.list(spotConfigUser)){
		## The user list is prepended, so duplicated() keeps the user's value:
		spotConfig <- append(spotConfigUser,spotConfig)
		spotConfig <- spotConfig[!duplicated(names(spotConfig))]#Commandline Input from user will overwrite configfile/default parameters here !!
		if(file.exists(spotConfig$io.roiFileName)){ #Read in the roi, just in case that spotConfigUser contained a new roi file name
			spotConfig$alg.roi <- spotReadRoi(spotConfig$io.roiFileName,spotConfig$io.columnSep,spotConfig$io.verbosity)
			spotConfig$alg.aroi <- spotConfig$alg.roi
		}
		colnames(spotConfig$alg.roi) <- c("lower","upper","type")
		colnames(spotConfig$alg.aroi) <- c("lower","upper","type")
	}
	## alg.func may be given as a function object; wrap it behind the
	## generic interface so the rest of SPOT can always dispatch on a
	## character function name.
	if(is.function(spotConfig$alg.func)){
		spotConfig$alg.tar.func<-spotConfig$alg.func
		spotConfig$alg.func<-"spotOptimInterface"
	}
	else if(!is.character(spotConfig$alg.func)){
		stop("The optimization target function is neither a character string, nor a function handle")
	}
	## MZ 30.08.2012: Continue stopped or broken SPOT runs (broken runs only continued if file mode enabled)
	if(spotConfig$spot.continue){
		if(spotConfig$spot.fileMode && file.exists(spotConfig$io.resFileName)) {
			spotConfig$alg.currentResult <- spotGetRawResData(spotConfig)$rawD
		}
	}
#	if (spotConfig$spot.ocba == TRUE){#Bugfix: If ocba is chosen, makes sure that max repeats, and initial repeats are more than 1. However this will still crash with noise=0
#		if (!is.na(spotConfig$init.design.repeats)){
#			if (spotConfig$init.design.repeats <= 1){
#				spotConfig$init.design.repeats=2
#			}
#		}
#		if (!is.na(spotConfig$seq.design.maxRepeats)){
#			if (spotConfig$seq.design.maxRepeats <= 1){
#				spotConfig$seq.design.maxRepeats=2
#			}
#		}
#	}
	class(spotConfig)<-"spotConfig" #TODO class might yield slow-down!
	spotConfig
} # end spotPrepare()
###################################################################################
###################################################################################
## PART TWO: The SPO Steps
###################################################################################
###################################################################################
###################################################################################
#' SPOT Step: Initialize (First SPOT- Step)
#'
#' Creates a sequential design based on the results derived so far. Therefor it is
#' essential to have another design evaluated before and have a .res file to use.
#' afterwards the design is extended by 4 columns: CONFIG, REPEATS,STEP, SEED
#'
#' uses the functions \code{spotConfig$init.design.func} and \code{link{spotWriteDes}}
#' that writes a design to the file <xxx>.des
#'
#' @param spotConfig the list of all parameters is given, but the used ones are: \cr
#' \code{spotConfig$init.design.func} holds the spotCreateDesign<XXX> function to be used
#' for building an initial design. \cr
#' \code{spotConfig$init.design.size} number of points that should be created for the initial design \cr
#' \code{spotConfig$init.design.retries} gives the number of trials to find a design with the greatest distance between points, (default is 1)\cr
#' \code{spotConfig$init.design.repeats} number of repeats for one initial design-point\cr
#' \code{spotConfig$alg.seed} seed value for reproducible runs\cr
#' \code{spotConfig$srcPath} source path as given when spot() is called (or uses default)\cr
#' \code{spotConfig$io.verbosity} verbosity for command window output, which is passed to the output function
#' @export
###################################################################################
spotStepInitial <- function(spotConfig) {
	## First SPOT step: clear old results, write the (actual) region of
	## interest, build the initial design via spotConfig$init.design.func
	## (plus any manually given points) and decorate it with the CONFIG,
	## REPEATS, STEP and SEED bookkeeping columns.
	## Sets the seed for all random number generators in SPOT, so complete
	## runs are reproducible.
	set.seed(spotConfig$spot.seed)
	# clear data from any previous run
	spotConfig$alg.currentResult<-NULL
	spotConfig$alg.currentBest<-NULL
	spotWriteLines(spotConfig$io.verbosity,2,"Create Initial Design", con=stderr())
	if(!exists(spotConfig$init.design.func))stop(paste("The design function name", spotConfig$init.design.func, "is not found in the workspace \n
	Please make sure to load the design function in the workspace, or specify the correct function in spotConfig$init.design.func" ))
	##
	## write actual region of interest file (same data as roi file)
	## TODO: Add type information to aroi file
	A <- spotConfig$alg.roi
	A <- cbind(row.names(A), A)
	colnames(A) <- c("name", "lower", "upper", "type")
	if(spotConfig$spot.fileMode){
		spotWriteAroi(A,spotConfig$io.verbosity,spotConfig$io.columnSep,spotConfig$io.aroiFileName)
	}
	spotConfig$alg.aroi<-spotConfig$alg.roi
	## Generate the initial design (only when a positive size is requested) ...
	if(spotConfig$init.design.size>0){
		initDes<-eval(call(spotConfig$init.design.func,
						spotConfig,
						spotConfig$init.design.size,
						spotConfig$init.design.retries))
	}else{
		initDes <- NULL
	}
	## ... and/or add manually specified design points
	if(!is.null(spotConfig$init.design.man)){
		colnames(spotConfig$init.design.man) <- rownames(spotConfig$alg.roi)
		initDes <- rbind(initDes,spotConfig$init.design.man)
	}
	if(is.null(initDes)){
		stop("Initial Design for SPOT is empty. Set spotConfig$init.design.size to a value larger than zero, or specify design points manually in spotConfig$init.design.man.")
	}
	## FIRST COLUMN ADDED: "CONFIG" numbers each design point.
	## seq_len() instead of 1:nrow(): a zero-row design cannot silently
	## produce the invalid sequence c(1, 0).
	configNumber<-seq_len(nrow(initDes))
	initDes <- cbind(initDes,configNumber)
	colnames(initDes)[ncol(initDes)] <- "CONFIG"
	## SECOND COLUMN ADDED:
	## number of repeats for the initial design points as "REPEATS"
	initDes <- cbind(initDes,spotConfig$init.design.repeats)
	colnames(initDes)[ncol(initDes)] <- "REPEATS"
	## THIRD COLUMN ADDED: "STEP" documents the sequential step a point was
	## created in; the initial design is step 0 (refers to auto.loop.steps)
	initDes <- cbind(initDes,0)
	colnames(initDes)[ncol(initDes)] <- "STEP"
	## FOURTH COLUMN ADDED:
	## "SEED" holds the random generator seed used for each point
	## (same seed provides reproducible runs)
	seed <- spotConfig$alg.seed
	initDes <- cbind(initDes,seed)
	colnames(initDes)[ncol(initDes)] <- "SEED"
	if (spotConfig$spot.fileMode){
		if (file.exists(spotConfig$io.desFileName)){
			file.remove(spotConfig$io.desFileName)
		}
		## write the design to a NEW .des-file
		spotWriteDes(initDes,spotConfig$io.verbosity,spotConfig$io.columnSep,spotConfig$io.desFileName)
		## Now delete the old .res and .bst files.
		## && (not &): scalar conditions, short-circuit so file.exists()
		## is only consulted when deletion is requested at all.
		if (spotConfig$init.delete.previous.files && file.exists(spotConfig$io.bstFileName)){
			file.remove(spotConfig$io.bstFileName)
		}
		if (spotConfig$init.delete.previous.files && file.exists(spotConfig$io.resFileName)){
			file.remove(spotConfig$io.resFileName)
		}
	}
	spotConfig$alg.currentDesign<-initDes
	spotConfig
}
###################################################################################
## Second Step: Algorithm Call
###################################################################################
#' SPOT Step Algorithm Call
#'
#' This is the second SPOT Step after step "initial" - but also needed
#' after each step "sequential", and is a call frame for the algorithm-call.
#'
#' The algorithm is the heart of what the user must provide, but SPOT should be
#' able to handle them in the most flexible manner. This function is an interface to the algorithm,
#' given as a R-function.
#'
#' @param spotConfig the list of all configuration parameters, but most important ones are:\cr
#' \code{spotConfig$alg.func} the name of the R target function \cr
#' \code{spotConfig$io.apdFileName} filename for the problem definition of the algorithm,
#' first parameter of the generically defined R-function spotConfig$alg.func \cr
#' \code{spotConfig$io.desFileName} filename for the input of the algorithm,
#' second parameter of the generically defined R-function spotConfig$alg.func \cr
#' \code{spotConfig$io.resFileName} filename for the output of the algorithm
#' third parameter of the generically defined R-function spotConfig$alg.func\cr
#' \code{spotConfig$io.verbosity} verbosity for command window output, which is passed to the output function
#' @param ... additional parameters to be passed on to target function which is called inside alg.func
#'
#' @seealso \code{\link{SPOT}} \code{\link{spot}} \code{\link{spotStepInitial}}
#' \code{\link{spotStepSequential}}
#' @export
####################################################################################
spotStepRunAlg <- function(spotConfig,...){
	## Second SPOT step: run the target algorithm named in
	## spotConfig$alg.func on the current design and return the updated
	## spotConfig produced by that function.
	spotWriteLines(spotConfig$io.verbosity,2,paste("spotStepRunAlg started with ",spotConfig$alg.func,sep=""))
	if(!exists(spotConfig$alg.func))stop(paste("The target function name", spotConfig$alg.func, "is not found in the workspace \n
			Please make sure to load the target function in the workspace, or specify the correct function in spotConfig$alg.func"))
	#browser()
	#spotConfig<-eval(call(spotConfig$alg.func, spotConfig,...))
	## do.call forwards any extra ... arguments to the target function.
	## The assignment is the last expression, so its value (the updated
	## spotConfig) is returned, invisibly, to the caller.
	spotConfig<-do.call(spotConfig$alg.func, args=list(spotConfig,...)) #this allows further arguments dot-dot-dot
}
###################################################################################
## Third Step: Sequential
#' SPOT Step Sequential
#'
#' Third SPOT Step to generate a sequential new design, this
#' is mainly a call of \code{\link{spotGenerateSequentialDesign}}
#'
#' Creates a sequential design based on the results derived so far. Therefor it is
#' essential to have another design evaluated before and have a .res file to use.
#' It uses the functions \code{\link{spotGenerateSequentialDesign}} and \code{\link{spotWriteDes}}
#' writes a sequential design to the file <xxx>.des
#'
#' @param spotConfig the list of all parameters is given, but the used ones are: \cr
#' \code{spotConfig$io.resFileName} is checked for existence is not, function fails with error\cr
#' \code{spotConfig$algSourceSrcPath} needed for the error message \cr
#' \code{spotConfig$userConfFileName} needed for the error message\cr
#' @export
###################################################################################
spotStepSequential <- function(spotConfig) {
	## Third SPOT step: create additional design points from the results
	## gathered so far; delegates the actual work (including the OCBA
	## repeat-budgeting, see the historical note below) to
	## spotGenerateSequentialDesign.
	spotWriteLines(spotConfig$io.verbosity,2,"Create Sequential Design", con=stderr())
	## Results from spotStepRunAlg() must already exist, either on disk
	## (file mode) or in memory (alg.currentResult).
	if(spotConfig$spot.fileMode){
		if (!file.exists(spotConfig$io.resFileName)){
			stop("Error in spot.R::spotStepSequential:
			.res file not found, spotStepAlgRun() has to be executed before.")
		}
	}else{
		if(!nrow(spotConfig$alg.currentResult)>0){
			stop("Error in spot.R::spotStepSequential:
			result data not found, spotStepAlgRun() has to be executed before.")
		}
	}
	##NOTE: the following code was moved to spotGenerateSequentialDesign, when merging with ocba
	########MZ: Now first check for var of the y-values. Only if y varies (i.e. function is noisy and evaluations are repeated) use ocba
	# varies=TRUE;
	# if(spotConfig$spot.ocba == TRUE){ # only needs to be checked for ocba=TRUE
		# if(spotConfig$spot.fileMode){
			# res<-spotGetRawResData(spotConfig)
			# spotConfig<-res$conf
			# rawData<-res$rawD
			# res<-NULL
		# }else{
			# rawData=spotConfig$alg.currentResult;
		# }
		# z <- split(rawData[,spotConfig$alg.resultColumn], rawData$CONFIG);
	#########varY <- sapply(as.data.frame(z),var);
		# varY <- sapply(z,var);
		# for (i in 1:length(varY)){
			# if (is.na(varY[i])||is.nan(varY[i])||is.null(varY[i])||(varY[i]==0)){
				# varies=FALSE;
			# }
		# }
	# }
	############Now call sequential design function
	# if ((spotConfig$spot.ocba == TRUE)&(varies == TRUE)){
		# spotConfig <- spotGenerateSequentialDesignOcba(spotConfig);
	# }
	# else if (spotConfig$spot.ocba == FALSE) {
		# spotConfig <- spotGenerateSequentialDesign(spotConfig);
	# }
	# else{
		# stop("
		# There is no variance for some point(s) in the current design.
		# Therefore OCBA cannot be used. Possible reasons are a target
		# function without noise, or the design points are not repeated.
		# SPOT with OCBA makes only sense if the target function is noisy.
		# If a non noisy function is used, the default settings should
		# be adopted, as described in the help of spot() or spotOptim().
		# That means: either use spot.ocba=FALSE, or set the repeats
		# (init.design.repeats) to values larger
		# than 1.
		# The current variance vector is for the used design points is:
		# ",paste(varY," "))
	# }
	## The generated design is stored inside the returned spotConfig.
	spotConfig <- spotGenerateSequentialDesign(spotConfig)
}
###################################################################################
## Fourth Step Report
###################################################################################
#' SPOT Step Report
#'
#' Fourth and last step for SPOT, that is by default a call of \link{spotReportDefault}
#'
#' This step provides a very basic report about the .res-file, based on settings in the \code{spotConfig}
#' The mainly used parameters of \code{spotConfig} is \code{spotConfig$report.func},
#' specifying which report shall be called. The user can specify his own report and should set the
#' value {report.func} in the configuration file according to the specification rules
#' given. If nothing is set, the default report is used.
#'
#' @param spotConfig the list of all parameters is given, it is forwarded to the call of the report-function
#' @seealso \code{\link{SPOT}} \code{\link{spot}} \link{spotReportDefault} \code{\link{spotGetOptions}}
#'
#' @export
###################################################################################
spotStepReport <- function(spotConfig) {
	## Fourth SPOT step: dispatch to the report function named in
	## spotConfig$report.func (default: spotReportDefault).
	if(!exists(spotConfig$report.func))stop(paste("The report function name", spotConfig$report.func, "is not found in the workspace \n
	Please make sure to load the report function in the workspace, or specify the correct function in spotConfig$report.func" ))
	## Lazily load the raw result data when the in-memory copy is missing.
	if(is.null(spotConfig$alg.currentResult))spotConfig$alg.currentResult<- spotGetRawResData(spotConfig)$rawD;
	## The report function receives and returns the full spotConfig list.
	spotConfig<-eval(call(spotConfig$report.func, spotConfig))
}
###################################################################################
## Step Auto
###################################################################################
#' SPOT Step Auto Opt
#'
#' spotStepAutoOpt is the default task called, when spot is started.
#'
#' The \code{auto} task calls the tasks \code{init} and \code{run} once
#' and loops \code{auto.loop.steps} times over the steps \code{seq} and \code{run}
#' finalising the function with a call of the report function. Instead of \code{auto.loop.steps}
#' also \code{auto.loop.nevals} can be used as a stopping criterion.
#'
#' @param spotConfig the list of all parameters is given, it is forwarded to the call of the report-function
#' the used parameters of spotConfig are just spotConfig$auto.loop.steps
#' specifying the number of meta models that should be calculated
#' @param ... additional parameters to be passed on to target function which is called inside alg.func
#'
#' @seealso \code{\link{SPOT}} \code{\link{spot}} \code{\link{spotStepInitial}}
#' \code{\link{spotStepSequential}} \code{\link{spotStepRunAlg}} \code{\link{spotStepReport}}
#' \code{\link{spotGetOptions}}
#' @export
###################################################################################
spotStepAutoOpt <- function(spotConfig,...){
	## Default SPOT task: run init + first algorithm call (unless continuing
	## an earlier run that already has results), then loop seq/run until
	## either auto.loop.steps or auto.loop.nevals is exhausted, and finish
	## with the report step. Crashes/interrupts inside the loop are caught
	## so the most recent spotConfig (with all results so far) is returned.
	if(!spotConfig$spot.continue || is.null(spotConfig$alg.currentResult)){
		spotConfig=spotStepInitial(spotConfig)
		spotConfig=spotStepRunAlg(spotConfig,...)
	}
	j <- max(spotConfig$alg.currentResult$STEP)   # sequential steps done so far
	k <- nrow(spotGetRawDataMatrixB(spotConfig))  # function evaluations so far
	if(!is.null(spotConfig$spot.catch.error)){
		## Variant with error handler: crashes inside the loop are reported
		## instead of propagated, so partial results remain recoverable.
		res<-tryCatch({ #This function will catch crashes and interrupts, but still return the last valid spotConfig list, to recover any available results and settings.
			while (j <= spotConfig$auto.loop.steps && k < spotConfig$auto.loop.nevals){
				spotWriteLines(spotConfig$io.verbosity,2,paste("SPOT Step:", j), con=stderr())
				spotConfig=spotStepSequential(spotConfig)
				spotConfig=spotStepRunAlg(spotConfig,...)
				k <- nrow(spotGetRawDataMatrixB(spotConfig))
				j <- j+1
			}
		}, interrupt = function(ex) {
			cat("An interrupt was detected in spotStepAutoOpt.\n")
			print(ex)
		}, error = function(ex) {
			cat("An error was detected in spotStepAutoOpt.\n")
			print(ex)
		}, finally = {}
		) #tryCatch end.
	}else{
		## Variant without error handler: only user interrupts are caught,
		## real errors still propagate to the caller.
		res<-tryCatch(
		{ #This function will catch interrupts, but still return the last valid spotConfig list, to recover any available results and settings.
			while (j <= spotConfig$auto.loop.steps && k < spotConfig$auto.loop.nevals){
				spotWriteLines(spotConfig$io.verbosity,2,paste("SPOT Step:", j), con=stderr())
				spotConfig=spotStepSequential(spotConfig)
				spotConfig=spotStepRunAlg(spotConfig,...)
				k <- nrow(spotGetRawDataMatrixB(spotConfig))
				j <- j+1
			}
		}, interrupt = function(ex) {
			cat("An interrupt was detected in spotStepAutoOpt.\n")
			print(ex)
		}, finally = {}
		) #tryCatch end.
	}
	## res is NULL after a clean loop (while() returns NULL); any non-NULL
	## value is a handler's return value and signals the run was cut short.
	if(!is.null(res)){#&& any(class(res)=="interrupt")){
		cat("A crash or interrupt occurred, most recent spotConfig list is returned, to allow recovery of results.\n")
		return(spotConfig)
	}
	if(spotConfig$io.verbosity>2){
		mergedData <- spotPrepareData(spotConfig)
		spotConfig=spotWriteBest(mergedData, spotConfig)
		spotPlotBst(spotConfig)
	}
	spotConfig=spotStepReport(spotConfig)
	spotConfig
}
###################################################################################
## Step Meta
###################################################################################
#' SPOT Step Meta
#'
#' Attention: This feature is work in progress, documentation is not up to date.
#'
#' The \code{meta} task calls spotStepMetaOpt which itself calls \code{\link{spot}}
#' with several different fixed
#' parameters to provide a mixed optimization mechanism: analyse a fully qualified
#' test of some parameters and the intelligent optimization of other parameters.
#' e.g. the number of the dimension of a problem etc.
#'
#' To start this step you could for example do this:\cr
#' \code{spot("configFileName.conf","meta")}\cr
#'
#' @param spotConfig the list of all parameters is given
#'
#' @seealso \code{\link{spotGetOptions}}
#' @export
###################################################################################
spotStepMetaOpt <- function(spotConfig) {
	## Runs a "meta" experiment: builds a full factorial design over the
	## variables in spotConfig$meta.list and calls spot() once per design row,
	## appending the best result of every run to the FBS file
	## (spotConfig$io.fbsFileName) and finally to spotConfig$meta.fbs.result.
	#spotInstAndLoadPackages("AlgDesign")
	## Fall back to the default meta report function if none was configured.
	if(is.null(spotConfig$report.meta.func))spotConfig$report.meta.func = "spotReportMetaDefault";
	### Delete old FBS file so results of a previous meta run are not merged in.
	if(file.exists(spotConfig$io.fbsFileName)) {
		unlink(spotConfig$io.fbsFileName)
	}
	myList<-spotConfig$meta.list           # variables varied by the meta run
	mySetList<-spotConfig$meta.conf.list   # settings coupled to those variables
	nVars<-length(myList)
	x <- as.numeric(lapply(myList, length)) # create a vector "x" holding the length of each variable
	if (nVars==1){ # full factorial design with indices for all combinations:
		dat <- matrix(1:x, byrow = TRUE)
	}
	else{
		dat<- gen.factorial(x,varNames=names(myList),factors="all")
	}
	for (j in 1:nrow(dat)) {## Loop over full factorial combinations of all parameters specified in .meta
		graphics.off() ## close all remaining graphic devices - from old spotStepAutoOpt Runs
		myFbs<-list()      # record of the factorial values used in this run
		newConf<-list()    # spotConfig entries overridden for this run
		newConfSet<-list() # coupled settings from meta.conf.list for this run
		for (k in 1:nVars) {
			# left side of the assignment
			## the factorial value of the kth variable for this dat[j]-row is assigned to a character variable:
			newConf[[names(myList[k])]] <- myList[[k]][[dat[j,k]]]
			## copy every coupled setting of variable k for this design level
			for (ii in 1:length(mySetList[[k]])){
				if(length(mySetList[[k]])>0){
					newConfSet[[names(mySetList[[k]][ii])]]<-mySetList[[k]][[ii]][dat[j,k]]
				}
			}
		}
		myFbs <- c(myFbs, newConf)
		newConf<-append(newConf,newConfSet)
		newSpotConfig<-append(newConf,spotConfig) ## create a temporary spotConfig for the calling of spotStepAuto
		newSpotConfig$spot.fileMode=FALSE;
		## duplicated() keeps the first occurrence, so the meta overrides
		## prepended above win over the entries of the original spotConfig.
		newSpotConfig <- newSpotConfig[!duplicated(names(newSpotConfig))]; ## delete unneeded entries
		newSpotConfig=spot(spotConfig=newSpotConfig)	## THIS calls spot for ONE configuration of the meta run
		tmpBst<-newSpotConfig$alg.currentBest;  # best-so-far table of that run
		design = as.list(dat[j,])
		names(design)=paste(names(design),"NUM", sep="")  # factor-index columns, e.g. "dimNUM"
		myFbsFlattened <- spotMetaFlattenFbsRow(append(myFbs,design))
		## One result row: last (best) line of the run plus the meta settings.
		dataTHIS<-as.data.frame(cbind(tmpBst[nrow(tmpBst),],myFbsFlattened))
		## Merge with results written by earlier iterations, if any exist.
		if(file.exists(spotConfig$io.fbsFileName)) {
			dataLAST<-as.data.frame(read.table(file=spotConfig$io.fbsFileName,header=TRUE))
			data<-merge(dataLAST,dataTHIS,all=TRUE,sort=FALSE)
		}
		else{
			data=dataTHIS
		}
		## Rewrite the whole FBS file each iteration (append = FALSE).
		write.table(file=spotConfig$io.fbsFileName,
				data,
				row.names = FALSE,
				col.names = TRUE,
				sep = " ",
				append = FALSE,
				quote=FALSE)
	} # for (j in 1:nrow(dat))... (loop over full factorial design)
	spotConfig$meta.fbs.result=data
	## NOTE(review): exists() is applied to the *name* string, so the report
	## function must already be visible in the workspace at this point.
	if(!exists(spotConfig$report.meta.func))stop(paste("The meta report function name", spotConfig$report.meta.func, "is not found in the workspace \n
		Please make sure to load the meta report function in the workspace, or specify the correct function in spotConfig$report.meta.func"))
	spotConfig<-eval(call(spotConfig$report.meta.func, spotConfig))
}
############# end function definitions ############################################################
###################################################################################################
## PART THREE: SPOT: The Program
###################################################################################################
#' Main function for the use of SPOT
#'
#' Sequential Parameter Optimization Toolbox (SPOT) provides a toolbox for the
#' sequential optimization of parameter driven tasks.
#' Use \code{\link{spotOptim}} for a \code{\link{optim}} like interface
#'
#' The path given with the \code{userConfigFile} also fixes the working directory used
#' throughout the run of all SPOT functions. All files that are needed for input/output
#' can and will be given relative to the path of the userConfigFile (this also holds for
#' the binary of the algorithm). This refers to files that are specified in the configFile
#' by the user.
#'
#' It is of major importance to understand that spot by default expects to optimize noisy functions. That means, the default settings of spot,
#' which are also used in spotOptim, include repeats of the initial and sequentially created design points. Also, as a default OCBA
#' is used to spread the design points for optimal usage of the function evaluation budget. OCBA will not work when there is no variance in the data.
#' So if the user wants to optimize non-noisy functions, the following settings should be used:\cr
#' \code{spotConfig$spot.ocba <- FALSE}\cr
#' \code{spotConfig$seq.design.maxRepeats <- 1}\cr
#' \code{spotConfig$init.design.repeats <- 1}\cr
#'
#' @param configFile the absolute path including file-specifier, there is no default, this value should always be given
#' @param spotTask [init|seq|run|auto|rep] the switch for the tool used, default is "auto"
#' @param srcPath the absolute path to user written sources that extend SPOT, the default(NA) will search for sources in the path <.libPath()>/SPOT/R
#' @param spotConfig a list of parameters used to configure spot, default is spotConfig=NA, which means the configuration will only be read from the \code{configFile}, not given by manual user input.
#' Notice that parameters given in spotConfig will overwrite both default values assigned by SPOT, AND values defined in the Config file
#' However, values not passed by spotConfig will still be used as defaults. If you want to see those defaults, look at \code{\link{spotGetOptions}}
#' @param ... additional parameters to be passed on to target function which is called inside alg.func. Only relevant for spotTask "auto" and "run".
#' @note \code{spot()} expects char vectors as input, e.g. \code{spot("c:/configfile.conf","auto")}
#' @seealso \code{\link{SPOT}}, \code{\link{spotOptim}}, \code{\link{spotStepAutoOpt}}, \code{\link{spotStepInitial}},
#' \code{\link{spotStepSequential}}, \code{\link{spotStepRunAlg}}, \code{\link{spotStepReport}}
#' @export
###################################################################################################
spot <- function(configFile="NULL",spotTask="auto",srcPath=NA,spotConfig=NA,...){
	## Main SPOT entry point: prepares a spotConfig from the given config file
	## and/or user-supplied list, then dispatches to the requested task step.
	##
	## Args:
	##   configFile - path to a .conf file, or "NULL" to configure via spotConfig only
	##   spotTask   - one of init|seq|run|rep|auto|meta (long forms accepted)
	##   srcPath    - path to the SPOT R sources; NA searches .libPaths()
	##   spotConfig - list of settings overriding file values and defaults
	##   ...        - forwarded to the target function (tasks "run" and "auto")
	## Returns: the result of the dispatched step (typically a spotConfig list).
	writeLines("spot.R::spot started ") #bugfix MZ: spotWriteLines will not always work here, since spotConfig could be NA
	callingDirectory<-getwd()
	## Robustness fix: restore the caller's working directory even when one of
	## the steps below stops with an error (previously only restored on success).
	on.exit(setwd(callingDirectory), add = TRUE)
	## Idiom fix: use the scalar short-circuit '&&' instead of vectorized '&'.
	if(!(configFile=="NULL") && !file.exists(configFile)){
		stop("Error, configFile not found (or not \"NULL\")")
	}
	## Locate the installed SPOT sources in the library paths if none given.
	if(is.na(srcPath)){
		for(k in 1:length(.libPaths())){
			if(file.exists(paste(.libPaths()[k],"SPOT","R",sep="/"))){
				srcPath<-(paste(.libPaths()[k],"SPOT","R",sep="/"))
				break;
			}
		}
	}
	## PRELIMINARIES 1: load all functions belonging to SPOT - not necessary if provided SPOT is installed as package - useful for developers...
	spotConfig<-spotPrepare(srcPath,configFile,spotConfig)
	## SWITCH task according to the task extracted from the command line
	resSwitch <- switch(spotTask
			, init=, initial=spotStepInitial(spotConfig)       # First Step
			, seq=, sequential=spotStepSequential(spotConfig)  # Second Step
			, run=, runalg=spotStepRunAlg(spotConfig,...)      # Third Step
			, rep=, report=spotStepReport(spotConfig)          # Fourth Step
			, auto=, automatic=spotStepAutoOpt(spotConfig,...) # Automatically call First to Fourth Step
			, meta=spotStepMetaOpt(spotConfig) # Several spotStepAutoOpt runs: systematic testing of fixed parameters in the .apd file
			, "invalid switch" # return this at wrong CMD task
	);
	## ERROR handling
	## valid switch returns the step result, otherwise show error plus short help
	if (is.character(resSwitch) && resSwitch == "invalid switch") {
		stop(paste("ERROR, unknown task:", spotTask, "\nValid tasks are:\
    auto - run tuning in automated mode\
    initial - to create an initial design\
    run - start the program, algorithm, simulator\
    sequential - to create further design points\
    report - to generate a report from your results" ))
	}
	## Working directory is restored by the on.exit() handler above.
	resSwitch
}
###################################################################################################
#' Print function for spotConfig class
#'
#' Print function to summarize a spotConfig.
#'
#' @rdname print
#' @method print spotConfig
# @S3method print spotConfig
#' @param x spotConfig
#' @param ... additional parameters
#' @export
#' @keywords internal
#####################################################################################
print.spotConfig <- function(x, ...) {
	## S3 print method for "spotConfig" lists (the class is assigned only once,
	## in spotPrepare). Prints the element names rather than every value, plus
	## a hint on how to inspect the full contents.
	writeLines(c("This is a spotConfig list",
			"Current list content:"))
	print(names(x))
	writeLines(c("Use \"listname[]\" to print all list values, e.g. spotConfig[].",
			"See the help of spotGetOptions for more information."))
}
|
#############
# FUNCTIONS #
#############
#CALCULATE BETA PARAMETERS FOR EACH TAXON IN DATASET
####################################################
collectParameters <- function(datamatrix) {
  # Compute beta-distribution parameters for every taxon (row) of
  # 'datamatrix'; column 1 supplies the mean, column 2 the standard
  # deviation (see getBetaParams). Returns one row per taxon with
  # columns: type, a, b, location, scale.
  taxa <- rownames(datamatrix)
  parameterNames <- list("type","a","b","location","scale")
  out <- matrix(data = NA, nrow = length(taxa), ncol = length(parameterNames),
                dimnames = list(taxa, parameterNames))
  for (row in seq_len(nrow(out))) {
    fit <- getBetaParams(datamatrix[row, 1], datamatrix[row, 2])
    out[row, ] <- c(fit$type, fit$a, fit$b, fit$location, fit$scale)
  }
  out
}
#CALCULATE PARAMETERS OF BETA DISTRIBUTION
##########################################
getBetaParams <- function(mean, sd) {
  # Method-of-moments fit of a standard beta distribution on [0, 1]:
  # given the sample mean and standard deviation, return the shape
  # parameters packaged in the PearsonDS type-1 parameter layout
  # (type = 1, location = 0, scale = 1).
  ratio <- (1 - mean) / mean
  scaleFac <- 1 + ratio            # equals 1/mean
  shape1 <- (ratio / (sd^2 * scaleFac^2) - 1) / scaleFac
  shape2 <- ratio * shape1
  list(type = 1, a = shape1, b = shape2, location = 0, scale = 1)
}
#CALCULATE PERCENT REMAINDER OF TAXON MEANS
###########################################
Premainder <- function(x) {
  # Convert a matrix of percentage means (rows = samples, cols = taxa) into
  # "percent remainder" proportions: the first taxon is divided by 100 and
  # every later taxon is divided by the total mass still remaining after the
  # preceding taxa were removed.
  # NOTE(review): the final matrix() call reshapes y into colsize x rowsize
  # and labels columns with a single "Mean" name, which is only a true
  # transpose for single-row input - all callers in this file pass 1-row
  # matrices; confirm before using with more rows.
  rowsize <- dim(x)[1]
  colsize <- dim(x)[2]
  dims <- dimnames(x)[[2]]
  # Perf fix: the original recomputed apply(x, 1, sum) over the whole matrix
  # on every loop iteration (accidental O(n^2)); hoist the row sums once.
  totals <- rowSums(x)
  y <- matrix(nrow = rowsize, ncol = colsize)
  for (i in seq_len(rowsize)) {
    total <- totals[i]
    y[i, 1] <- x[i, 1] / 100        # first taxon: plain percentage -> proportion
    total <- total - x[i, 1]
    if (colsize > 1) {
      for (k in 2:colsize) {
        y[i, k] <- x[i, k] / total  # share of the remaining mass
        total <- total - x[i, k]
      }
    }
  }
  return(matrix(y, nrow = colsize, ncol = rowsize, dimnames = list(dims, "Mean")))
}
#SIMULATE SUBJECTS WITH BROKEN-STICK MODEL FOR A GIVEN DISTRIBUTION
###################################################################
spaceFill <- function (dataMatrix, distParameters, subjects){
	## Broken-stick simulation: for every simulated subject, walk through the
	## taxa in order, draw a Pearson-distributed proportion of the *remaining*
	## mass for each taxon, and give the final taxon whatever mass is left.
	## Returns a subjects x taxa matrix of simulated proportions.
	nTaxa <- dim(dataMatrix)[1]                 # number of taxa for columns
	sim <- matrix(nrow = subjects, ncol = nTaxa)
	colnames(sim) <- dimnames(dataMatrix)[[1]]  # taxa names label the columns
	rownames(sim) <- rownames(sim, do.NULL = FALSE, prefix = "Sample")
	for (subj in 1:subjects) {
		remaining <- 1
		for (tx in 1:(nTaxa - 1)) {
			## rpearson() comes from the PearsonDS package, loaded by the caller
			draw <- rpearson(n = 1, params = distParameters[tx, ])
			sim[subj, tx] <- remaining * draw
			remaining <- remaining - sim[subj, tx]
		}
		sim[subj, nTaxa] <- remaining           # last taxon takes the remainder
	}
	sim
}
#WRAPPER FUNCTION
#################
simulateBrokenStick <- function(inputFilename,outputLabel,numberSubjects=25) {
  # Read taxon proportion data from 'inputFilename' (rows = taxa, cols =
  # observed samples), fit per-taxon beta distributions to the percent-
  # remainder means and raw standard deviations, simulate 'numberSubjects'
  # subjects with the broken-stick model, plot a barchart labelled
  # 'outputLabel', print a simulated-vs-observed comparison, and return the
  # simulated subjects x taxa matrix.
  #set.seed(1234) #always use the same seed during testing
  library(PearsonDS) #use pearsons distribution library (beta distribution)
  library(HMP)       #use HMP package (provides Barchart.data)
  rawData <- read.table(inputFilename) #read data from file
  colSums(rawData) # NOTE(review): intended as a column-sum sanity check, but the result is silently discarded
  #get standard deviation of each taxon in the data set
  rawStdDiv <- matrix(apply(rawData,1,sd),dimnames=list(rownames(rawData), "SD"))
  #get arithmetic mean of each taxon in the dataset
  # FIX: the taxon count was hard-coded as ncol=5; derive it from the data so
  # inputs with any number of taxa work (5-taxon inputs behave identically).
  rawMean <- matrix(rowMeans(rawData), nrow=1, ncol=nrow(rawData),
                    dimnames=list("Mean", rownames(rawData)))
  meanPercentRemainder <- Premainder(rawMean*100) #get percent remainder for mean proportions of taxa
  pR_MeanStDev <- cbind(meanPercentRemainder, rawStdDiv) #combine percent remainder means with raw stdev
  parameters <- collectParameters(pR_MeanStDev) #calculate parameters of percent remainder beta dist
  brokenStickSim <- spaceFill(pR_MeanStDev,parameters,numberSubjects) #run the simulation
  Barchart.data(brokenStickSim, title=outputLabel) #display data as barchart
  print('Standard Deviation, simulated vs. provided data')
  print(apply(brokenStickSim,2,sd)) #compare simulated standard deviations
  print(apply(rawData,1,sd))        #to raw reported standard deviations
  print('Mean, simulated vs. provided data')
  print(colMeans(brokenStickSim))   #compare simulated means
  print(rowMeans(rawData))          #to raw reported means
  print('')
  print('')
  return(brokenStickSim)
}
# Run the broken-stick simulation for each cohort file; the second argument
# labels the output barchart and the third sets the number of simulated
# subjects (defaults to 25 when omitted).
simulateBrokenStick("ADControls.txt","Controls", 5000)
simulateBrokenStick("ADBaseline.txt","ADBaseline", 300)
simulateBrokenStick("ADFlareNT.txt","ADFlare", 300)
simulateBrokenStick("ADFlareT.txt","ADTreatment", 300)
simulateBrokenStick("ADPostFlare.txt","ADPost") | /ADdata/ADDataSimulationRae20131003.R | no_license | rheitkamp/microbiomePower | R | false | false | 4,152 | r |
#############
# FUNCTIONS #
#############
#CALCULATE BETA PARAMETERS FOR EACH TAXON IN DATASET
####################################################
collectParameters <- function(datamatrix) {
  # Compute beta-distribution parameters for every taxon (row) of
  # 'datamatrix'; column 1 supplies the mean and column 2 the standard
  # deviation (see getBetaParams). Returns a matrix with one row per taxon
  # and columns: type, a, b, location, scale.
  taxa <- rownames(datamatrix)
  parameterNames <- list("type","a","b","location","scale")
  BetaParameters <- matrix(data = NA, nrow = length(taxa), ncol = length(parameterNames), dimnames=list(taxa, parameterNames))
  for (i in 1:nrow(BetaParameters)) {
    # datamatrix[i,1] = mean, datamatrix[i,2] = sd for taxon i
    parameters <- getBetaParams(datamatrix[i,1],datamatrix[i,2]);
    BetaParameters[i,] <- c(parameters$type, parameters$a, parameters$b, parameters$location, parameters$scale)
  }
  return(BetaParameters)
}
#CALCULATE PARAMETERS OF BETA DISTRIBUTION
##########################################
getBetaParams <- function(mean, sd) {
  # Method-of-moments fit of a standard beta distribution on [0, 1]: given
  # the sample mean and standard deviation, return the shape parameters in
  # the PearsonDS type-1 parameter layout (type = 1, location = 0, scale = 1).
  m <- (1-mean)/mean          # (1 - mean) / mean
  n <- 1 + m                  # equals 1/mean
  alpha <- (1/n)*(m/(sd^2*n^2)-1)  # = mean * (mean*(1-mean)/sd^2 - 1)
  beta <- m * alpha
  params <- list(type=1, a=alpha, b=beta, location=0, scale=1)
  return(params)
}
#CALCULATE PERCENT REMAINDER OF TAXON MEANS
###########################################
Premainder <- function(x) {
  # Convert a matrix of percentage means (rows = samples, cols = taxa) into
  # "percent remainder" proportions: the first taxon is divided by 100 and
  # every later taxon is divided by the total mass still remaining after the
  # preceding taxa were removed.
  # NOTE(review): the final matrix() call reshapes y into colsize x rowsize
  # and labels columns with a single "Mean" name, which is only a true
  # transpose for single-row input - all callers in this file pass 1-row
  # matrices; confirm before using with more rows.
  rowsize <- dim(x)[1]
  colsize <- dim(x)[2]
  dims <- dimnames(x)[[2]]
  # Perf fix: the original recomputed apply(x, 1, sum) over the whole matrix
  # on every loop iteration (accidental O(n^2)); hoist the row sums once.
  totals <- rowSums(x)
  y <- matrix(nrow = rowsize, ncol = colsize)
  for (i in seq_len(rowsize)) {
    total <- totals[i]
    y[i, 1] <- x[i, 1] / 100        # first taxon: plain percentage -> proportion
    total <- total - x[i, 1]
    if (colsize > 1) {
      for (k in 2:colsize) {
        y[i, k] <- x[i, k] / total  # share of the remaining mass
        total <- total - x[i, k]
      }
    }
  }
  return(matrix(y, nrow = colsize, ncol = rowsize, dimnames = list(dims, "Mean")))
}
#SIMULATE SUBJECTS WITH BROKEN-STICK MODEL FOR A GIVEN DISTRIBUTION
###################################################################
spaceFill <- function (dataMatrix, distParameters, subjects){
  # Broken-stick simulation: for each simulated subject, walk through the
  # taxa in order, draw a Pearson-distributed proportion of the *remaining*
  # mass for each taxon, and give the final taxon whatever mass is left.
  # Returns a subjects x taxa matrix of simulated proportions.
  subjects <- subjects #number of subjects for rows
  taxa <- dim(dataMatrix)[1] #number of taxa for columns
  Cdata <- matrix(nrow=subjects,ncol=taxa)
  taxaNames <- dimnames(dataMatrix)[[1]]
  colnames(Cdata) <- taxaNames #list taxa names in columns
  rownames(Cdata) <- rownames(Cdata, do.NULL= FALSE, prefix= "Sample") #call rows samples
  for (i in 1:subjects){
    total <- 1
    for (j in 1:(taxa-1)){
      # rpearson() comes from the PearsonDS package, loaded by the caller
      r <- rpearson(n=1, params=distParameters[j,]);
      Cdata[i,j] <- total * r
      total <- total - Cdata[i,j];
    }
    Cdata[i,taxa] <- total;  # last taxon takes whatever mass remains
  }
  return(Cdata)
}
#WRAPPER FUNCTION
#################
simulateBrokenStick <- function(inputFilename,outputLabel,numberSubjects=25) {
  # Read taxon proportion data from 'inputFilename' (rows = taxa, cols =
  # observed samples), fit per-taxon beta distributions to the percent-
  # remainder means and raw standard deviations, simulate 'numberSubjects'
  # subjects with the broken-stick model, plot a barchart labelled
  # 'outputLabel', print a simulated-vs-observed comparison, and return the
  # simulated subjects x taxa matrix.
  #set.seed(1234) #always use the same seed during testing
  library(PearsonDS) #use pearsons distribution library (beta distribution)
  library(HMP)       #use HMP package (provides Barchart.data)
  rawData <- read.table(inputFilename) #read data from file
  colSums(rawData) # NOTE(review): intended as a column-sum sanity check, but the result is silently discarded
  #get standard deviation of each taxon in the data set
  rawStdDiv <- matrix(apply(rawData,1,sd),dimnames=list(rownames(rawData), "SD"))
  #get arithmetic mean of each taxon in the dataset
  # FIX: the taxon count was hard-coded as ncol=5; derive it from the data so
  # inputs with any number of taxa work (5-taxon inputs behave identically).
  rawMean <- matrix(rowMeans(rawData), nrow=1, ncol=nrow(rawData),
                    dimnames=list("Mean", rownames(rawData)))
  meanPercentRemainder <- Premainder(rawMean*100) #get percent remainder for mean proportions of taxa
  pR_MeanStDev <- cbind(meanPercentRemainder, rawStdDiv) #combine percent remainder means with raw stdev
  parameters <- collectParameters(pR_MeanStDev) #calculate parameters of percent remainder beta dist
  brokenStickSim <- spaceFill(pR_MeanStDev,parameters,numberSubjects) #run the simulation
  Barchart.data(brokenStickSim, title=outputLabel) #display data as barchart
  print('Standard Deviation, simulated vs. provided data')
  print(apply(brokenStickSim,2,sd)) #compare simulated standard deviations
  print(apply(rawData,1,sd))        #to raw reported standard deviations
  print('Mean, simulated vs. provided data')
  print(colMeans(brokenStickSim))   #compare simulated means
  print(rowMeans(rawData))          #to raw reported means
  print('')
  print('')
  return(brokenStickSim)
}
# Run the broken-stick simulation for each cohort file; the second argument
# labels the output barchart and the third sets the number of simulated
# subjects (defaults to 25 when omitted).
simulateBrokenStick("ADControls.txt","Controls", 5000)
simulateBrokenStick("ADBaseline.txt","ADBaseline", 300)
simulateBrokenStick("ADFlareNT.txt","ADFlare", 300)
simulateBrokenStick("ADFlareT.txt","ADTreatment", 300)
simulateBrokenStick("ADPostFlare.txt","ADPost") |
## Example usage of crfsuite::crf_options() (auto-extracted from the
## crf_options.Rd help page). Each call prints the tunable hyper-parameters
## of one CRF training method supported by CRFsuite.
library(crfsuite)
### Name: crf_options
### Title: Conditional Random Fields parameters
### Aliases: crf_options
### ** Examples
# L-BFGS with L1/L2 regularization
opts <- crf_options("lbfgs")
str(opts)
# SGD with L2-regularization
crf_options("l2sgd")
# Averaged Perceptron
crf_options("averaged-perceptron")
# Passive Aggressive
crf_options("passive-aggressive")
# Adaptive Regularization of Weights (AROW)
crf_options("arow")
| /data/genthat_extracted_code/crfsuite/examples/crf_options.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 440 | r | library(crfsuite)
### Name: crf_options
### Title: Conditional Random Fields parameters
### Aliases: crf_options
### ** Examples
## NOTE(review): requires library(crfsuite) to be attached beforehand; each
## call prints the tunable hyper-parameters of one CRF training method.
# L-BFGS with L1/L2 regularization
opts <- crf_options("lbfgs")
str(opts)
# SGD with L2-regularization
crf_options("l2sgd")
# Averaged Perceptron
crf_options("averaged-perceptron")
# Passive Aggressive
crf_options("passive-aggressive")
# Adaptive Regularization of Weights (AROW)
crf_options("arow")
|
doubleGauss.boot <- function(part1.list, seed = new.seed(), alpha = 0.05, paired = FALSE, N.iter = 1000, cores = 1, p.adj = "oleson", test.spots = NULL, time.test = NULL, test.params = FALSE) {
data <- part1.list$data
col <- part1.list$col
rho.0 <- part1.list$rho.0
N.time <- part1.list$N.time
coef.id1 <- part1.list$coef.id1
coef.id2 <- part1.list$coef.id2
coef.id3 <- part1.list$coef.id3
coef.id4 <- part1.list$coef.id4
sdev.id1 <- part1.list$sdev.id1
sdev.id2 <- part1.list$sdev.id2
sdev.id3 <- part1.list$sdev.id3
sdev.id4 <- part1.list$sdev.id4
sigma.id1 <- part1.list$sigma.id1
sigma.id2 <- part1.list$sigma.id2
sigma.id3 <- part1.list$sigma.id3
sigma.id4 <- part1.list$sigma.id4
id.nums.g1 <- part1.list$id.nums.g1
id.nums.g2 <- part1.list$id.nums.g2
groups <- part1.list$groups
time.all <- part1.list$time.all
N.g1 <- part1.list$N.g1
N.g2 <- part1.list$N.g2
diffs <- part1.list$diffs
if(!is.null(test.spots)) time.all <- test.spots
N.tests <- length(time.all)
N.time <- length(time.all)
group1.bad <- is.na(coef.id1[,1]) | is.na(coef.id3[,1])
group2.bad <- is.na(coef.id2[,1]) | is.na(coef.id4[,1])
coef.id1 <- subset(coef.id1, !group1.bad)
coef.id3 <- subset(coef.id3, !group1.bad)
coef.id2 <- subset(coef.id2, !group2.bad)
coef.id4 <- subset(coef.id4, !group2.bad)
sdev.id1 <- subset(sdev.id1, !group1.bad)
sdev.id3 <- subset(sdev.id3, !group1.bad)
sdev.id2 <- subset(sdev.id2, !group2.bad)
sdev.id4 <- subset(sdev.id4, !group2.bad)
sigma.id1 <- subset(sigma.id1, !group1.bad)
sigma.id3 <- subset(sigma.id3, !group1.bad)
sigma.id2 <- subset(sigma.id2, !group2.bad)
sigma.id4 <- subset(sigma.id4, !group2.bad)
id.nums.g1 <- id.nums.g1[!group1.bad]
id.nums.g2 <- id.nums.g2[!group2.bad]
N.g1 <- N.g1 - sum(group1.bad)
N.g2 <- N.g2 - sum(group2.bad)
curve1.0 <- matrix(NA, ncol = N.time, nrow = N.g1)
mu1.ran <- rep(NA, N.g1)
ht1.ran <- rep(NA, N.g1)
s11.ran <- rep(NA, N.g1)
s21.ran <- rep(NA, N.g1)
b11.ran <- rep(NA, N.g1)
b21.ran <- rep(NA, N.g1)
curve2.0 <- matrix(NA, ncol = N.time, nrow = N.g2)
mu2.ran <- rep(NA, N.g2)
ht2.ran <- rep(NA, N.g2)
s12.ran <- rep(NA, N.g2)
s22.ran <- rep(NA, N.g2)
b12.ran <- rep(NA, N.g2)
b22.ran <- rep(NA, N.g2)
curve1.mat <- matrix(NA, ncol = N.time, nrow = N.iter)
curve2.mat <- matrix(NA, ncol = N.time, nrow = N.iter)
curve3.mat <- matrix(NA, ncol = N.time, nrow = N.iter)
#Target Curve
curve.f <- function(mu, ht, sig1, sig2, base1, base2, x){
whichgauss <- x < mu
y1 <- exp(-1 * (x - mu) ^ 2 / (2 * sig1 ^ 2)) * (ht - base1) + base1
y2 <- exp(-1 * (x - mu) ^ 2 / (2 * sig2 ^ 2)) * (ht - base2) + base2
y <- whichgauss * y1 + (1 - whichgauss) * y2
y
}
mu.1 <- ht.1 <- s1.1 <- s2.1 <- b1.1 <- b2.1 <-
mu.2 <- ht.2 <- s1.2 <- s2.2 <- b1.2 <- b2.2 <- numeric(N.iter)
##################
##### 1 Core #####
##################
if(paired) {
mu.cov.1 <- cov(coef.id1[,1], coef.id2[,1], use = "pairwise.complete.obs")
ht.cov.1 <- cov(coef.id1[,2], coef.id2[,2], use = "pairwise.complete.obs")
s1.cov.1 <- cov(coef.id1[,3], coef.id2[,3], use = "pairwise.complete.obs")
s2.cov.1 <- cov(coef.id1[,4], coef.id2[,4], use = "pairwise.complete.obs")
b1.cov.1 <- cov(coef.id1[,5], coef.id2[,5], use = "pairwise.complete.obs")
b2.cov.1 <- cov(coef.id1[,6], coef.id2[,6], use = "pairwise.complete.obs")
if(diffs) {
mu.cov.2 <- cov(coef.id3[,1], coef.id4[,1], use = "pairwise.complete.obs")
ht.cov.2 <- cov(coef.id3[,2], coef.id4[,2], use = "pairwise.complete.obs")
s1.cov.2 <- cov(coef.id3[,3], coef.id4[,3], use = "pairwise.complete.obs")
s2.cov.2 <- cov(coef.id3[,4], coef.id4[,4], use = "pairwise.complete.obs")
b1.cov.2 <- cov(coef.id3[,5], coef.id4[,5], use = "pairwise.complete.obs")
b2.cov.2 <- cov(coef.id3[,6], coef.id4[,6], use = "pairwise.complete.obs")
}
}
if(cores == 1) {
set.seed(seed)
for(iter in 1:N.iter){
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id1[i,1], coef.id2[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.1, mu.cov.1, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id1[i,2], coef.id2[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.1, ht.cov.1, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id1[i,3], coef.id2[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.1, s1.cov.1, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id1[i,4], coef.id2[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.1, s2.cov.1, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id1[i,5], coef.id2[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.1, b1.cov.1, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id1[i,6], coef.id2[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.1, b2.cov.1, sdev.id2[i,6] ^ 2), nrow = 2))
mu1.ran <- mu[1]; mu2.ran <- mu[2]
ht1.ran <- ht[1]; ht2.ran <- ht[2]
s11.ran <- s1[1]; s12.ran <- s1[2]
s21.ran <- s2[1]; s22.ran <- s2[2]
b11.ran <- b1[1]; b12.ran <- b1[2]
b21.ran <- b2[1]; b22.ran <- b2[2]
}
} else {
mu1.ran <- rnorm(N.g1, coef.id1[,1], sdev.id1[,1])
ht1.ran <- rnorm(N.g1, coef.id1[,2], sdev.id1[,2])
s11.ran <- rnorm(N.g1, coef.id1[,3], sdev.id1[,3])
s21.ran <- rnorm(N.g1, coef.id1[,4], sdev.id1[,4])
b11.ran <- rnorm(N.g1, coef.id1[,5], sdev.id1[,5])
b21.ran <- rnorm(N.g1, coef.id1[,6], sdev.id1[,6])
mu2.ran <- rnorm(N.g2, coef.id2[,1], sdev.id2[,1])
ht2.ran <- rnorm(N.g2, coef.id2[,2], sdev.id2[,2])
s12.ran <- rnorm(N.g2, coef.id2[,3], sdev.id2[,3])
s22.ran <- rnorm(N.g2, coef.id2[,4], sdev.id2[,4])
b12.ran <- rnorm(N.g2, coef.id2[,5], sdev.id2[,5])
b22.ran <- rnorm(N.g2, coef.id2[,6], sdev.id2[,6])
}
mu.1[iter] <- mean(mu1.ran); mu.2[iter] <- mean(mu2.ran)
ht.1[iter] <- mean(ht1.ran); ht.2[iter] <- mean(ht1.ran)
s1.1[iter] <- mean(s11.ran); s1.2[iter] <- mean(s12.ran)
s2.1[iter] <- mean(s21.ran); s2.2[iter] <- mean(s22.ran)
b1.1[iter] <- mean(b11.ran); b1.2[iter] <- mean(b12.ran)
b2.1[iter] <- mean(b21.ran); b2.2[iter] <- mean(b22.ran)
if(diffs) {
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id3[i,1], coef.id4[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.2, mu.cov.2, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id3[i,2], coef.id4[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.2, ht.cov.2, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id3[i,3], coef.id4[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.2, s1.cov.2, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id3[i,4], coef.id4[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.2, s2.cov.2, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id3[i,5], coef.id4[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.2, b1.cov.2, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id3[i,6], coef.id4[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.2, b2.cov.2, sdev.id2[i,6] ^ 2), nrow = 2))
mu3.ran <- mu[1]; mu4.ran <- mu[2]
ht3.ran <- ht[1]; ht4.ran <- ht[2]
s13.ran <- s1[1]; s14.ran <- s1[2]
s23.ran <- s2[1]; s24.ran <- s2[2]
b13.ran <- b1[1]; b14.ran <- b1[2]
b23.ran <- b2[1]; b24.ran <- b2[2]
}
} else {
mu3.ran <- rnorm(N.g1, coef.id3[,1], sdev.id3[,1])
ht3.ran <- rnorm(N.g1, coef.id3[,2], sdev.id3[,2])
s13.ran <- rnorm(N.g1, coef.id3[,3], sdev.id3[,3])
s23.ran <- rnorm(N.g1, coef.id3[,4], sdev.id3[,4])
b13.ran <- rnorm(N.g1, coef.id3[,5], sdev.id3[,5])
b23.ran <- rnorm(N.g1, coef.id3[,6], sdev.id3[,6])
mu4.ran <- rnorm(N.g2, coef.id4[,1], sdev.id4[,1])
ht4.ran <- rnorm(N.g2, coef.id4[,2], sdev.id4[,2])
s14.ran <- rnorm(N.g2, coef.id4[,3], sdev.id4[,3])
s24.ran <- rnorm(N.g2, coef.id4[,4], sdev.id4[,4])
b14.ran <- rnorm(N.g2, coef.id4[,5], sdev.id4[,5])
b24.ran <- rnorm(N.g2, coef.id4[,6], sdev.id4[,6])
}
}
if(diffs) {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all) -
curve.f(mu3.ran[id], ht3.ran[id], s13.ran[id], s23.ran[id],
b13.ran[id], b23.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all) -
curve.f(mu4.ran[id], ht4.ran[id], s14.ran[id], s24.ran[id],
b14.ran[id], b24.ran[id], time.all)
}
} else {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all)
}
}
curve1.mat[iter,] <- apply(curve1.0, 2, mean) #Mean fixations at each time point for group 1
curve2.mat[iter,] <- apply(curve2.0, 2, mean) #Mean fixations at each time point for group 2
if(paired) curve3.mat[iter,] <- apply(curve2.0 - curve1.0, 2, mean)
}
} else {
####################
##### 2+ Cores #####
####################
cl <- makePSOCKcluster(cores)
registerDoParallel(cl)
for.out <- foreach(iter = 1:N.iter, .combine = rbind, .options.RNG = seed) %dorng% {
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id1[i,1], coef.id2[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.1, mu.cov.1, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id1[i,2], coef.id2[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.1, ht.cov.1, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id1[i,3], coef.id2[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.1, s1.cov.1, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id1[i,4], coef.id2[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.1, s2.cov.1, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id1[i,5], coef.id2[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.1, b1.cov.1, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id1[i,6], coef.id2[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.1, b2.cov.1, sdev.id2[i,6] ^ 2), nrow = 2))
mu1.ran <- mu[1]; mu2.ran <- mu[2]
ht1.ran <- ht[1]; ht2.ran <- ht[2]
s11.ran <- s1[1]; s12.ran <- s1[2]
s21.ran <- s2[1]; s22.ran <- s2[2]
b11.ran <- b1[1]; b12.ran <- b1[2]
b21.ran <- b2[1]; b22.ran <- b2[2]
}
} else {
mu1.ran <- rnorm(N.g1, coef.id1[,1], sdev.id1[,1])
ht1.ran <- rnorm(N.g1, coef.id1[,2], sdev.id1[,2])
s11.ran <- rnorm(N.g1, coef.id1[,3], sdev.id1[,3])
s21.ran <- rnorm(N.g1, coef.id1[,4], sdev.id1[,4])
b11.ran <- rnorm(N.g1, coef.id1[,5], sdev.id1[,5])
b21.ran <- rnorm(N.g1, coef.id1[,6], sdev.id1[,6])
mu2.ran <- rnorm(N.g2, coef.id2[,1], sdev.id2[,1])
ht2.ran <- rnorm(N.g2, coef.id2[,2], sdev.id2[,2])
s12.ran <- rnorm(N.g2, coef.id2[,3], sdev.id2[,3])
s22.ran <- rnorm(N.g2, coef.id2[,4], sdev.id2[,4])
b12.ran <- rnorm(N.g2, coef.id2[,5], sdev.id2[,5])
b22.ran <- rnorm(N.g2, coef.id2[,6], sdev.id2[,6])
}
mu.temp.1 <- mean(mu1.ran); mu.temp.2 <- mean(mu2.ran)
ht.temp.1 <- mean(ht1.ran); ht.temp.2 <- mean(ht1.ran)
s1.temp.1 <- mean(s11.ran); s1.temp.2 <- mean(s12.ran)
s2.temp.1 <- mean(s21.ran); s2.temp.2 <- mean(s22.ran)
b1.temp.1 <- mean(b11.ran); b1.temp.2 <- mean(b12.ran)
b2.temp.1 <- mean(b21.ran); b2.temp.2 <- mean(b22.ran)
if(diffs) {
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id3[i,1], coef.id4[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.2, mu.cov.2, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id3[i,2], coef.id4[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.2, ht.cov.2, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id3[i,3], coef.id4[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.2, s1.cov.2, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id3[i,4], coef.id4[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.2, s2.cov.2, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id3[i,5], coef.id4[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.2, b1.cov.2, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id3[i,6], coef.id4[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.2, b2.cov.2, sdev.id2[i,6] ^ 2), nrow = 2))
mu3.ran <- mu[1]; mu4.ran <- mu[2]
ht3.ran <- ht[1]; ht4.ran <- ht[2]
s13.ran <- s1[1]; s14.ran <- s1[2]
s23.ran <- s2[1]; s24.ran <- s2[2]
b13.ran <- b1[1]; b14.ran <- b1[2]
b23.ran <- b2[1]; b24.ran <- b2[2]
}
} else {
mu3.ran <- rnorm(N.g1, coef.id3[,1], sdev.id3[,1])
ht3.ran <- rnorm(N.g1, coef.id3[,2], sdev.id3[,2])
s13.ran <- rnorm(N.g1, coef.id3[,3], sdev.id3[,3])
s23.ran <- rnorm(N.g1, coef.id3[,4], sdev.id3[,4])
b13.ran <- rnorm(N.g1, coef.id3[,5], sdev.id3[,5])
b23.ran <- rnorm(N.g1, coef.id3[,6], sdev.id3[,6])
mu4.ran <- rnorm(N.g2, coef.id4[,1], sdev.id4[,1])
ht4.ran <- rnorm(N.g2, coef.id4[,2], sdev.id4[,2])
s14.ran <- rnorm(N.g2, coef.id4[,3], sdev.id4[,3])
s24.ran <- rnorm(N.g2, coef.id4[,4], sdev.id4[,4])
b14.ran <- rnorm(N.g2, coef.id4[,5], sdev.id4[,5])
b24.ran <- rnorm(N.g2, coef.id4[,6], sdev.id4[,6])
}
}
if(diffs) {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all) -
curve.f(mu3.ran[id], ht3.ran[id], s13.ran[id], s23.ran[id],
b13.ran[id], b23.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all) -
curve.f(mu4.ran[id], ht4.ran[id], s14.ran[id], s24.ran[id],
b14.ran[id], b24.ran[id], time.all)
}
} else {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all)
}
}
curve1 <- apply(curve1.0, 2, mean) #Mean fixations at each time point for CIs
curve2 <- apply(curve2.0, 2, mean) #Mean fixations at each time point for NHs
curve3 <- curve2 - curve1
c(curve1, curve2, curve3, mu.temp.1, ht.temp.1, s1.temp.1, s2.temp.1, b1.temp.1, b2.temp.1,
mu.temp.2, ht.temp.2, s1.temp.2, s2.temp.2, b1.temp.2, b2.temp.2)
}
curve1.mat <- for.out[,1:N.time]
curve2.mat <- for.out[,(N.time + 1):(2 * N.time)]
curve3.mat <- for.out[,(2 * N.time + 1):(3 * N.time)]
mu.1 <- for.out[, 3 * N.time + 1]
ht.1 <- for.out[, 3 * N.time + 2]
s1.1 <- for.out[, 3 * N.time + 3]
s2.1 <- for.out[, 3 * N.time + 4]
b1.1 <- for.out[, 3 * N.time + 5]
b2.1 <- for.out[, 3 * N.time + 6]
mu.2 <- for.out[, 3 * N.time + 7]
ht.2 <- for.out[, 3 * N.time + 8]
s1.2 <- for.out[, 3 * N.time + 9]
s2.2 <- for.out[, 3 * N.time + 10]
b1.2 <- for.out[, 3 * N.time + 11]
b2.2 <- for.out[, 3 * N.time + 12]
stopCluster(cl)
}
curve.mean1 <- apply(curve1.mat, 2, mean)
curve.mean2 <- apply(curve2.mat, 2, mean)
curve.g1 <- curve.mean1
curve.g2 <- curve.mean2
curve.sd1 <- apply(curve1.mat, 2, sd)
curve.sd2 <- apply(curve2.mat, 2, sd)
if(paired) {
diff.mean <- apply(curve3.mat, 2, mean)
curve.sd <- apply(curve3.mat, 2, sd)
t.val <- diff.mean / curve.sd
p.values <- 2 * (1 - pt(abs(t.val), N.g1 - 1))
} else {
t.num <- (curve.mean1 - curve.mean2)
t.den <- sqrt((N.g1 * (N.g1 - 1) * curve.sd1 ^ 2 + N.g2 * (N.g2 - 1) * curve.sd2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
t.val <- t.num / t.den
p.values <- 2 * (1 - pt(abs(t.val), (N.g1 + N.g2 - 2)))
}
par(mfrow = c(1,1))
# compute t-test at each time point
ticks <- seq(0, max(time.all), round(max(time.all) / 10))
plot(NULL, ,xlim = c(0, max(time.all)), ylim = c(0,1), ylab = 'Proportion of Fixations',
xlab = 'Time', axes = FALSE, main = 'Double-Gauss Curve')
axis(1, at = ticks)
axis(2)
box()
legend('topleft', lty = 1:2, legend = groups)
#Entries in tsmultcomp:
#1 : Estimate of rho
#2 : Overall Type I Error
#3 : Total tests we will perform
rho.est <- ar(t.val, FALSE, order.max = 1)$ar
if(p.adj == "oleson") {
if(paired) {
alphastar <- tsmultcomp(rho.est, alpha, N.tests, df = N.g1 - 1)
} else {
alphastar <- tsmultcomp(rho.est, alpha, N.tests, df = N.g1 + N.g2 - 2)
}
sig <- p.values <= alphastar
} else if(p.adj == "fdr") {
sig <- p.adjust(p.values, "fdr") <= alpha
} else if(p.adj == "none") {
sig <- p.values <= alpha
}
#Make significant area yellow
buck <- bucket(sig, time.all, ylim = c(0, .9))
#Plot overall estimate of curves
lines(time.all, curve.g1, lty = 1, lwd = 2)
lines(time.all, curve.g2, lty = 2, lwd = 2)
#Plot Confidence Interval for Group 1 curve
lines(time.all, curve.g1 - curve.sd1 * qt(alpha / 2, N.g1 - 1), lty = 1, lwd = 1,
col = "gray44")
lines(time.all, curve.g1 + curve.sd1 * qt(alpha / 2, N.g1 - 1), lty = 1, lwd = 1,
col = "gray44")
#Plot Confidence Interval for Group 2 curve
lines(time.all, curve.g2 - curve.sd2 * qt(alpha / 2, N.g2 - 1), lty = 2, lwd = 1,
col = "gray44")
lines(time.all, curve.g2 + curve.sd2 * qt(alpha / 2, N.g2 - 1), lty = 2, lwd = 1,
col = "gray44")
# Record confidence intervals
curve.ci1 <- curve.ci2 <- matrix(NA, nrow = length(time.all), ncol = 4)
curve.ci1[,1] <- curve.ci2[,1] <- time.all
curve.ci1[,2] <- curve.g1 - curve.sd1 * qt(1 - alpha / 2, N.g1 - 1)
curve.ci1[,3] <- curve.g1
curve.ci1[,4] <- curve.g1 + curve.sd1 * qt(1 - alpha / 2, N.g1 - 1)
curve.ci2[,2] <- curve.g2 - curve.sd2 * qt(1 - alpha / 2, N.g2 - 1)
curve.ci2[,3] <- curve.g2
curve.ci2[,4] <- curve.g2 + curve.sd2 * qt(1 - alpha / 2, N.g2 - 1)
colnames(curve.ci1) <- colnames(curve.ci2) <- c("Time", "Lower CI", "Estimate", "Upper CI")
if(!is.null(time.test)) {
time.test <- which(time.all %in% time.test)
cat("######################\n")
cat("## Individual Tests ##\n")
cat("######################\n")
for(i in 1:length(time.test)) {
time <- time.test[i]
mean.1 <- curve.g1[time]
mean.2 <- curve.g2[time]
sd.1 <- curve.sd1[time]
sd.2 <- curve.sd2[time]
time.mean <- mean.1 - mean.2
time.se <- sqrt((N.g1 * (N.g1 - 1) * sd.1 ^ 2 + N.g2 * (N.g2 - 1) * sd.2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
time.df <- N.g1 + N.g2 - 2
time.t <- time.mean / time.se
time.p <- pt(abs(time.t), time.df, lower.tail = FALSE) * 2
pooled.sd <- sqrt((N.g1 * (N.g1 - 1) * sd.1 ^ 2 + N.g2 * (N.g2 - 1) * sd.2 ^ 2) / (N.g1 + N.g2 - 2))
time.d <- time.mean / pooled.sd
cat(paste0("Test # = ", i, " --- Time = ", time.all[time], "\n"))
cat(paste0("Mean Diff = ", round(time.mean, 4), " --- SE = ", round(time.se, 4), "\n"))
if(time.p < .0001) {
cat(paste0("t = ", round(time.t, 2), " --- DF = ", round(time.df, 1), " --- p < 0.0001 \n"))
} else {
cat(paste0("t = ", round(time.t, 2), " --- DF = ", round(time.df, 1), " --- p = ", round(time.p, 4), "\n"))
}
cat(paste0("Pooled SD = ", round(pooled.sd, 4), " --- Cohen's d = ", round(time.d, 1), "\n\n"))
}
}
if(test.params) {
if(paired) {
cat("######################\n")
cat("## Parameter Tests ##\n")
cat("## Paired t-test ##\n")
cat("######################\n")
df <- N.g1 - 1
mu <- mu.1 - mu.2
mu.mean <- mean(mu)
mu.se <- sd(mu)
mu.t <- mu.mean / mu.se
mu.p <- pt(abs(mu.t), df, lower.tail = FALSE) * 2
ht <- ht.1 - ht.2
ht.mean <- mean(ht)
ht.se <- sd(ht)
ht.t <- ht.mean / ht.se
ht.p <- pt(abs(ht.t), df, lower.tail = FALSE) * 2
s1 <- s1.1 - s1.2
s1.mean <- mean(s1)
s1.se <- sd(s1)
s1.t <- s1.mean / s1.se
s1.p <- pt(abs(s1.t), df, lower.tail = FALSE) * 2
s2 <- s2.1 - s2.2
s2.mean <- mean(s2)
s2.se <- sd(s2)
s2.t <- s2.mean / s2.se
s2.p <- pt(abs(s2.t), df, lower.tail = FALSE) * 2
b1 <- b1.1 - b1.2
b1.mean <- mean(b1)
b1.se <- sd(b1)
b1.t <- b1.mean / b1.se
b1.p <- pt(abs(b1.t), df, lower.tail = FALSE) * 2
b2 <- b2.1 - b2.2
b2.mean <- mean(b2)
b2.se <- sd(b2)
b2.t <- b2.mean / b2.se
b2.p <- pt(abs(b2.t), df, lower.tail = FALSE) * 2
cat(paste0("Mu -- Diff: ", round(mu.mean, 4), ", t: ", round(mu.t, 3), ", SE: ", round(mu.se, 3), ", df: ", df, ", p: ", round(mu.p, 4), "\n"))
cat(paste0("Height -- Diff: ", round(ht.mean, 4), ", t: ", round(ht.t, 3), ", SE: ", round(ht.se, 3), ", df: ", df, ", p: ", round(ht.p, 4), "\n"))
cat(paste0("SD 1 -- Diff: ", round(s1.mean, 4), ", t: ", round(s1.t, 3), ", SE: ", round(s1.se, 3), ", df: ", df, ", p: ", round(s1.p, 4), "\n"))
cat(paste0("SD 2 -- Diff: ", round(s2.mean, 4), ", t: ", round(s2.t, 3), ", SE: ", round(s2.se, 3), ", df: ", df, ", p: ", round(s2.p, 4), "\n"))
cat(paste0("Base 1 -- Diff: ", round(b1.mean, 4), ", t: ", round(b1.t, 3), ", SE: ", round(b1.se, 3), ", df: ", df, ", p: ", round(b1.p, 4), "\n"))
cat(paste0("Base 2 -- Diff: ", round(b2.mean, 4), ", t: ", round(b2.t, 3), ", SE: ", round(b2.se, 3), ", df: ", df, ", p: ", round(b2.p, 4), "\n\n"))
} else {
cat("######################\n")
cat("## Parameter Tests ##\n")
cat("## 2 Sample t-test ##\n")
cat("######################\n")
df <- N.g1 + N.g2 - 2
mu.mean <- mean(mu.1) - mean(mu.2)
mu.se1 <- sd(mu.1)
mu.se2 <- sd(mu.2)
mu.se <- sqrt((N.g1 * (N.g1 - 1) * mu.se1 ^ 2 + N.g2 * (N.g2 - 1) * mu.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
mu.t <- mu.mean / mu.se
mu.p <- pt(abs(mu.t), df, lower.tail = FALSE) * 2
ht.mean <- mean(ht.1) - mean(ht.2)
ht.se1 <- sd(ht.1)
ht.se2 <- sd(ht.2)
ht.se <- sqrt((N.g1 * (N.g1 - 1) * ht.se1 ^ 2 + N.g2 * (N.g2 - 1) * ht.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
ht.t <- ht.mean / ht.se
ht.p <- pt(abs(ht.t), df, lower.tail = FALSE) * 2
s1.mean <- mean(s1.1) - mean(s1.2)
s1.se1 <- sd(s1.1)
s1.se2 <- sd(s1.2)
s1.se <- sqrt((N.g1 * (N.g1 - 1) * s1.se1 ^ 2 + N.g2 * (N.g2 - 1) * s1.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
s1.t <- s1.mean / s1.se
s1.p <- pt(abs(s1.t), df, lower.tail = FALSE) * 2
s2.mean <- mean(s2.1) - mean(s2.2)
s2.se1 <- sd(s2.1)
s2.se2 <- sd(s2.2)
s2.se <- sqrt((N.g1 * (N.g1 - 1) * s2.se1 ^ 2 + N.g2 * (N.g2 - 1) * s2.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
s2.t <- s2.mean / s2.se
s2.p <- pt(abs(s2.t), df, lower.tail = FALSE) * 2
b1.mean <- mean(b1.1) - mean(b1.2)
b1.se1 <- sd(b1.1)
b1.se2 <- sd(b1.2)
b1.se <- sqrt((N.g1 * (N.g1 - 1) * b1.se1 ^ 2 + N.g2 * (N.g2 - 1) * b1.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
b1.t <- b1.mean / b1.se
b1.p <- pt(abs(b1.t), df, lower.tail = FALSE) * 2
b2.mean <- mean(b2.1) - mean(b2.2)
b2.se1 <- sd(b2.1)
b2.se2 <- sd(b2.2)
b2.se <- sqrt((N.g1 * (N.g1 - 1) * b2.se1 ^ 2 + N.g2 * (N.g2 - 1) * b2.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
b2.t <- b2.mean / b2.se
b2.p <- pt(abs(b2.t), df, lower.tail = FALSE) * 2
cat(paste0("Mu -- Diff: ", round(mu.mean, 4), ", t: ", round(mu.t, 3), ", SE: ", round(mu.se, 3), ", df: ", df, ", p: ", round(mu.p, 4), "\n"))
cat(paste0("Height -- Diff: ", round(ht.mean, 4), ", t: ", round(ht.t, 3), ", SE: ", round(ht.se, 3), ", df: ", df, ", p: ", round(ht.p, 4), "\n"))
cat(paste0("SD 1 -- Diff: ", round(s1.mean, 4), ", t: ", round(s1.t, 3), ", SE: ", round(s1.se, 3), ", df: ", df, ", p: ", round(s1.p, 4), "\n"))
cat(paste0("SD 2 -- Diff: ", round(s2.mean, 4), ", t: ", round(s2.t, 3), ", SE: ", round(s2.se, 3), ", df: ", df, ", p: ", round(s2.p, 4), "\n"))
cat(paste0("Base 1 -- Diff: ", round(b1.mean, 4), ", t: ", round(b1.t, 3), ", SE: ", round(b1.se, 3), ", df: ", df, ", p: ", round(b1.p, 4), "\n"))
cat(paste0("Base 2 -- Diff: ", round(b2.mean, 4), ", t: ", round(b2.t, 3), ", SE: ", round(b2.se, 3), ", df: ", df, ", p: ", round(b2.p, 4), "\n\n"))
}
}
params <- round(c(alpha = alpha, alpha.adj = alphastar, rho.est = rho.est), 4)
print(list(alpha = params, significant = buck))
invisible(list(alpha = params, significant = buck, time.all = time.all,sig = sig,
curve.ci1 = curve.ci1, curve.ci2 = curve.ci2, curve.g1 = curve.g1, curve.g2 = curve.g2,
curve.sd1 = curve.sd1, curve.sd2 = curve.sd2, N.g1 = N.g1, N.g2 = N.g2,
curve1.mat = curve1.mat, curve2.mat = curve2.mat, groups = groups, seed = seed))
} | /bdots/R/doubleGaussBoot.r | no_license | ingted/R-Examples | R | false | false | 26,242 | r | doubleGauss.boot <- function(part1.list, seed = new.seed(), alpha = 0.05, paired = FALSE, N.iter = 1000, cores = 1, p.adj = "oleson", test.spots = NULL, time.test = NULL, test.params = FALSE) {
data <- part1.list$data
col <- part1.list$col
rho.0 <- part1.list$rho.0
N.time <- part1.list$N.time
coef.id1 <- part1.list$coef.id1
coef.id2 <- part1.list$coef.id2
coef.id3 <- part1.list$coef.id3
coef.id4 <- part1.list$coef.id4
sdev.id1 <- part1.list$sdev.id1
sdev.id2 <- part1.list$sdev.id2
sdev.id3 <- part1.list$sdev.id3
sdev.id4 <- part1.list$sdev.id4
sigma.id1 <- part1.list$sigma.id1
sigma.id2 <- part1.list$sigma.id2
sigma.id3 <- part1.list$sigma.id3
sigma.id4 <- part1.list$sigma.id4
id.nums.g1 <- part1.list$id.nums.g1
id.nums.g2 <- part1.list$id.nums.g2
groups <- part1.list$groups
time.all <- part1.list$time.all
N.g1 <- part1.list$N.g1
N.g2 <- part1.list$N.g2
diffs <- part1.list$diffs
if(!is.null(test.spots)) time.all <- test.spots
N.tests <- length(time.all)
N.time <- length(time.all)
  # Drop subjects whose curve fit failed (NA coefficients) in either the
  # primary (coef.id1/coef.id2) or baseline (coef.id3/coef.id4) fit, keeping
  # all per-subject arrays and the group sizes consistent.
  group1.bad <- is.na(coef.id1[,1]) | is.na(coef.id3[,1])
  group2.bad <- is.na(coef.id2[,1]) | is.na(coef.id4[,1])
  coef.id1 <- subset(coef.id1, !group1.bad)
  coef.id3 <- subset(coef.id3, !group1.bad)
  coef.id2 <- subset(coef.id2, !group2.bad)
  coef.id4 <- subset(coef.id4, !group2.bad)
  sdev.id1 <- subset(sdev.id1, !group1.bad)
  sdev.id3 <- subset(sdev.id3, !group1.bad)
  sdev.id2 <- subset(sdev.id2, !group2.bad)
  sdev.id4 <- subset(sdev.id4, !group2.bad)
  sigma.id1 <- subset(sigma.id1, !group1.bad)
  sigma.id3 <- subset(sigma.id3, !group1.bad)
  sigma.id2 <- subset(sigma.id2, !group2.bad)
  sigma.id4 <- subset(sigma.id4, !group2.bad)
  # Keep the subject-ID bookkeeping in sync with the filtered matrices.
  id.nums.g1 <- id.nums.g1[!group1.bad]
  id.nums.g2 <- id.nums.g2[!group2.bad]
  N.g1 <- N.g1 - sum(group1.bad)
  N.g2 <- N.g2 - sum(group2.bad)
curve1.0 <- matrix(NA, ncol = N.time, nrow = N.g1)
mu1.ran <- rep(NA, N.g1)
ht1.ran <- rep(NA, N.g1)
s11.ran <- rep(NA, N.g1)
s21.ran <- rep(NA, N.g1)
b11.ran <- rep(NA, N.g1)
b21.ran <- rep(NA, N.g1)
curve2.0 <- matrix(NA, ncol = N.time, nrow = N.g2)
mu2.ran <- rep(NA, N.g2)
ht2.ran <- rep(NA, N.g2)
s12.ran <- rep(NA, N.g2)
s22.ran <- rep(NA, N.g2)
b12.ran <- rep(NA, N.g2)
b22.ran <- rep(NA, N.g2)
curve1.mat <- matrix(NA, ncol = N.time, nrow = N.iter)
curve2.mat <- matrix(NA, ncol = N.time, nrow = N.iter)
curve3.mat <- matrix(NA, ncol = N.time, nrow = N.iter)
#Target Curve
curve.f <- function(mu, ht, sig1, sig2, base1, base2, x){
whichgauss <- x < mu
y1 <- exp(-1 * (x - mu) ^ 2 / (2 * sig1 ^ 2)) * (ht - base1) + base1
y2 <- exp(-1 * (x - mu) ^ 2 / (2 * sig2 ^ 2)) * (ht - base2) + base2
y <- whichgauss * y1 + (1 - whichgauss) * y2
y
}
mu.1 <- ht.1 <- s1.1 <- s2.1 <- b1.1 <- b2.1 <-
mu.2 <- ht.2 <- s1.2 <- s2.2 <- b1.2 <- b2.2 <- numeric(N.iter)
##################
##### 1 Core #####
##################
if(paired) {
mu.cov.1 <- cov(coef.id1[,1], coef.id2[,1], use = "pairwise.complete.obs")
ht.cov.1 <- cov(coef.id1[,2], coef.id2[,2], use = "pairwise.complete.obs")
s1.cov.1 <- cov(coef.id1[,3], coef.id2[,3], use = "pairwise.complete.obs")
s2.cov.1 <- cov(coef.id1[,4], coef.id2[,4], use = "pairwise.complete.obs")
b1.cov.1 <- cov(coef.id1[,5], coef.id2[,5], use = "pairwise.complete.obs")
b2.cov.1 <- cov(coef.id1[,6], coef.id2[,6], use = "pairwise.complete.obs")
if(diffs) {
mu.cov.2 <- cov(coef.id3[,1], coef.id4[,1], use = "pairwise.complete.obs")
ht.cov.2 <- cov(coef.id3[,2], coef.id4[,2], use = "pairwise.complete.obs")
s1.cov.2 <- cov(coef.id3[,3], coef.id4[,3], use = "pairwise.complete.obs")
s2.cov.2 <- cov(coef.id3[,4], coef.id4[,4], use = "pairwise.complete.obs")
b1.cov.2 <- cov(coef.id3[,5], coef.id4[,5], use = "pairwise.complete.obs")
b2.cov.2 <- cov(coef.id3[,6], coef.id4[,6], use = "pairwise.complete.obs")
}
}
if(cores == 1) {
set.seed(seed)
for(iter in 1:N.iter){
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id1[i,1], coef.id2[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.1, mu.cov.1, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id1[i,2], coef.id2[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.1, ht.cov.1, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id1[i,3], coef.id2[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.1, s1.cov.1, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id1[i,4], coef.id2[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.1, s2.cov.1, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id1[i,5], coef.id2[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.1, b1.cov.1, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id1[i,6], coef.id2[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.1, b2.cov.1, sdev.id2[i,6] ^ 2), nrow = 2))
mu1.ran <- mu[1]; mu2.ran <- mu[2]
ht1.ran <- ht[1]; ht2.ran <- ht[2]
s11.ran <- s1[1]; s12.ran <- s1[2]
s21.ran <- s2[1]; s22.ran <- s2[2]
b11.ran <- b1[1]; b12.ran <- b1[2]
b21.ran <- b2[1]; b22.ran <- b2[2]
}
} else {
mu1.ran <- rnorm(N.g1, coef.id1[,1], sdev.id1[,1])
ht1.ran <- rnorm(N.g1, coef.id1[,2], sdev.id1[,2])
s11.ran <- rnorm(N.g1, coef.id1[,3], sdev.id1[,3])
s21.ran <- rnorm(N.g1, coef.id1[,4], sdev.id1[,4])
b11.ran <- rnorm(N.g1, coef.id1[,5], sdev.id1[,5])
b21.ran <- rnorm(N.g1, coef.id1[,6], sdev.id1[,6])
mu2.ran <- rnorm(N.g2, coef.id2[,1], sdev.id2[,1])
ht2.ran <- rnorm(N.g2, coef.id2[,2], sdev.id2[,2])
s12.ran <- rnorm(N.g2, coef.id2[,3], sdev.id2[,3])
s22.ran <- rnorm(N.g2, coef.id2[,4], sdev.id2[,4])
b12.ran <- rnorm(N.g2, coef.id2[,5], sdev.id2[,5])
b22.ran <- rnorm(N.g2, coef.id2[,6], sdev.id2[,6])
}
mu.1[iter] <- mean(mu1.ran); mu.2[iter] <- mean(mu2.ran)
ht.1[iter] <- mean(ht1.ran); ht.2[iter] <- mean(ht1.ran)
s1.1[iter] <- mean(s11.ran); s1.2[iter] <- mean(s12.ran)
s2.1[iter] <- mean(s21.ran); s2.2[iter] <- mean(s22.ran)
b1.1[iter] <- mean(b11.ran); b1.2[iter] <- mean(b12.ran)
b2.1[iter] <- mean(b21.ran); b2.2[iter] <- mean(b22.ran)
if(diffs) {
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id3[i,1], coef.id4[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.2, mu.cov.2, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id3[i,2], coef.id4[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.2, ht.cov.2, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id3[i,3], coef.id4[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.2, s1.cov.2, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id3[i,4], coef.id4[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.2, s2.cov.2, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id3[i,5], coef.id4[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.2, b1.cov.2, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id3[i,6], coef.id4[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.2, b2.cov.2, sdev.id2[i,6] ^ 2), nrow = 2))
mu3.ran <- mu[1]; mu4.ran <- mu[2]
ht3.ran <- ht[1]; ht4.ran <- ht[2]
s13.ran <- s1[1]; s14.ran <- s1[2]
s23.ran <- s2[1]; s24.ran <- s2[2]
b13.ran <- b1[1]; b14.ran <- b1[2]
b23.ran <- b2[1]; b24.ran <- b2[2]
}
} else {
mu3.ran <- rnorm(N.g1, coef.id3[,1], sdev.id3[,1])
ht3.ran <- rnorm(N.g1, coef.id3[,2], sdev.id3[,2])
s13.ran <- rnorm(N.g1, coef.id3[,3], sdev.id3[,3])
s23.ran <- rnorm(N.g1, coef.id3[,4], sdev.id3[,4])
b13.ran <- rnorm(N.g1, coef.id3[,5], sdev.id3[,5])
b23.ran <- rnorm(N.g1, coef.id3[,6], sdev.id3[,6])
mu4.ran <- rnorm(N.g2, coef.id4[,1], sdev.id4[,1])
ht4.ran <- rnorm(N.g2, coef.id4[,2], sdev.id4[,2])
s14.ran <- rnorm(N.g2, coef.id4[,3], sdev.id4[,3])
s24.ran <- rnorm(N.g2, coef.id4[,4], sdev.id4[,4])
b14.ran <- rnorm(N.g2, coef.id4[,5], sdev.id4[,5])
b24.ran <- rnorm(N.g2, coef.id4[,6], sdev.id4[,6])
}
}
if(diffs) {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all) -
curve.f(mu3.ran[id], ht3.ran[id], s13.ran[id], s23.ran[id],
b13.ran[id], b23.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all) -
curve.f(mu4.ran[id], ht4.ran[id], s14.ran[id], s24.ran[id],
b14.ran[id], b24.ran[id], time.all)
}
} else {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all)
}
}
curve1.mat[iter,] <- apply(curve1.0, 2, mean) #Mean fixations at each time point for group 1
curve2.mat[iter,] <- apply(curve2.0, 2, mean) #Mean fixations at each time point for group 2
if(paired) curve3.mat[iter,] <- apply(curve2.0 - curve1.0, 2, mean)
}
} else {
####################
##### 2+ Cores #####
####################
cl <- makePSOCKcluster(cores)
registerDoParallel(cl)
for.out <- foreach(iter = 1:N.iter, .combine = rbind, .options.RNG = seed) %dorng% {
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id1[i,1], coef.id2[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.1, mu.cov.1, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id1[i,2], coef.id2[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.1, ht.cov.1, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id1[i,3], coef.id2[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.1, s1.cov.1, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id1[i,4], coef.id2[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.1, s2.cov.1, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id1[i,5], coef.id2[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.1, b1.cov.1, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id1[i,6], coef.id2[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.1, b2.cov.1, sdev.id2[i,6] ^ 2), nrow = 2))
mu1.ran <- mu[1]; mu2.ran <- mu[2]
ht1.ran <- ht[1]; ht2.ran <- ht[2]
s11.ran <- s1[1]; s12.ran <- s1[2]
s21.ran <- s2[1]; s22.ran <- s2[2]
b11.ran <- b1[1]; b12.ran <- b1[2]
b21.ran <- b2[1]; b22.ran <- b2[2]
}
} else {
mu1.ran <- rnorm(N.g1, coef.id1[,1], sdev.id1[,1])
ht1.ran <- rnorm(N.g1, coef.id1[,2], sdev.id1[,2])
s11.ran <- rnorm(N.g1, coef.id1[,3], sdev.id1[,3])
s21.ran <- rnorm(N.g1, coef.id1[,4], sdev.id1[,4])
b11.ran <- rnorm(N.g1, coef.id1[,5], sdev.id1[,5])
b21.ran <- rnorm(N.g1, coef.id1[,6], sdev.id1[,6])
mu2.ran <- rnorm(N.g2, coef.id2[,1], sdev.id2[,1])
ht2.ran <- rnorm(N.g2, coef.id2[,2], sdev.id2[,2])
s12.ran <- rnorm(N.g2, coef.id2[,3], sdev.id2[,3])
s22.ran <- rnorm(N.g2, coef.id2[,4], sdev.id2[,4])
b12.ran <- rnorm(N.g2, coef.id2[,5], sdev.id2[,5])
b22.ran <- rnorm(N.g2, coef.id2[,6], sdev.id2[,6])
}
mu.temp.1 <- mean(mu1.ran); mu.temp.2 <- mean(mu2.ran)
ht.temp.1 <- mean(ht1.ran); ht.temp.2 <- mean(ht1.ran)
s1.temp.1 <- mean(s11.ran); s1.temp.2 <- mean(s12.ran)
s2.temp.1 <- mean(s21.ran); s2.temp.2 <- mean(s22.ran)
b1.temp.1 <- mean(b11.ran); b1.temp.2 <- mean(b12.ran)
b2.temp.1 <- mean(b21.ran); b2.temp.2 <- mean(b22.ran)
if(diffs) {
if(paired) {
for(i in 1:N.g1) {
mu <- rmvnorm(1, mean = c(coef.id3[i,1], coef.id4[i,1]),
sigma = matrix(c(sdev.id1[i,1] ^ 2, mu.cov.2, mu.cov.2, sdev.id2[i,1] ^ 2), nrow = 2))
ht <- rmvnorm(1, mean = c(coef.id3[i,2], coef.id4[i,2]),
sigma = matrix(c(sdev.id1[i,2] ^ 2, ht.cov.2, ht.cov.2, sdev.id2[i,2] ^ 2), nrow = 2))
s1 <- rmvnorm(1, mean = c(coef.id3[i,3], coef.id4[i,3]),
sigma = matrix(c(sdev.id1[i,3] ^ 2, s1.cov.2, s1.cov.2, sdev.id2[i,3] ^ 2), nrow = 2))
s2 <- rmvnorm(1, mean = c(coef.id3[i,4], coef.id4[i,4]),
sigma = matrix(c(sdev.id1[i,4] ^ 2, s2.cov.2, s2.cov.2, sdev.id2[i,4] ^ 2), nrow = 2))
b1 <- rmvnorm(1, mean = c(coef.id3[i,5], coef.id4[i,5]),
sigma = matrix(c(sdev.id1[i,5] ^ 2, b1.cov.2, b1.cov.2, sdev.id2[i,5] ^ 2), nrow = 2))
b2 <- rmvnorm(1, mean = c(coef.id3[i,6], coef.id4[i,6]),
sigma = matrix(c(sdev.id1[i,6] ^ 2, b2.cov.2, b2.cov.2, sdev.id2[i,6] ^ 2), nrow = 2))
mu3.ran <- mu[1]; mu4.ran <- mu[2]
ht3.ran <- ht[1]; ht4.ran <- ht[2]
s13.ran <- s1[1]; s14.ran <- s1[2]
s23.ran <- s2[1]; s24.ran <- s2[2]
b13.ran <- b1[1]; b14.ran <- b1[2]
b23.ran <- b2[1]; b24.ran <- b2[2]
}
} else {
mu3.ran <- rnorm(N.g1, coef.id3[,1], sdev.id3[,1])
ht3.ran <- rnorm(N.g1, coef.id3[,2], sdev.id3[,2])
s13.ran <- rnorm(N.g1, coef.id3[,3], sdev.id3[,3])
s23.ran <- rnorm(N.g1, coef.id3[,4], sdev.id3[,4])
b13.ran <- rnorm(N.g1, coef.id3[,5], sdev.id3[,5])
b23.ran <- rnorm(N.g1, coef.id3[,6], sdev.id3[,6])
mu4.ran <- rnorm(N.g2, coef.id4[,1], sdev.id4[,1])
ht4.ran <- rnorm(N.g2, coef.id4[,2], sdev.id4[,2])
s14.ran <- rnorm(N.g2, coef.id4[,3], sdev.id4[,3])
s24.ran <- rnorm(N.g2, coef.id4[,4], sdev.id4[,4])
b14.ran <- rnorm(N.g2, coef.id4[,5], sdev.id4[,5])
b24.ran <- rnorm(N.g2, coef.id4[,6], sdev.id4[,6])
}
}
if(diffs) {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all) -
curve.f(mu3.ran[id], ht3.ran[id], s13.ran[id], s23.ran[id],
b13.ran[id], b23.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all) -
curve.f(mu4.ran[id], ht4.ran[id], s14.ran[id], s24.ran[id],
b14.ran[id], b24.ran[id], time.all)
}
} else {
for(id in 1:N.g1) { #Get fixation level for each group 1 subject
curve1.0[id,] <- curve.f(mu1.ran[id], ht1.ran[id], s11.ran[id], s21.ran[id],
b11.ran[id], b21.ran[id], time.all)
}
for(id in 1:N.g2) { #Get fixation level for each group 2 subject
curve2.0[id,] <- curve.f(mu2.ran[id], ht2.ran[id], s12.ran[id], s22.ran[id],
b12.ran[id], b22.ran[id], time.all)
}
}
curve1 <- apply(curve1.0, 2, mean) #Mean fixations at each time point for CIs
curve2 <- apply(curve2.0, 2, mean) #Mean fixations at each time point for NHs
curve3 <- curve2 - curve1
c(curve1, curve2, curve3, mu.temp.1, ht.temp.1, s1.temp.1, s2.temp.1, b1.temp.1, b2.temp.1,
mu.temp.2, ht.temp.2, s1.temp.2, s2.temp.2, b1.temp.2, b2.temp.2)
}
curve1.mat <- for.out[,1:N.time]
curve2.mat <- for.out[,(N.time + 1):(2 * N.time)]
curve3.mat <- for.out[,(2 * N.time + 1):(3 * N.time)]
mu.1 <- for.out[, 3 * N.time + 1]
ht.1 <- for.out[, 3 * N.time + 2]
s1.1 <- for.out[, 3 * N.time + 3]
s2.1 <- for.out[, 3 * N.time + 4]
b1.1 <- for.out[, 3 * N.time + 5]
b2.1 <- for.out[, 3 * N.time + 6]
mu.2 <- for.out[, 3 * N.time + 7]
ht.2 <- for.out[, 3 * N.time + 8]
s1.2 <- for.out[, 3 * N.time + 9]
s2.2 <- for.out[, 3 * N.time + 10]
b1.2 <- for.out[, 3 * N.time + 11]
b2.2 <- for.out[, 3 * N.time + 12]
stopCluster(cl)
}
curve.mean1 <- apply(curve1.mat, 2, mean)
curve.mean2 <- apply(curve2.mat, 2, mean)
curve.g1 <- curve.mean1
curve.g2 <- curve.mean2
curve.sd1 <- apply(curve1.mat, 2, sd)
curve.sd2 <- apply(curve2.mat, 2, sd)
  # Pointwise test at each time point: paired uses the bootstrap distribution
  # of the within-pair difference curve; unpaired uses a two-sample t-test
  # with a pooled standard error.
  if(paired) {
    diff.mean <- apply(curve3.mat, 2, mean)
    curve.sd <- apply(curve3.mat, 2, sd)
    t.val <- diff.mean / curve.sd
    p.values <- 2 * (1 - pt(abs(t.val), N.g1 - 1))
  } else {
    t.num <- (curve.mean1 - curve.mean2)
    # NOTE(review): curve.sd1/curve.sd2 are SDs of bootstrap means, so the
    # N.g * (N.g - 1) factor appears to rescale them to sample-variance units
    # before pooling -- confirm against the bdots methodology reference.
    t.den <- sqrt((N.g1 * (N.g1 - 1) * curve.sd1 ^ 2 + N.g2 * (N.g2 - 1) * curve.sd2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
    t.val <- t.num / t.den
    p.values <- 2 * (1 - pt(abs(t.val), (N.g1 + N.g2 - 2)))
  }
par(mfrow = c(1,1))
# compute t-test at each time point
ticks <- seq(0, max(time.all), round(max(time.all) / 10))
plot(NULL, ,xlim = c(0, max(time.all)), ylim = c(0,1), ylab = 'Proportion of Fixations',
xlab = 'Time', axes = FALSE, main = 'Double-Gauss Curve')
axis(1, at = ticks)
axis(2)
box()
legend('topleft', lty = 1:2, legend = groups)
#Entries in tsmultcomp:
#1 : Estimate of rho
#2 : Overall Type I Error
#3 : Total tests we will perform
rho.est <- ar(t.val, FALSE, order.max = 1)$ar
if(p.adj == "oleson") {
if(paired) {
alphastar <- tsmultcomp(rho.est, alpha, N.tests, df = N.g1 - 1)
} else {
alphastar <- tsmultcomp(rho.est, alpha, N.tests, df = N.g1 + N.g2 - 2)
}
sig <- p.values <= alphastar
} else if(p.adj == "fdr") {
sig <- p.adjust(p.values, "fdr") <= alpha
} else if(p.adj == "none") {
sig <- p.values <= alpha
}
#Make significant area yellow
buck <- bucket(sig, time.all, ylim = c(0, .9))
#Plot overall estimate of curves
lines(time.all, curve.g1, lty = 1, lwd = 2)
lines(time.all, curve.g2, lty = 2, lwd = 2)
#Plot Confidence Interval for Group 1 curve
lines(time.all, curve.g1 - curve.sd1 * qt(alpha / 2, N.g1 - 1), lty = 1, lwd = 1,
col = "gray44")
lines(time.all, curve.g1 + curve.sd1 * qt(alpha / 2, N.g1 - 1), lty = 1, lwd = 1,
col = "gray44")
#Plot Confidence Interval for Group 2 curve
lines(time.all, curve.g2 - curve.sd2 * qt(alpha / 2, N.g2 - 1), lty = 2, lwd = 1,
col = "gray44")
lines(time.all, curve.g2 + curve.sd2 * qt(alpha / 2, N.g2 - 1), lty = 2, lwd = 1,
col = "gray44")
# Record confidence intervals
curve.ci1 <- curve.ci2 <- matrix(NA, nrow = length(time.all), ncol = 4)
curve.ci1[,1] <- curve.ci2[,1] <- time.all
curve.ci1[,2] <- curve.g1 - curve.sd1 * qt(1 - alpha / 2, N.g1 - 1)
curve.ci1[,3] <- curve.g1
curve.ci1[,4] <- curve.g1 + curve.sd1 * qt(1 - alpha / 2, N.g1 - 1)
curve.ci2[,2] <- curve.g2 - curve.sd2 * qt(1 - alpha / 2, N.g2 - 1)
curve.ci2[,3] <- curve.g2
curve.ci2[,4] <- curve.g2 + curve.sd2 * qt(1 - alpha / 2, N.g2 - 1)
colnames(curve.ci1) <- colnames(curve.ci2) <- c("Time", "Lower CI", "Estimate", "Upper CI")
if(!is.null(time.test)) {
time.test <- which(time.all %in% time.test)
cat("######################\n")
cat("## Individual Tests ##\n")
cat("######################\n")
for(i in 1:length(time.test)) {
time <- time.test[i]
mean.1 <- curve.g1[time]
mean.2 <- curve.g2[time]
sd.1 <- curve.sd1[time]
sd.2 <- curve.sd2[time]
time.mean <- mean.1 - mean.2
time.se <- sqrt((N.g1 * (N.g1 - 1) * sd.1 ^ 2 + N.g2 * (N.g2 - 1) * sd.2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
time.df <- N.g1 + N.g2 - 2
time.t <- time.mean / time.se
time.p <- pt(abs(time.t), time.df, lower.tail = FALSE) * 2
pooled.sd <- sqrt((N.g1 * (N.g1 - 1) * sd.1 ^ 2 + N.g2 * (N.g2 - 1) * sd.2 ^ 2) / (N.g1 + N.g2 - 2))
time.d <- time.mean / pooled.sd
cat(paste0("Test # = ", i, " --- Time = ", time.all[time], "\n"))
cat(paste0("Mean Diff = ", round(time.mean, 4), " --- SE = ", round(time.se, 4), "\n"))
if(time.p < .0001) {
cat(paste0("t = ", round(time.t, 2), " --- DF = ", round(time.df, 1), " --- p < 0.0001 \n"))
} else {
cat(paste0("t = ", round(time.t, 2), " --- DF = ", round(time.df, 1), " --- p = ", round(time.p, 4), "\n"))
}
cat(paste0("Pooled SD = ", round(pooled.sd, 4), " --- Cohen's d = ", round(time.d, 1), "\n\n"))
}
}
if(test.params) {
if(paired) {
cat("######################\n")
cat("## Parameter Tests ##\n")
cat("## Paired t-test ##\n")
cat("######################\n")
df <- N.g1 - 1
mu <- mu.1 - mu.2
mu.mean <- mean(mu)
mu.se <- sd(mu)
mu.t <- mu.mean / mu.se
mu.p <- pt(abs(mu.t), df, lower.tail = FALSE) * 2
ht <- ht.1 - ht.2
ht.mean <- mean(ht)
ht.se <- sd(ht)
ht.t <- ht.mean / ht.se
ht.p <- pt(abs(ht.t), df, lower.tail = FALSE) * 2
s1 <- s1.1 - s1.2
s1.mean <- mean(s1)
s1.se <- sd(s1)
s1.t <- s1.mean / s1.se
s1.p <- pt(abs(s1.t), df, lower.tail = FALSE) * 2
s2 <- s2.1 - s2.2
s2.mean <- mean(s2)
s2.se <- sd(s2)
s2.t <- s2.mean / s2.se
s2.p <- pt(abs(s2.t), df, lower.tail = FALSE) * 2
b1 <- b1.1 - b1.2
b1.mean <- mean(b1)
b1.se <- sd(b1)
b1.t <- b1.mean / b1.se
b1.p <- pt(abs(b1.t), df, lower.tail = FALSE) * 2
b2 <- b2.1 - b2.2
b2.mean <- mean(b2)
b2.se <- sd(b2)
b2.t <- b2.mean / b2.se
b2.p <- pt(abs(b2.t), df, lower.tail = FALSE) * 2
cat(paste0("Mu -- Diff: ", round(mu.mean, 4), ", t: ", round(mu.t, 3), ", SE: ", round(mu.se, 3), ", df: ", df, ", p: ", round(mu.p, 4), "\n"))
cat(paste0("Height -- Diff: ", round(ht.mean, 4), ", t: ", round(ht.t, 3), ", SE: ", round(ht.se, 3), ", df: ", df, ", p: ", round(ht.p, 4), "\n"))
cat(paste0("SD 1 -- Diff: ", round(s1.mean, 4), ", t: ", round(s1.t, 3), ", SE: ", round(s1.se, 3), ", df: ", df, ", p: ", round(s1.p, 4), "\n"))
cat(paste0("SD 2 -- Diff: ", round(s2.mean, 4), ", t: ", round(s2.t, 3), ", SE: ", round(s2.se, 3), ", df: ", df, ", p: ", round(s2.p, 4), "\n"))
cat(paste0("Base 1 -- Diff: ", round(b1.mean, 4), ", t: ", round(b1.t, 3), ", SE: ", round(b1.se, 3), ", df: ", df, ", p: ", round(b1.p, 4), "\n"))
cat(paste0("Base 2 -- Diff: ", round(b2.mean, 4), ", t: ", round(b2.t, 3), ", SE: ", round(b2.se, 3), ", df: ", df, ", p: ", round(b2.p, 4), "\n\n"))
} else {
cat("######################\n")
cat("## Parameter Tests ##\n")
cat("## 2 Sample t-test ##\n")
cat("######################\n")
df <- N.g1 + N.g2 - 2
mu.mean <- mean(mu.1) - mean(mu.2)
mu.se1 <- sd(mu.1)
mu.se2 <- sd(mu.2)
mu.se <- sqrt((N.g1 * (N.g1 - 1) * mu.se1 ^ 2 + N.g2 * (N.g2 - 1) * mu.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
mu.t <- mu.mean / mu.se
mu.p <- pt(abs(mu.t), df, lower.tail = FALSE) * 2
ht.mean <- mean(ht.1) - mean(ht.2)
ht.se1 <- sd(ht.1)
ht.se2 <- sd(ht.2)
ht.se <- sqrt((N.g1 * (N.g1 - 1) * ht.se1 ^ 2 + N.g2 * (N.g2 - 1) * ht.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
ht.t <- ht.mean / ht.se
ht.p <- pt(abs(ht.t), df, lower.tail = FALSE) * 2
s1.mean <- mean(s1.1) - mean(s1.2)
s1.se1 <- sd(s1.1)
s1.se2 <- sd(s1.2)
s1.se <- sqrt((N.g1 * (N.g1 - 1) * s1.se1 ^ 2 + N.g2 * (N.g2 - 1) * s1.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
s1.t <- s1.mean / s1.se
s1.p <- pt(abs(s1.t), df, lower.tail = FALSE) * 2
s2.mean <- mean(s2.1) - mean(s2.2)
s2.se1 <- sd(s2.1)
s2.se2 <- sd(s2.2)
s2.se <- sqrt((N.g1 * (N.g1 - 1) * s2.se1 ^ 2 + N.g2 * (N.g2 - 1) * s2.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
s2.t <- s2.mean / s2.se
s2.p <- pt(abs(s2.t), df, lower.tail = FALSE) * 2
b1.mean <- mean(b1.1) - mean(b1.2)
b1.se1 <- sd(b1.1)
b1.se2 <- sd(b1.2)
b1.se <- sqrt((N.g1 * (N.g1 - 1) * b1.se1 ^ 2 + N.g2 * (N.g2 - 1) * b1.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
b1.t <- b1.mean / b1.se
b1.p <- pt(abs(b1.t), df, lower.tail = FALSE) * 2
b2.mean <- mean(b2.1) - mean(b2.2)
b2.se1 <- sd(b2.1)
b2.se2 <- sd(b2.2)
b2.se <- sqrt((N.g1 * (N.g1 - 1) * b2.se1 ^ 2 + N.g2 * (N.g2 - 1) * b2.se2 ^ 2) / (N.g1 + N.g2 - 2) * (1 / N.g1 + 1 / N.g2))
b2.t <- b2.mean / b2.se
b2.p <- pt(abs(b2.t), df, lower.tail = FALSE) * 2
cat(paste0("Mu -- Diff: ", round(mu.mean, 4), ", t: ", round(mu.t, 3), ", SE: ", round(mu.se, 3), ", df: ", df, ", p: ", round(mu.p, 4), "\n"))
cat(paste0("Height -- Diff: ", round(ht.mean, 4), ", t: ", round(ht.t, 3), ", SE: ", round(ht.se, 3), ", df: ", df, ", p: ", round(ht.p, 4), "\n"))
cat(paste0("SD 1 -- Diff: ", round(s1.mean, 4), ", t: ", round(s1.t, 3), ", SE: ", round(s1.se, 3), ", df: ", df, ", p: ", round(s1.p, 4), "\n"))
cat(paste0("SD 2 -- Diff: ", round(s2.mean, 4), ", t: ", round(s2.t, 3), ", SE: ", round(s2.se, 3), ", df: ", df, ", p: ", round(s2.p, 4), "\n"))
cat(paste0("Base 1 -- Diff: ", round(b1.mean, 4), ", t: ", round(b1.t, 3), ", SE: ", round(b1.se, 3), ", df: ", df, ", p: ", round(b1.p, 4), "\n"))
cat(paste0("Base 2 -- Diff: ", round(b2.mean, 4), ", t: ", round(b2.t, 3), ", SE: ", round(b2.se, 3), ", df: ", df, ", p: ", round(b2.p, 4), "\n\n"))
}
}
params <- round(c(alpha = alpha, alpha.adj = alphastar, rho.est = rho.est), 4)
print(list(alpha = params, significant = buck))
invisible(list(alpha = params, significant = buck, time.all = time.all,sig = sig,
curve.ci1 = curve.ci1, curve.ci2 = curve.ci2, curve.g1 = curve.g1, curve.g2 = curve.g2,
curve.sd1 = curve.sd1, curve.sd2 = curve.sd2, N.g1 = N.g1, N.g2 = N.g2,
curve1.mat = curve1.mat, curve2.mat = curve2.mat, groups = groups, seed = seed))
} |
context("Functionality that probably belongs in the SummarizedExperiment
package")
# NOTE: This also coverage RangedSummarizedExperiment objects, since the method
# is defined for these via inheritance to SummarizedExperiment.
# Guard against signature drift between the combine() generic and the
# method registered for two SummarizedExperiment arguments.
test_that("combine,SummarizedExperiment,SummarizedExperiment-method is compatible with generic", {
  combine_generic <- getGeneric("combine")
  combine_method <- getMethod(
    "combine",
    signature = c("SummarizedExperiment", "SummarizedExperiment")
  )
  expect_identical(combine_generic@signature, c("x", "y"))
  expect_identical(formals(combine_generic@.Data),
                   formals(combine_method@.Data))
})
# Behavioural tests for combine() on two SummarizedExperiment objects.
# `se` (and `rse`, used in the final error case) are test fixtures created
# elsewhere in the test setup -- not visible in this file chunk.
test_that("combine,SummarizedExperiment,SummarizedExperiment-method works", {
se <- se[1:100, ]
# Combining with an empty SummarizedExperiment is the identity, in either
# argument order.
expect_identical(combine(se, SummarizedExperiment::SummarizedExperiment()),
se)
expect_identical(combine(SummarizedExperiment::SummarizedExperiment(),
se), se)
# Can't expect_identical() on SummarizedExperiment objects because assays
# slot is a reference class.
# Overlapping row/column sub-blocks should be stitched back together.
expect_equal(combine(se[1:40, 1:4], se[30:100, 3:6]), se)
expect_equal(combine(se[1:40, ], se[90:100, ]), se[c(1:40, 90:100)])
expect_equal(combine(se[, 2], se[, 2:4]), se[, 2:4])
# Error case: NULL names() are rejected (messages are regex-escaped).
se_unnamed <- se
names(se_unnamed) <- NULL
expect_error(combine(se_unnamed[, 1], se_unnamed[, 2]),
paste0("Cannot combine 'SummarizedExperiment' objects with ",
"NULL 'names\\(\\)'"))
# Error case: duplicated names are rejected.
se_dupnames <- se
names(se_dupnames) <- rep("A", nrow(se_dupnames))
expect_error(combine(se_dupnames[, 2], se_dupnames[, 1]),
"'anyDuplicated\\(x\\)' must be 0 \\(FALSE\\)")
# Error case: NULL colnames() are rejected.
se_nullcn <- se
colnames(se_nullcn) <- NULL
expect_error(combine(se_nullcn[, 1], se_nullcn[, 2]),
paste0("Cannot combine 'SummarizedExperiment' objects with ",
"NULL 'colnames\\(\\)'"))
# Error case: mixing SE with RangedSE is rejected because only one of the
# two objects carries a rowRanges slot.
expect_error(combine(se, rse),
paste0("Cannot combine 'SummarizedExperiment' and ",
"'RangedSummarizedExperiment' objects because only one ",
"of these has a 'rowRanges' slot."))
})
# The same behavioural checks, this time for RangedSummarizedExperiment
# objects. `rse` (and `se`, used in the final error case) are fixtures
# defined elsewhere in the test setup.
test_that("combine,RangedSummarizedExperiment,RangedSummarizedExperiment-method works", {
rse <- rse[1:100, ]
# Combining with an empty object is the identity, in either order.
expect_identical(combine(rse, SummarizedExperiment::SummarizedExperiment()),
rse)
expect_identical(combine(SummarizedExperiment::SummarizedExperiment(),
rse), rse)
# Can't expect_identical() on SummarizedExperiment objects because assays
# slot is a reference class.
# Overlapping row/column sub-blocks should be stitched back together.
expect_equal(combine(rse[1:40, 1:4], rse[30:100, 3:6]), rse)
expect_equal(combine(rse[1:40, ], rse[90:100, ]), rse[c(1:40, 90:100)])
expect_equal(combine(rse[, 2], rse[, 2:4]), rse[, 2:4])
# Error case: duplicated ranges cannot be disambiguated when combining.
rse_dupranges <- rse
rse_dupranges <- rbind(rse_dupranges[1:10, ], rse_dupranges[1:10, ])
expect_error(combine(rse_dupranges[, 2], rse_dupranges[, 1]),
"'any\\(duplicated\\(x\\)\\)' must be FALSE")
# Error case: NULL colnames() are rejected.
rse_nullcn <- rse
colnames(rse_nullcn) <- NULL
expect_error(combine(rse_nullcn[, 1], rse_nullcn[, 2]),
paste0("Cannot combine 'RangedSummarizedExperiment' objects ",
"with NULL 'colnames\\(\\)'"))
# Error case: GRangesList-based rowRanges are unsupported.
rse_grl <- rse
rowRanges(rse_grl) <- as(rowRanges(rse_grl), "GRangesList")
expect_error(combine(rse_grl[1:40, 1:4], rse_grl[30:100, 3:6]),
paste0("Cannot combine 'RangedSummarizedExperiment' objects ",
"with 'GRangesList'-based 'rowRanges'"))
# Error case: mixing RangedSE with plain SE is rejected.
expect_error(combine(rse, se),
paste0("Cannot combine 'RangedSummarizedExperiment' and ",
"'SummarizedExperiment' objects because only one of ",
"these has a 'rowRanges' slot."))
})
| /tests/testthat/test-SummarizedExperiment-pkg.R | permissive | PeteHaitch/SparseSummarizedExperiment | R | false | false | 3,717 | r | context("Functionality that probably belongs in the SummarizedExperiment
package")
# NOTE: This also covers RangedSummarizedExperiment objects, since the method
# is defined for these via inheritance to SummarizedExperiment.
test_that("combine,SummarizedExperiment,SummarizedExperiment-method is compatible with generic", {
generic <- getGeneric("combine")
method <- getMethod("combine",
c("SummarizedExperiment", "SummarizedExperiment"))
expect_identical(generic@signature, c("x", "y"))
expect_identical(formals(generic@.Data), formals(method@.Data))
})
test_that("combine,SummarizedExperiment,SummarizedExperiment-method works", {
se <- se[1:100, ]
expect_identical(combine(se, SummarizedExperiment::SummarizedExperiment()),
se)
expect_identical(combine(SummarizedExperiment::SummarizedExperiment(),
se), se)
# Can't expect_identical() on SummarizedExperiment objects because assays
# slot is a reference class.
expect_equal(combine(se[1:40, 1:4], se[30:100, 3:6]), se)
expect_equal(combine(se[1:40, ], se[90:100, ]), se[c(1:40, 90:100)])
expect_equal(combine(se[, 2], se[, 2:4]), se[, 2:4])
se_unnamed <- se
names(se_unnamed) <- NULL
expect_error(combine(se_unnamed[, 1], se_unnamed[, 2]),
paste0("Cannot combine 'SummarizedExperiment' objects with ",
"NULL 'names\\(\\)'"))
se_dupnames <- se
names(se_dupnames) <- rep("A", nrow(se_dupnames))
expect_error(combine(se_dupnames[, 2], se_dupnames[, 1]),
"'anyDuplicated\\(x\\)' must be 0 \\(FALSE\\)")
se_nullcn <- se
colnames(se_nullcn) <- NULL
expect_error(combine(se_nullcn[, 1], se_nullcn[, 2]),
paste0("Cannot combine 'SummarizedExperiment' objects with ",
"NULL 'colnames\\(\\)'"))
expect_error(combine(se, rse),
paste0("Cannot combine 'SummarizedExperiment' and ",
"'RangedSummarizedExperiment' objects because only one ",
"of these has a 'rowRanges' slot."))
})
test_that("combine,RangedSummarizedExperiment,RangedSummarizedExperiment-method works", {
rse <- rse[1:100, ]
expect_identical(combine(rse, SummarizedExperiment::SummarizedExperiment()),
rse)
expect_identical(combine(SummarizedExperiment::SummarizedExperiment(),
rse), rse)
# Can't expect_identical() on SummarizedExperiment objects because assays
# slot is a reference class.
expect_equal(combine(rse[1:40, 1:4], rse[30:100, 3:6]), rse)
expect_equal(combine(rse[1:40, ], rse[90:100, ]), rse[c(1:40, 90:100)])
expect_equal(combine(rse[, 2], rse[, 2:4]), rse[, 2:4])
rse_dupranges <- rse
rse_dupranges <- rbind(rse_dupranges[1:10, ], rse_dupranges[1:10, ])
expect_error(combine(rse_dupranges[, 2], rse_dupranges[, 1]),
"'any\\(duplicated\\(x\\)\\)' must be FALSE")
rse_nullcn <- rse
colnames(rse_nullcn) <- NULL
expect_error(combine(rse_nullcn[, 1], rse_nullcn[, 2]),
paste0("Cannot combine 'RangedSummarizedExperiment' objects ",
"with NULL 'colnames\\(\\)'"))
rse_grl <- rse
rowRanges(rse_grl) <- as(rowRanges(rse_grl), "GRangesList")
expect_error(combine(rse_grl[1:40, 1:4], rse_grl[30:100, 3:6]),
paste0("Cannot combine 'RangedSummarizedExperiment' objects ",
"with 'GRangesList'-based 'rowRanges'"))
expect_error(combine(rse, se),
paste0("Cannot combine 'RangedSummarizedExperiment' and ",
"'SummarizedExperiment' objects because only one of ",
"these has a 'rowRanges' slot."))
})
|
# DEAP model 2017
# Assemble the DRD modelling dataset from an ABCD release: baseline
# predictors (behavioural + brain + QC + stratification variables) merged
# with year-1 follow-up target variables, written out as one CSV.
### SET THESE BEFORE RUNNING #####
# Variable-name lists: one variable name per line in each text file.
targets=readLines("/home/max/Documents/DRD/indif_varnames.txt")
behav_features=readLines("/home/max/Documents/DRD/es_varlist_622.txt")
brain_features=readLines("/home/max/Documents/DRD/drd_brain_features.txt")
# Stratification variables and MRI quality-control variables.
strat_vars=c("rel_family_id","mri_info_device.serial.number")
qc_vars=c("iqc_rsfmri_good_ser",'iqc_dmri_good_ser','fsqc_qc','tfmri_mid_all_beta_mean.motion',
'tfmri_nback_all_beta_mean.motion','tfmri_sst_all_beta_mean.motion',
'iqc_rsfmri_all_mean_motion','rsfmri_var_ntpoints','iqc_dmri_all_mean_motion')
allvars=c('src_subject_id','eventname',qc_vars,strat_vars,behav_features,brain_features)
# Loading the full release is slow, so a previously loaded copy is kept in
# the session as `backup_data` and restored here instead of re-reading the
# .Rds on every run (the readRDS line is intentionally commented out).
#data = readRDS( paste0("/home/max/Documents/linear_mixed_model_abcd/nda2.0.1.Rds"))
#backup_data=data
data=backup_data
# Target variables come from the 1-year follow-up visit only.
data_yr1=data[c('src_subject_id','eventname',targets)]
data_yr1=data_yr1[ which(data_yr1$eventname=='1_year_follow_up_y_arm_1'),]
# Keep subject id (column 1) plus the target columns (columns 3:9),
# dropping eventname (column 2) -- assumes `targets` has exactly 7
# entries; TODO confirm against indif_varnames.txt.
data_yr1 <- data_yr1[c(1,3:9)]
# Predictor variables come from the baseline visit; merge() joins on the
# columns the two frames share (src_subject_id).
data = data[allvars]
data <-data[ which(data$eventname=='baseline_year_1_arm_1'),]
data<-merge(data,data_yr1)
# Force the columns listed in factor_list.csv to numeric (factor columns
# become their integer level codes).
facs=readLines('/home/max/Documents/DRD/factor_list.csv')
data[facs] <- lapply(data[facs], as.numeric)
#data3<-data[facs_cols]
write.csv(data,"/home/max/Documents/DRD/drd_data_dest.csv")
# NOTE(review): this selects ROWS named after the behavioural feature
# variables, which almost certainly yields all-NA rows; column selection
# `data[behav_features]` was probably intended -- confirm.
data[behav_features,]
# Ad-hoc type checks left over from interactive debugging.
typeof(data$cbcl_scr_syn_anxdep_r)
typeof(data$src_subject_id)
typeof(data$sex)
| /DRD_filter_dest.R | no_license | owensmax/ABCD-DRD-Prediction | R | false | false | 1,361 | r | # DEAP model 2017
###SET THESE BEFORE RUNNING#####
targets=readLines("/home/max/Documents/DRD/indif_varnames.txt")
behav_features=readLines("/home/max/Documents/DRD/es_varlist_622.txt")
brain_features=readLines("/home/max/Documents/DRD/drd_brain_features.txt")
strat_vars=c("rel_family_id","mri_info_device.serial.number")
qc_vars=c("iqc_rsfmri_good_ser",'iqc_dmri_good_ser','fsqc_qc','tfmri_mid_all_beta_mean.motion',
'tfmri_nback_all_beta_mean.motion','tfmri_sst_all_beta_mean.motion',
'iqc_rsfmri_all_mean_motion','rsfmri_var_ntpoints','iqc_dmri_all_mean_motion')
allvars=c('src_subject_id','eventname',qc_vars,strat_vars,behav_features,brain_features)
#data = readRDS( paste0("/home/max/Documents/linear_mixed_model_abcd/nda2.0.1.Rds"))
#backup_data=data
data=backup_data
data_yr1=data[c('src_subject_id','eventname',targets)]
data_yr1=data_yr1[ which(data_yr1$eventname=='1_year_follow_up_y_arm_1'),]
data_yr1 <- data_yr1[c(1,3:9)]
data = data[allvars]
data <-data[ which(data$eventname=='baseline_year_1_arm_1'),]
data<-merge(data,data_yr1)
facs=readLines('/home/max/Documents/DRD/factor_list.csv')
data[facs] <- lapply(data[facs], as.numeric)
#data3<-data[facs_cols]
write.csv(data,"/home/max/Documents/DRD/drd_data_dest.csv")
data[behav_features,]
typeof(data$cbcl_scr_syn_anxdep_r)
typeof(data$src_subject_id)
typeof(data$sex)
|
## This program is to compute the inverse of a matrix and cache it so that it can be retrieved when required.
## If not available in cache it computes the inverse of matrix and returns it.
## Course: Introduction to R
## Assignment 2
## This function creates a matrix object and caches its inverse.
## Build a cache-enabled matrix object.
##
## Wraps a matrix together with a cached value of its inverse; the cache
## is emptied whenever a new matrix is stored via set(). The inverse
## itself is computed externally (see cacheSolve()).
##
## Args:
##   x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
##
## Returns: a list of accessor closures: set, get, setsolve, getsolve.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # stored matrix changed, so the cached inverse is stale
    },
    get = function() x,
    setsolve = function(solve) inv <<- solve,
    getsolve = function() inv
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## Compute (and cache) the inverse of the special "matrix" object created
## by makeCacheMatrix().
##
## Fixes two defects in the original: the default `x = matrix()` was
## broken (a bare matrix has no $getsolve() accessor, so the default could
## never work), and `...` was accepted but silently dropped instead of
## being forwarded to solve().
##
## Args:
##   x: a cache-enabled matrix object as returned by makeCacheMatrix().
##   ...: further arguments passed on to solve() (e.g. numerical tolerance).
##
## Returns: the inverse of the wrapped matrix, taken from the cache when
## one has already been computed.
cacheSolve <- function(x, ...) {
  m <- x$getsolve()
  if(!is.null(m)) {
    message("getting cached matrix inverse")
    return(m)
  }
  mymatrix <- x$get()
  m <- solve(mymatrix, ...)
  x$setsolve(m)
  m
}
| /cachematrix.R | no_license | ChiragNM/ProgrammingAssignment2 | R | false | false | 906 | r | ## This program is to compute the inverse of a matrix and cache it so that it can be retrieved when required.
## If not available in cache it computes the inverse of matrix and returns it.
## Course: Introduction to R
## Assignment 2
## This function creates a matrix object and caches its inverse.
# Build a cache-enabled matrix object: a list of closures sharing the
# wrapped matrix `x` and its lazily cached inverse `m` (computed
# externally by cacheSolve()). set() replaces the matrix and invalidates
# the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
# Replace the stored matrix and clear the now-stale cached inverse.
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
# Cache setter/getter for the inverse.
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## Compute (and cache) the inverse of the special "matrix" object created
## by makeCacheMatrix().
##
## Fixes two defects in the original: the default `x = matrix()` was
## broken (a bare matrix has no $getsolve() accessor, so the default could
## never work), and `...` was accepted but silently dropped instead of
## being forwarded to solve().
##
## Args:
##   x: a cache-enabled matrix object as returned by makeCacheMatrix().
##   ...: further arguments passed on to solve() (e.g. numerical tolerance).
##
## Returns: the inverse of the wrapped matrix, taken from the cache when
## one has already been computed.
cacheSolve <- function(x, ...) {
  m <- x$getsolve()
  if(!is.null(m)) {
    message("getting cached matrix inverse")
    return(m)
  }
  mymatrix <- x$get()
  m <- solve(mymatrix, ...)
  x$setsolve(m)
  m
}
|
## Multi-panel (2x5) figure: reported-catch scenarios (top row) against the
## biomass trajectories estimated under each scenario (bottom row).
##
## Args:
##   results: simulation results; must contain b_est, an array of estimated
##            biomass indexed as b_est[run, scenario, year] -- columns
##            2/4/6/12/14 are presumably the 100%, 50%, 150%, increasing
##            and decreasing reporting scenarios (confirm against the
##            simulation setup).
##   bw: if TRUE draw the scenario curves in greyscale instead of colour.
##
## NOTE(review): relies on free variables from the calling environment:
## `catch1` (reported catch series), `datyrs` (number of data years) and
## brewer.pal() from RColorBrewer -- TODO: pass/attach these explicitly.
Paper_Fig_Simulation <- function(results, bw=FALSE){
  ## Fix: the `results` argument was previously ignored and a global `res`
  ## was read instead; alias it so the supplied results are actually used.
  res <- results
  par(mfrow=c(2,5), mar=c(3,0,0,0), omi=c(1,2,1,1))
  ## catch time series
  xplot <- 1:datyrs
  if(bw==FALSE){
    cols <- brewer.pal(4, "Set1")
    lty_type <- 2
  }
  if(bw==TRUE){
    cols <- rep(gray(0.2), 4)
    lty_type <- 2
  }
  ## Linear reporting-rate ramps over the data years: 0.2 -> 0.8 (up) and
  ## 0.8 -> 0.2 (down).
  up <- (0.8-0.2)/(datyrs-1)*xplot + 0.2
  down <- (0.2-0.8)/(datyrs-1)*xplot + 0.8
  ## Top row, panel 1: true catch, 100% reporting.
  plot(x=xplot, y=catch1, type="l", lwd=3, ylim=c(0, max(catch1)*1.6),
       xaxs="i", yaxs="i", xaxt="n", yaxt="n", col=gray(0.2))
  axis(2, at=pretty(c(1,catch1*1.6)), cex.axis=3)
  mtext("Reported\ncatch", side=2, line=5, cex=3)
  mtext("100% Reporting", side=3, line=1.5, cex=1.9)
  ## Panel 2: constant 50% under-reporting.
  plot(x=xplot, y=catch1, type="l", lwd=3, ylim=c(0, max(catch1)*1.6),
       xaxs="i", yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
  lines(x=xplot, y=catch1*0.5, lwd=3, col=cols[1])
  mtext("Constant Under-reporting", side=3, line=1.5, cex=1.9)
  ## Panel 3: constant 150% over-reporting.
  plot(x=xplot, y=catch1, type="l", lwd=3, xaxs="i", ylim=c(0, max(catch1)*1.6),
       yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
  lines(x=xplot, y=catch1*1.5, lwd=3, col=cols[2])
  mtext("Constant Over-reporting", side=3, line=1.5, cex=1.9)
  ## Panel 4: reporting increasing over time.
  plot(x=xplot, y=catch1, type="l", lwd=3, xaxs="i", ylim=c(0, max(catch1)*1.6),
       yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
  lines(x=xplot, y=catch1*up, lwd=3, col=cols[3])
  mtext("Increasing Reporting", side=3, line=1.5, cex=1.9)
  ## Panel 5: reporting decreasing over time.
  plot(x=xplot, y=catch1, type="l", lwd=3, xaxs="i", ylim=c(0, max(catch1)*1.6),
       yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
  lines(x=xplot, y=catch1*down, lwd=3, col=cols[4])
  mtext("Decreasing Reporting", side=3, line=1.5, cex=1.9)
  ## Bottom row: estimated biomass for run 1; the 100%-reporting estimate
  ## (scenario column 2) is repeated in grey on every panel for reference.
  plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  axis(1, at=seq(10,30, by=10), cex.axis=3)
  axis(2, at=seq(0, 1850, by=500), cex.axis=3)
  mtext("Estimated\nbiomass", side=2, line=5, cex=3)
  plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  lines(x=xplot, y=res$b_est[1,4,], col=cols[1], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  axis(1, at=seq(10,30, by=10), cex.axis=3)
  plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  mtext("Year", side=1, line=5, cex=3)
  lines(x=xplot, y=res$b_est[1,6,], col=cols[2], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  axis(1, at=seq(10,30, by=10), cex.axis=3)
  plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  lines(x=xplot, y=res$b_est[1,12,], col=cols[3], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  axis(1, at=seq(10,30, by=10), cex.axis=3)
  plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  lines(x=xplot, y=res$b_est[1,14,], col=cols[4], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
  axis(1, at=seq(10,30, by=10), cex.axis=3)
} | /R_functions/Paper_Fig_Simulation.R | no_license | merrillrudd/catch_misreporting_sim | R | false | false | 3,213 | r | Paper_Fig_Simulation <- function(results, bw=FALSE){
par(mfrow=c(2,5), mar=c(3,0,0,0), omi=c(1,2,1,1))
## catch time series
xplot <- 1:datyrs
if(bw==FALSE){
cols <- brewer.pal(4, "Set1")
lty_type <- 2
}
if(bw==TRUE){
cols <- rep(gray(0.2), 4)
lty_type <- 2
}
up <- (0.8-0.2)/(datyrs-1)*xplot + 0.2
down <- (0.2-0.8)/(datyrs-1)*xplot + 0.8
plot(x=xplot, y=catch1, type="l", lwd=3, ylim=c(0, max(catch1)*1.6),
xaxs="i", yaxs="i", xaxt="n", yaxt="n", col=gray(0.2))
axis(2, at=pretty(c(1,catch1*1.6)), cex.axis=3)
mtext("Reported\ncatch", side=2, line=5, cex=3)
mtext("100% Reporting", side=3, line=1.5, cex=1.9)
plot(x=xplot, y=catch1, type="l", lwd=3, ylim=c(0, max(catch1)*1.6),
xaxs="i", yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
lines(x=xplot, y=catch1*0.5, lwd=3, col=cols[1])
mtext("Constant Under-reporting", side=3, line=1.5, cex=1.9)
plot(x=xplot, y=catch1, type="l", lwd=3, xaxs="i", ylim=c(0, max(catch1)*1.6),
yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
lines(x=xplot, y=catch1*1.5, lwd=3, col=cols[2])
mtext("Constant Over-reporting", side=3, line=1.5, cex=1.9)
plot(x=xplot, y=catch1, type="l", lwd=3, xaxs="i", ylim=c(0, max(catch1)*1.6),
yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
lines(x=xplot, y=catch1*up, lwd=3, col=cols[3])
mtext("Increasing Reporting", side=3, line=1.5, cex=1.9)
plot(x=xplot, y=catch1, type="l", lwd=3, xaxs="i", ylim=c(0, max(catch1)*1.6),
yaxs="i", xaxt="n", yaxt="n", col=gray(0.2), lty=lty_type)
lines(x=xplot, y=catch1*down, lwd=3, col=cols[4])
mtext("Decreasing Reporting", side=3, line=1.5, cex=1.9)
plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
axis(1, at=seq(10,30, by=10), cex.axis=3)
axis(2, at=seq(0, 1850, by=500), cex.axis=3)
mtext("Estimated\nbiomass", side=2, line=5, cex=3)
plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
lines(x=xplot, y=res$b_est[1,4,], col=cols[1], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
axis(1, at=seq(10,30, by=10), cex.axis=3)
plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
mtext("Year", side=1, line=5, cex=3)
lines(x=xplot, y=res$b_est[1,6,], col=cols[2], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
axis(1, at=seq(10,30, by=10), cex.axis=3)
plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
lines(x=xplot, y=res$b_est[1,12,], col=cols[3], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
axis(1, at=seq(10,30, by=10), cex.axis=3)
plot(x=xplot, y=res$b_est[1,2,], col=gray(0.2), lty=lty_type, type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
lines(x=xplot, y=res$b_est[1,14,], col=cols[4], type="l", lwd=3, ylim=c(0,1850), xaxs="i", yaxs="i", xaxt="n", yaxt="n")
axis(1, at=seq(10,30, by=10), cex.axis=3)
} |
library(qqman)
library(data.table)

## GWAS association diagnostics: for each of the three PLINK --assoc
## result sets, write a Manhattan plot (with the simulated causal SNPs
## highlighted) and a Q-Q plot of the p-values into diagnostics/.
##
## Refactor: the original repeated the png()/plot/dev.off() triplet six
## times and leaked the graphics device if a plot call errored (dev.off()
## was never reached); the helper below closes the device via on.exit()
## even on error, and a loop removes the duplication. Output filenames
## are unchanged.

ceu <- fread("output/ceu.qassoc", header = TRUE)
yri <- fread("output/yri.qassoc", header = TRUE)
dataset <- fread("output/dataset.qassoc", header = TRUE)

## Causal SNP ids to highlight (one id per line, no header).
snps <- read.table("lib/snplist.txt", header = FALSE)
snps <- snps$V1

if(!dir.exists("diagnostics")) dir.create("diagnostics")

## Render one plot into a PNG file, guaranteeing the device is closed
## even if the plotting expression fails.
plot_png <- function(path, plot_expr) {
  png(path)
  on.exit(dev.off(), add = TRUE)
  force(plot_expr)
}

results <- list(ceu = ceu, yri = yri, dataset = dataset)
for (nm in names(results)) {
  plot_png(file.path("diagnostics", paste0(nm, ".man.png")),
           manhattan(results[[nm]], highlight = snps))
  plot_png(file.path("diagnostics", paste0(nm, ".qq.png")),
           qq(results[[nm]]$P))
}
| /R/assoc.R | permissive | Chris1221/gwas_sim | R | false | false | 682 | r | library(qqman)
library(data.table)
ceu <- fread("output/ceu.qassoc", h = T)
yri <- fread("output/yri.qassoc", h = T)
dataset <- fread("output/dataset.qassoc", h = T)
snps <- read.table("lib/snplist.txt", h = F)
snps <- snps$V1
if(!dir.exists("diagnostics")) dir.create("diagnostics")
png('diagnostics/ceu.man.png')
manhattan(ceu, highlight = snps)
dev.off()
png('diagnostics/yri.man.png')
manhattan(yri, highlight = snps)
dev.off()
png('diagnostics/dataset.man.png')
manhattan(dataset, highlight = snps)
dev.off()
png('diagnostics/ceu.qq.png')
qq(ceu$P)
dev.off()
png('diagnostics/yri.qq.png')
qq(yri$P)
dev.off()
png('diagnostics/dataset.qq.png')
qq(dataset$P)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{cap_subtopics}
\alias{cap_subtopics}
\title{Subtopic codes from the Comparative Agendas Project}
\format{An object of class \code{data.frame} with 213 rows and 3 columns.}
\source{
See for more: https://www.comparativeagendas.net/datasets_codebooks
}
\usage{
data(cap_subtopics)
}
\description{
Subtopic codes from the Comparative Agendas Project
}
\keyword{datasets}
| /man/cap_subtopics.Rd | permissive | elliottmorris/politicaldata | R | false | true | 474 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{cap_subtopics}
\alias{cap_subtopics}
\title{Subtopic codes from the Comparative Agendas Project}
\format{An object of class \code{data.frame} with 213 rows and 3 columns.}
\source{
See for more: https://www.comparativeagendas.net/datasets_codebooks
}
\usage{
data(cap_subtopics)
}
\description{
Subtopic codes from the Comparative Agendas Project
}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.