content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# Create a caching wrapper around a matrix. The returned list of closures
# stores the matrix together with a lazily supplied inverse, so the inverse
# can be computed once and reused (see cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL

  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Cache a computed inverse.
  setinverse <- function(inverse) {
    inv <<- inverse
  }

  # Return the cached inverse, or NULL if none has been stored yet.
  getinverse <- function() {
    inv
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
# Return the inverse of the matrix wrapped by makeCacheMatrix().
# If the inverse is already cached it is returned directly, skipping the
# computation; otherwise it is computed with solve(), stored in the cache
# via setinverse(), and returned.
# Assumes the wrapped matrix is always invertible.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # Forward ... to solve() so callers can pass extra arguments (e.g. tol);
  # previously they were accepted by cacheSolve() but silently dropped.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | rakshith-p/ProgrammingAssignment2 | R | false | false | 1,092 | r | # Matrix inversion is usually a costly computation and there may be some benefit
# to caching the inverse of a matrix rather than compute it repeatedly. The
# following two functions are used to cache the inverse of a matrix.
# Wrap a matrix in a list of closures that cache its inverse (duplicate copy
# of the definition above; used together with cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL until setinverse() is called.
inv <- NULL
# Replace the matrix and invalidate any stale cached inverse.
set <- function(y) {
x <<- y
inv <<- NULL
}
# Accessors: the stored matrix, and its cached inverse (NULL if unset).
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
# This function assumes that the matrix is always invertible.
# Return the inverse of the matrix wrapped by makeCacheMatrix(), using the
# cached value when available and computing/storing it otherwise.
# Assumes the wrapped matrix is always invertible.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # Forward ... to solve() so extra arguments (e.g. tol) are honoured;
  # previously they were accepted but silently dropped.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/local_W.R
\name{R.lw.tapered}
\alias{R.lw.tapered}
\title{Concentrated local Whittle likelihood for tapered estimate. Only for internal use. Cf. Velasco (1999).}
\usage{
R.lw.tapered(d, peri, m, p, T)
}
\description{
Concentrated local Whittle likelihood for tapered estimate. Only for internal use. Cf. Velasco (1999).
}
\keyword{internal}
| /man/R.lw.tapered.Rd | no_license | ayotoasset/LongMemoryTS | R | false | true | 419 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/local_W.R
\name{R.lw.tapered}
\alias{R.lw.tapered}
\title{Concentrated local Whittle likelihood for tapered estimate. Only for internal use. Cf. Velasco (1999).}
\usage{
R.lw.tapered(d, peri, m, p, T)
}
\description{
Concentrated local Whittle likelihood for tapered estimate. Only for internal use. Cf. Velasco (1999).
}
\keyword{internal}
|
library(BiocParallel)
#register(MulticoreParam(6))
register(bpstart(MulticoreParam(6)))
bpparam()
#q()
#library(multtest)
library(xcms)
library(faahKO)
options(width = 160)
testing <- TRUE
start_time <- Sys.time()
# set path according to opsys
opSys <- Sys.info()[1]
isLinux <- grepl('Linux', opSys[1])
filepath <- '/media/sf_VM_share/singapore_batch5_pbqd'
# MS files should be grouped in folders below this directory.
datapath <- '/media/sf_VM_share/singapore_batch5_pbqd'
dir(datapath, recursive=TRUE)
# Load a "typical" MS file which can be considered a reference.
ref <- "/media/sf_VM_share/singapore_batch5_pbqd/pbqd/pbQC70.mzXML"
# Load all the MS files
rawfiles <- dir(datapath, full.names=TRUE,pattern="\\.mzXML*", recursive=TRUE)
rawfiles
of1 <- paste('gp5_pbqcs_2_iters.dat', sep = '')
cat('$Parameters', file = of1, append = FALSE, sep = '\n')
for (i in 1:length(rawfiles)) {
outStr <- paste('FILELIST', i, rawfiles[i], sep = ',')
cat(outStr, file = of1, append = TRUE, sep = '\n')
}
##########################################################################
#
# Set some parameters
#
##########################################################################
err_ppm = 20
PeakWidth = 25
# IntThresh used for noise in centwave
IntThresh = 100
mzdiff = 0.001
SNThresh = 10
rtStart <- 60
rtEnd <- 1860
# for graphics set width of window
width <- 25
###########################################################################
#
# Deal with reference file
#
###########################################################################
# Load a reference file & define the scan range START----------------------
refRaw <- xcmsRaw(ref, profstep= 0.1, includeMSn= FALSE, mslevel= NULL,
scanrange= NULL)
refRaw
scanStart <- head(which(refRaw@scantime > rtStart & refRaw@scantime < rtEnd),
n= 1)
scanEnd <- tail(which(refRaw@scantime > rtStart & refRaw@scantime < rtEnd),
n= 1)
scanRange <- c(scanStart,scanEnd)
# Find Peaks in Ref -------------------------------------------------------
refRaw <- xcmsRaw(ref, profstep= 0.1, includeMSn= FALSE, mslevel= NULL,
scanrange= scanRange)
refRaw
#refPks <- findPeaks(refRaw, method= 'centWave', ppm= err_ppm,
# peakwidth= PeakWidth, snthresh= SNThresh,
# prefilter= c(3,IntThresh), mzCenterFun= "mean",
# integrate= 1, mzdiff= mzdiff, verbose.columns= TRUE,
# fitgauss= FALSE, noise=IntThresh)
refPks <- findPeaks(refRaw)
###########################################################################
#
# Deal with all other LC/MS files
#
###########################################################################
print ('Create xset')
# Create xset -------------------------------------------------------------
#xset <- xcmsSet(rawfiles, method='centWave', ppm= err_ppm,
# peakwidth= PeakWidth, snthresh= SNThresh,
# prefilter= c(3,IntThresh), mzCenterFun= "mean",
# integrate= 1, mzdiff= mzdiff, verbose.columns= FALSE,
# fitgauss= FALSE, BPPARAM = MulticoreParam(workers = 6))
xset <- xcmsSet(rawfiles)
############################################################################
#
# Set Grouping & Alignment Parameters
#
##############################################################################
bw = 5
minsamp = 2
mzwid = 0.015
max_pks = 100
#############################################################################
#
# Grouping Happens here
#
#############################################################################
print ('grouping')
xset <- group(
xset, method= "density", bw= bw, minfrac= 0.2, minsamp=minsamp,
mzwid= mzwid, max= max_pks, sleep= 0)
###########################################################################
#
# Retention Time Alignment
#
###########################################################################
print ('alignment')
# RT alignment ------------------------------------------------------------
align_ref <- match(basename(ref),basename(rawfiles[]))
numIter <- 2
for (i in 1:numIter) {
print (paste('processing RTCORR loop',i))
xset <- retcor(xset)
# max is different when doing multiple retcor passes
xset <- group(xset, method= "density", bw= bw, minfrac= 0.5,
minsamp= minsamp, mzwid= mzwid, max=max_pks, sleep= 0
)
}
##############################################################################
#
# Retrieve missing data
#
##############################################################################
# This step fills in missing peak data across samples; the progress label
# previously read 'rt correction', which misdescribed what runs here.
print ('filling missing peaks')
xset3 <- fillPeaks(xset, method="chrom", BPPARAM = MulticoreParam(workers = 6))
#//////////////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////
#cdffiles <- list.files(filepath, recursive = TRUE, full.names = TRUE)
#print(cdffiles)
#xset <- xcmsSet(cdffiles)
#xset <- group(xset)
#xset2 <- retcor(xset, family = 'symmetric')
#xset2 <- group(xset2, bw = 10)
#xset3 <- fillPeaks(xset2)
#xset3 <- group(xset3)
groupIndices <- which(groups(xset3)[,1] > 1)
EICs <- getEIC(xset3, groupidx = groupIndices, rt = 'corrected')
allGroups <- xset3@groupidx
groupEntries <- unlist(xset3@groupidx, recursive = TRUE)
# careful
# seems that the exact cols present in the xcmsSet@Peaks array depend on the method used to generate it
# these cols present in every case according to https://rdrr.io/bioc/xcms/man/findPeaks-methods.html
# mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
#headers <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, intf, maxo, maxf, i, sn, sample, group, index, filled, accepted, score, rts, ints'
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
standardHeaders <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo, sample, group, index, filled, accepted, score, eicRTs, eicINTs, specMZs, specINTs'
cat(standardHeaders, file = of1, append = TRUE, sep = '\n')
# Find directly-detected peaks in the given sample whose m/z lies within
# +/- tol of the target mz. Filled-in peaks are excluded: fillPeaks()
# appends filled rows after the detected ones, so rows before the first
# entry of xset@filled are the "real" detections.
# `index` is unused but kept for call-site compatibility.
getMissedPeaks <- function(mz, sample, index, xset, tol = 0.1) {
  firstFilled <- head(xset@filled, 1)
  if (length(firstFilled) == 0) {
    # No filled peaks at all -> every row is a direct detection.
    # (The previous `0:lastReal` indexing errored on integer(0) here.)
    xReal <- xset@peaks
  } else {
    # drop = FALSE keeps a one-row result as a matrix so the column
    # subscripts below still work.
    xReal <- xset@peaks[seq_len(firstFilled - 1), , drop = FALSE]
  }
  # search for alternative peaks in the same data file within mz tols
  which(xReal[, 'sample'] == sample &
        xReal[, 1] > mz - tol &
        xReal[, 1] < mz + tol)
}
# Flatten the EIC trace for one feature group in one sample into two
# space-separated strings: retention times and rounded intensities.
getEICdataForPeak <- function(groupNumber, sampleNumber, EICs) {
  trace <- as.data.frame(do.call(rbind, EICs@eic[[sampleNumber]][groupNumber]))
  rtString <- paste(unlist(trace['rt']), collapse = ' ')
  intString <- paste(unlist(round(trace['intensity'])), collapse = ' ')
  list('rts' = rtString, 'ints' = intString)
}
# Extract, as a character vector, only the peak-table columns guaranteed to
# exist regardless of the peak-detection method (some xcms algorithms add
# extra columns to xset@peaks, which would break the fixed output format).
getStandardPeakColumns <- function(xset, row) {
  sharedCols <- c("mz", "mzmin", "mzmax", "rt", "rtmin", "rtmax", "into", "maxo")
  pieces <- as.data.frame(xset@peaks[row, sharedCols])
  unlist(do.call(paste, pieces))
}
# Map a feature-group name (as returned by groupnames()) to its numeric index.
getGroupNumberFromName <- function(name, xset) {
return (which(groupnames(xset) == name))
}
# Map a numeric feature-group index back to its groupnames() entry.
getGroupNameFromNumber <- function(number, xset) {
return(groupnames(xset)[number])
}
# Sample (file) number a given row of xset@peaks belongs to.
getSampleNumberFromPeakIndex <- function(peakIndex, xset) {
return(xset@peaks[peakIndex, 'sample'])
}
# m/z value (first column of xset@peaks) for a given peak row.
getPeakMZFromIndex <- function(peakIndex, xset) {
return(xset@peaks[peakIndex, 1])
}
# Extract the mass-spectrum slice around a peak: all (mz, intensity) pairs
# within +/- 10 m/z of the target, taken from the scan whose raw retention
# time exactly equals `rt`. `rt` must come from xraw@scantime (the caller
# looks it up there), otherwise match() returns NA and getScan() fails.
getMSData <- function(xraw, rt, mz) {
  mzH <- mz + 10
  mzL <- mz - 10
  scanTimes <- xraw@scantime
  scanIndex <- match(rt, scanTimes)
  spec <- getScan(xraw, scanIndex)
  mask <- which(spec[, 'mz'] > mzL & spec[, 'mz'] < mzH)
  subset <- spec[mask, ]
  # Single-row indexing collapses the matrix to a plain numeric vector.
  # Use is.matrix() rather than `class(subset) == 'matrix'`: since R 4.0 a
  # matrix has class c("matrix", "array"), so the old comparison yielded a
  # length-2 condition (a warning, and an error on R >= 4.2).
  if (is.matrix(subset)) {
    mzs <- subset[, 'mz']
    ints <- subset[, 'intensity']
  } else {
    mzs <- subset[1]
    ints <- subset[2]
  }
  msdata <- list('mz' = mzs, 'int' = ints, 'mzTarget' = mz, 'rtTarget' = rt)
  return (msdata)
}
# For every peak in xset@peaks, load the raw data file it came from and pull
# out the mass spectrum around the peak (via getMSData). Returns a list
# indexed by peak row number; entries for samples with no peaks stay NULL.
getRawData <- function(xset, files) {
totalPeaks <- length(xset@peaks[,'mz'])
# Preallocate one slot per peak row so results can be stored by index.
allData <- vector('list', totalPeaks)
for (filenum in 1:length(files)) {
print (paste('processing filenum', filenum))
# Rows of xset@peaks that belong to this file.
mask <- which(xset@peaks[,'sample'] == filenum)
if (length(mask) == 0) { next }
# Load raw data for this file once, outside the per-peak loop.
xraw <- xcmsRaw(files[filenum])
rawRTs <- xset@rt[['raw']][[filenum]]
correctedRTs <- xset@rt[['corrected']][[filenum]]
# if (filenum == 2) {
# print('raw')
# print(length(rawRTs))
# print(rawRTs)
# print('corrected')
# print(length(correctedRTs))
# print(correctedRTs)
# }
for (samplePeak in mask) {
mz <- xset@peaks[samplePeak,'mz']
# careful here: RT correction changes the rt stored in xset@peaks,
# so this value is the CORRECTED retention time.
rtCorr <- xset@peaks[samplePeak,'rt']
# Need the corresponding RAW retention time to index into xraw.
# The number of decimals can differ between raw and corrected RT
# lists for certain retcor algorithms, so an exact match() can fail;
# instead pick the corrected RT with the minimal absolute difference.
#rtIndex <- match(rtCorr, correctedRTs)
rtIndex <- which(abs(correctedRTs - rtCorr) == min(abs(correctedRTs - rtCorr)))
# print(paste(rtCorr, correctedRTs[rtIndex]))
rtRaw <- rawRTs[rtIndex]
#
# if (filenum == 2) {
# print (correctedRTs)
# print (rtIndex)
# print(paste(filenum, samplePeak, rtCorr, rtRaw))
# q()
# }
peakMSData <- getMSData(xraw, rtRaw, mz)
#print('')
allData[samplePeak] <- list(peakMSData)
}
}
return(allData)
}
#
#mask <- which(xset3@peaks[,'sample'] == 1)
#
#head (xset3@peaks[mask,],10)
#
#mask <- which(xset3@peaks[,'sample'] == 2)
#
#head (xset3@peaks[mask,],10)
#
#print('2 raw')
#print(xset3@rt[['raw']][[2]])
#print('2 corrected')
#print(xset3@rt[['corrected']][[2]])
#
print ('getting ms data')
# print (xset3@peaks)
rawData <- getRawData(xset3, rawfiles)
print ('writing results')
# each group contains the peak indices that have been aligned into a single feature
for (groupNumber in groupIndices) {
group <- allGroups[[groupNumber]]
groupLen <- length(group)
# loop through all peaks in the feature group
for (peak in group) {
sampleNumber <- getSampleNumberFromPeakIndex(peak, xset3)
targetMZ <- getPeakMZFromIndex(peak, xset3)
# check if peak in filled
filledPeak <- peak %in% xset3@filled
if (filledPeak == TRUE) {
filled <- 1
#targetMZ <- xset3@peaks[peak,1]
#sample <- xset3@peaks[peak, 'sample']
recoveredIndices <- getMissedPeaks(targetMZ, sampleNumber, peak, xset3)
} else {
filled <- 0
recoveredIndices <- NULL
}
# filled = 0 > directly detected peak
# filled = 1 > filled peak
# filled = 2 > potential direct candidate for a filled peak
eicData <- getEICdataForPeak(groupNumber, sampleNumber, EICs)
# write data to file, this could be a regular or filled peak
#data <- unlist(do.call(paste, as.data.frame(xset3@peaks[peak,])))
data <- getStandardPeakColumns(xset3, peak)
peakMassSpec <- rawData[[peak]]
x <- peakMassSpec[['mz']]
#print(x)
#print(typeof(x))
#print(max(x))
# if (targetMZ < min(peakMassSpec[['mz']]) | targetMZ > max(peakMassSpec[['mz']])) {
# print('')
# print (paste(sampleNumber, targetMZ, peakMassSpec['mzTarget']))
# print (peakMassSpec['mz'])
# }
mzs <- paste(unlist(peakMassSpec[['mz']]), sep = '', collapse = ' ')
ints <- paste(unlist(peakMassSpec[['int']]), sep = '', collapse = ' ')
# print('')
# print('')
outStr <- paste(paste(data,collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, filled, 'None', 0, eicData['rts'], eicData['ints'], mzs, ints, collapse = ', ', sep = ', ')
# outStr <- paste(paste(data,collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, filled, 0, 0, 0, 0, collapse = ', ', sep = ', ')
cat(outStr, file = of1, append = TRUE, sep = '\n')
# if filled, write any other candidate peaks to file as well
if (!is.null(recoveredIndices)) {
if ( length(recoveredIndices) > 0) {
for (recoveredIndex in recoveredIndices) {
if (recoveredIndex %in% groupEntries == FALSE) {
# special case, need to get EICs for ungrouped peaks separately
# data <- unlist(do.call(paste, as.data.frame(xset3@peaks[recoveredIndex,])))
data <- getStandardPeakColumns(xset3, recoveredIndex)
outStr <- paste(paste(data, collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, 2, 'None', 0, 0, 0, mzs, ints, collapse = ', ', sep = ', ')
cat(outStr, file = of1, append = TRUE, sep = '\n')
}
}
}
}
}
}
warnings()
#
#
#of1 <- paste('peakGroup.dat', sep = '')
#
## careful
## seems that the exact cols present in the xcmsSet@Peaks array depend on the method used to generate it
#
## these cols present in every case according to https://rdrr.io/bioc/xcms/man/findPeaks-methods.html
## mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo
#
#
## 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
##headers <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, intf, maxo, maxf, i, sn, sample, group, index, filled, accepted, score, rts, ints'
#
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
#standardHeaders <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo, sample, group, index, filled, accepted, score, eicRTs, eicINTs'
#
#cat(standardHeaders, file = of1, append = FALSE, sep = '\n')
#
#getMissedPeaks <- function(mz, sample, index, xset, tol = 0.1) {
# # Don't want to select other filled peaks so need to get matrix of only directly detected peaks
# firstFilled <- head(xset@filled,1)
# lastReal <- firstFilled - 1
# xReal <- xset@peaks[0:lastReal,]
#
# # search for alternative peaks in the same data file within mz tols
# indices <- which(xReal[,'sample'] == sample & xReal[,1] > mz - tol & xReal[,1] < mz + tol)
# return (indices)
#}
#
#getEICdataForPeak <- function(groupNumber, sampleNumber, EICs) {
# dataFrame <- as.data.frame(do.call(rbind, EICs@eic[[sampleNumber]][groupNumber]))
# rts <- paste(unlist(dataFrame['rt']), sep = '', collapse = ' ')
# ints <- paste(unlist(round(dataFrame['intensity'])), sep = '', collapse = ' ')
# eicData <- list('rts' = rts, 'ints' = ints)
# return(eicData)
#}
#
#getStandardPeakColumns <- function(xset, row) {
#
# # some peak edetection algos in xcms add extra columns to the xset@peaks matrix
# # don't want this to muck around with the output formatting
# # extract data from only the columns that are present in all groups
#
# result <- unlist(
# do.call(paste,
# as.data.frame(
# xset@peaks[row, c("mz", "mzmin", "mzmax", "rt", "rtmin", "rtmax", "into", "maxo")]
# )
# )
# )
# return (result)
#}
#
#
#getGroupNumberFromName <- function(name, xset) {
# return (which(groupnames(xset) == name))
#}
#getGroupNameFromNumber <- function(number, xset) {
# return(groupnames(xset)[number])
#}
#getSampleNumberFromPeakIndex <- function(peakIndex, xset) {
# return(xset@peaks[peakIndex, 'sample'])
#}
#getPeakMZFromIndex <- function(peakIndex, xset) {
# return(xset@peaks[peakIndex, 1])
#}
#
#
## each group contains the peak indices that have been aligned into a single feature
#for (groupNumber in groupIndices) {
#
# group <- allGroups[[groupNumber]]
# groupLen <- length(group)
#
# # loop through all peaks in the feature group
# for (peak in group) {
#
# sampleNumber <- getSampleNumberFromPeakIndex(peak, xset3)
# targetMZ <- getPeakMZFromIndex(peak, xset3)
#
# # check if peak in filled
# filledPeak <- peak %in% xset3@filled
## print(paste('filledPeak?', filledPeak))
#
# if (filledPeak == TRUE) {
# filled <- 1
# #targetMZ <- xset3@peaks[peak,1]
# #sample <- xset3@peaks[peak, 'sample']
# recoveredIndices <- getMissedPeaks(targetMZ, sampleNumber, peak, xset3)
# } else {
# filled <- 0
# recoveredIndices <- NULL
# }
#
# # filled = 0 > directly detected peak
# # filled = 1 > filled peak
# # filled = 2 > potential direct candidate for a filled peak
#
# eicData <- getEICdataForPeak(groupNumber, sampleNumber, EICs)
#
# # write data to file, this could be a regular or filled peak
# #data <- unlist(do.call(paste, as.data.frame(xset3@peaks[peak,])))
# data <- getStandardPeakColumns(xset3, peak)
#
# outStr <- paste(paste(data,collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, filled, 0, 0, eicData['rts'], eicData['ints'], collapse = ', ', sep = ', ')
## outStr <- paste(paste(data,collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, filled, 0, 0, 0, 0, collapse = ', ', sep = ', ')
# cat(outStr, file = of1, append = TRUE, sep = '\n')
#
# # if filled, write any other candidate peaks to file as well
# if (!is.null(recoveredIndices)) {
# if ( length(recoveredIndices) > 0) {
# for (recoveredIndex in recoveredIndices) {
# if (recoveredIndex %in% groupEntries == FALSE) {
# # special case, need to get EICs for ungrouped peaks separately
# # data <- unlist(do.call(paste, as.data.frame(xset3@peaks[recoveredIndex,])))
# data <- getStandardPeakColumns(xset3, recoveredIndex)
# outStr <- paste(paste(data, collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, 2, 0, 0, 0, 0, collapse = ', ', sep = ', ')
#
# cat(outStr, file = of1, append = TRUE, sep = '\n')
# }
# }
# }
# }
# }
#}
end_time <- Sys.time()
print( end_time - start_time )
| /r_analysis/spore_gp5_pbqcs/dataExtractor.R | no_license | MetabolomicsAustralia-Bioinformatics/advance-XCMS | R | false | false | 19,678 | r |
library(BiocParallel)
#register(MulticoreParam(6))
register(bpstart(MulticoreParam(6)))
bpparam()
#q()
#library(multtest)
library(xcms)
library(faahKO)
options(width = 160)
testing <- TRUE
start_time <- Sys.time()
# set path according to opsys
opSys <- Sys.info()[1]
isLinux <- grepl('Linux', opSys[1])
filepath <- '/media/sf_VM_share/singapore_batch5_pbqd'
# MS files should be grouped in folders below this directory.
datapath <- '/media/sf_VM_share/singapore_batch5_pbqd'
dir(datapath, recursive=TRUE)
# Load a "typical" MS file which can be considered a reference.
ref <- "/media/sf_VM_share/singapore_batch5_pbqd/pbqd/pbQC70.mzXML"
# Load all the MS files
rawfiles <- dir(datapath, full.names=TRUE,pattern="\\.mzXML*", recursive=TRUE)
rawfiles
of1 <- paste('gp5_pbqcs_2_iters.dat', sep = '')
cat('$Parameters', file = of1, append = FALSE, sep = '\n')
for (i in 1:length(rawfiles)) {
outStr <- paste('FILELIST', i, rawfiles[i], sep = ',')
cat(outStr, file = of1, append = TRUE, sep = '\n')
}
##########################################################################
#
# Set some parameters
#
##########################################################################
err_ppm = 20
PeakWidth = 25
# IntThresh used for noise in centwave
IntThresh = 100
mzdiff = 0.001
SNThresh = 10
rtStart <- 60
rtEnd <- 1860
# for graphics set width of window
width <- 25
###########################################################################
#
# Deal with reference file
#
###########################################################################
# Load a reference file & define the scan range START----------------------
refRaw <- xcmsRaw(ref, profstep= 0.1, includeMSn= FALSE, mslevel= NULL,
scanrange= NULL)
refRaw
scanStart <- head(which(refRaw@scantime > rtStart & refRaw@scantime < rtEnd),
n= 1)
scanEnd <- tail(which(refRaw@scantime > rtStart & refRaw@scantime < rtEnd),
n= 1)
scanRange <- c(scanStart,scanEnd)
# Find Peaks in Ref -------------------------------------------------------
refRaw <- xcmsRaw(ref, profstep= 0.1, includeMSn= FALSE, mslevel= NULL,
scanrange= scanRange)
refRaw
#refPks <- findPeaks(refRaw, method= 'centWave', ppm= err_ppm,
# peakwidth= PeakWidth, snthresh= SNThresh,
# prefilter= c(3,IntThresh), mzCenterFun= "mean",
# integrate= 1, mzdiff= mzdiff, verbose.columns= TRUE,
# fitgauss= FALSE, noise=IntThresh)
refPks <- findPeaks(refRaw)
###########################################################################
#
# Deal with all other LC/MS files
#
###########################################################################
print ('Create xset')
# Create xset -------------------------------------------------------------
#xset <- xcmsSet(rawfiles, method='centWave', ppm= err_ppm,
# peakwidth= PeakWidth, snthresh= SNThresh,
# prefilter= c(3,IntThresh), mzCenterFun= "mean",
# integrate= 1, mzdiff= mzdiff, verbose.columns= FALSE,
# fitgauss= FALSE, BPPARAM = MulticoreParam(workers = 6))
xset <- xcmsSet(rawfiles)
############################################################################
#
# Set Grouping & Alignment Parameters
#
##############################################################################
bw = 5
minsamp = 2
mzwid = 0.015
max_pks = 100
#############################################################################
#
# Grouping Happens here
#
#############################################################################
print ('grouping')
xset <- group(
xset, method= "density", bw= bw, minfrac= 0.2, minsamp=minsamp,
mzwid= mzwid, max= max_pks, sleep= 0)
###########################################################################
#
# Retention Time Alignment
#
###########################################################################
print ('alignment')
# RT alignment ------------------------------------------------------------
align_ref <- match(basename(ref),basename(rawfiles[]))
numIter <- 2
for (i in 1:numIter) {
print (paste('processing RTCORR loop',i))
xset <- retcor(xset)
# max is different when doing multiple retcor passes
xset <- group(xset, method= "density", bw= bw, minfrac= 0.5,
minsamp= minsamp, mzwid= mzwid, max=max_pks, sleep= 0
)
}
##############################################################################
#
# Retrieve missing data
#
##############################################################################
# This step fills in missing peak data across samples; the progress label
# previously read 'rt correction', which misdescribed what runs here.
print ('filling missing peaks')
xset3 <- fillPeaks(xset, method="chrom", BPPARAM = MulticoreParam(workers = 6))
#//////////////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////
#cdffiles <- list.files(filepath, recursive = TRUE, full.names = TRUE)
#print(cdffiles)
#xset <- xcmsSet(cdffiles)
#xset <- group(xset)
#xset2 <- retcor(xset, family = 'symmetric')
#xset2 <- group(xset2, bw = 10)
#xset3 <- fillPeaks(xset2)
#xset3 <- group(xset3)
groupIndices <- which(groups(xset3)[,1] > 1)
EICs <- getEIC(xset3, groupidx = groupIndices, rt = 'corrected')
allGroups <- xset3@groupidx
groupEntries <- unlist(xset3@groupidx, recursive = TRUE)
# careful
# seems that the exact cols present in the xcmsSet@Peaks array depend on the method used to generate it
# these cols present in every case according to https://rdrr.io/bioc/xcms/man/findPeaks-methods.html
# mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
#headers <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, intf, maxo, maxf, i, sn, sample, group, index, filled, accepted, score, rts, ints'
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
standardHeaders <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo, sample, group, index, filled, accepted, score, eicRTs, eicINTs, specMZs, specINTs'
cat(standardHeaders, file = of1, append = TRUE, sep = '\n')
# Find directly-detected peaks in the given sample whose m/z lies within
# +/- tol of the target mz. Filled-in peaks are excluded: fillPeaks()
# appends filled rows after the detected ones, so rows before the first
# entry of xset@filled are the "real" detections.
# `index` is unused but kept for call-site compatibility.
getMissedPeaks <- function(mz, sample, index, xset, tol = 0.1) {
  firstFilled <- head(xset@filled, 1)
  if (length(firstFilled) == 0) {
    # No filled peaks at all -> every row is a direct detection.
    # (The previous `0:lastReal` indexing errored on integer(0) here.)
    xReal <- xset@peaks
  } else {
    # drop = FALSE keeps a one-row result as a matrix so the column
    # subscripts below still work.
    xReal <- xset@peaks[seq_len(firstFilled - 1), , drop = FALSE]
  }
  # search for alternative peaks in the same data file within mz tols
  which(xReal[, 'sample'] == sample &
        xReal[, 1] > mz - tol &
        xReal[, 1] < mz + tol)
}
getEICdataForPeak <- function(groupNumber, sampleNumber, EICs) {
dataFrame <- as.data.frame(do.call(rbind, EICs@eic[[sampleNumber]][groupNumber]))
rts <- paste(unlist(dataFrame['rt']), sep = '', collapse = ' ')
ints <- paste(unlist(round(dataFrame['intensity'])), sep = '', collapse = ' ')
eicData <- list('rts' = rts, 'ints' = ints)
return(eicData)
}
getStandardPeakColumns <- function(xset, row) {
# some peak edetection algos in xcms add extra columns to the xset@peaks matrix
# don't want this to muck around with the output formatting
# extract data from only the columns that are present in all groups
result <- unlist(
do.call(paste,
as.data.frame(
xset@peaks[row, c("mz", "mzmin", "mzmax", "rt", "rtmin", "rtmax", "into", "maxo")]
)
)
)
return (result)
}
getGroupNumberFromName <- function(name, xset) {
return (which(groupnames(xset) == name))
}
getGroupNameFromNumber <- function(number, xset) {
return(groupnames(xset)[number])
}
getSampleNumberFromPeakIndex <- function(peakIndex, xset) {
return(xset@peaks[peakIndex, 'sample'])
}
getPeakMZFromIndex <- function(peakIndex, xset) {
return(xset@peaks[peakIndex, 1])
}
# Extract the mass-spectrum slice around a peak: all (mz, intensity) pairs
# within +/- 10 m/z of the target, taken from the scan whose raw retention
# time exactly equals `rt`. `rt` must come from xraw@scantime (the caller
# looks it up there), otherwise match() returns NA and getScan() fails.
getMSData <- function(xraw, rt, mz) {
  mzH <- mz + 10
  mzL <- mz - 10
  scanTimes <- xraw@scantime
  scanIndex <- match(rt, scanTimes)
  spec <- getScan(xraw, scanIndex)
  mask <- which(spec[, 'mz'] > mzL & spec[, 'mz'] < mzH)
  subset <- spec[mask, ]
  # Single-row indexing collapses the matrix to a plain numeric vector.
  # Use is.matrix() rather than `class(subset) == 'matrix'`: since R 4.0 a
  # matrix has class c("matrix", "array"), so the old comparison yielded a
  # length-2 condition (a warning, and an error on R >= 4.2).
  if (is.matrix(subset)) {
    mzs <- subset[, 'mz']
    ints <- subset[, 'intensity']
  } else {
    mzs <- subset[1]
    ints <- subset[2]
  }
  msdata <- list('mz' = mzs, 'int' = ints, 'mzTarget' = mz, 'rtTarget' = rt)
  return (msdata)
}
getRawData <- function(xset, files) {
totalPeaks <- length(xset@peaks[,'mz'])
allData <- vector('list', totalPeaks)
for (filenum in 1:length(files)) {
print (paste('processing filenum', filenum))
# get peaks from file
mask <- which(xset@peaks[,'sample'] == filenum)
if (length(mask) == 0) { next }
# get raw data for file
xraw <- xcmsRaw(files[filenum])
rawRTs <- xset@rt[['raw']][[filenum]]
correctedRTs <- xset@rt[['corrected']][[filenum]]
# if (filenum == 2) {
# print('raw')
# print(length(rawRTs))
# print(rawRTs)
# print('corrected')
# print(length(correctedRTs))
# print(correctedRTs)
# }
for (samplePeak in mask) {
mz <- xset@peaks[samplePeak,'mz']
# careful here, rt corrections changes the rt in xset@peaks
# this RT is the CORRECTED value
rtCorr <- xset@peaks[samplePeak,'rt']
# need to get corresponding raw RT
# seem that the number of decimal can differ b/w raw and
# corrected RT lists for cerain rtcorr algos
# ---> need to minimise difference
#rtIndex <- match(rtCorr, correctedRTs)
rtIndex <- which(abs(correctedRTs - rtCorr) == min(abs(correctedRTs - rtCorr)))
# print(paste(rtCorr, correctedRTs[rtIndex]))
rtRaw <- rawRTs[rtIndex]
#
# if (filenum == 2) {
# print (correctedRTs)
# print (rtIndex)
# print(paste(filenum, samplePeak, rtCorr, rtRaw))
# q()
# }
peakMSData <- getMSData(xraw, rtRaw, mz)
#print('')
allData[samplePeak] <- list(peakMSData)
}
}
return(allData)
}
#
#mask <- which(xset3@peaks[,'sample'] == 1)
#
#head (xset3@peaks[mask,],10)
#
#mask <- which(xset3@peaks[,'sample'] == 2)
#
#head (xset3@peaks[mask,],10)
#
#print('2 raw')
#print(xset3@rt[['raw']][[2]])
#print('2 corrected')
#print(xset3@rt[['corrected']][[2]])
#
# Extract raw MS data for every grouped peak, then write one output row per
# peak (directly detected, filled, and candidate-recovery peaks) to `of1`.
print('getting ms data')
rawData <- getRawData(xset3, rawfiles)

print('writing results')
# Each group contains the peak indices that have been aligned into a single feature.
for (groupNumber in groupIndices) {
  group <- allGroups[[groupNumber]]

  # Loop through all peaks in the feature group.
  for (peak in group) {
    sampleNumber <- getSampleNumberFromPeakIndex(peak, xset3)
    targetMZ <- getPeakMZFromIndex(peak, xset3)

    # A peak listed in xset3@filled was imputed rather than directly detected.
    filledPeak <- peak %in% xset3@filled
    if (filledPeak) {
      filled <- 1
      # Look for directly-detected peaks in the same sample that might
      # actually correspond to this filled peak.
      recoveredIndices <- getMissedPeaks(targetMZ, sampleNumber, peak, xset3)
    } else {
      filled <- 0
      recoveredIndices <- NULL
    }

    # filled = 0 -> directly detected peak
    # filled = 1 -> filled peak
    # filled = 2 -> potential direct candidate for a filled peak
    eicData <- getEICdataForPeak(groupNumber, sampleNumber, EICs)

    # Write data to file; this could be a regular or filled peak.
    data <- getStandardPeakColumns(xset3, peak)
    peakMassSpec <- rawData[[peak]]
    mzs <- paste(unlist(peakMassSpec[['mz']]), collapse = ' ')
    ints <- paste(unlist(peakMassSpec[['int']]), collapse = ' ')

    outStr <- paste(paste(data, collapse = ', '),
                    sampleNumber, groupNumber, peak, filled, 'None', 0,
                    eicData['rts'], eicData['ints'], mzs, ints,
                    sep = ', ')
    cat(outStr, file = of1, append = TRUE, sep = '\n')

    # If filled, write any other candidate peaks to file as well.
    if (length(recoveredIndices) > 0) {
      for (recoveredIndex in recoveredIndices) {
        # Only write candidates that were not already grouped into a feature.
        if (!(recoveredIndex %in% groupEntries)) {
          # Special case: EICs for ungrouped peaks would need to be fetched
          # separately, so the EIC columns are written as 0 here.
          data <- getStandardPeakColumns(xset3, recoveredIndex)
          outStr <- paste(paste(data, collapse = ', '),
                          sampleNumber, groupNumber, peak, 2, 'None', 0, 0, 0,
                          mzs, ints,
                          sep = ', ')
          cat(outStr, file = of1, append = TRUE, sep = '\n')
        }
      }
    }
  }
}
warnings()
#
#
#of1 <- paste('peakGroup.dat', sep = '')
#
## careful
## seems that the exact cols present in the xcmsSet@Peaks array depend on the method used to generate it
#
## these cols present in every case according to https://rdrr.io/bioc/xcms/man/findPeaks-methods.html
## mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo
#
#
## 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
##headers <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, intf, maxo, maxf, i, sn, sample, group, index, filled, accepted, score, rts, ints'
#
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
#standardHeaders <- '#mz, mzmin, mzmax, rt, rtmin, rtmax, into, maxo, sample, group, index, filled, accepted, score, eicRTs, eicINTs'
#
#cat(standardHeaders, file = of1, append = FALSE, sep = '\n')
#
#getMissedPeaks <- function(mz, sample, index, xset, tol = 0.1) {
# # Don't want to select other filled peaks so need to get matrix of only directly detected peaks
# firstFilled <- head(xset@filled,1)
# lastReal <- firstFilled - 1
# xReal <- xset@peaks[0:lastReal,]
#
# # search for alternative peaks in the same data file within mz tols
# indices <- which(xReal[,'sample'] == sample & xReal[,1] > mz - tol & xReal[,1] < mz + tol)
# return (indices)
#}
#
#getEICdataForPeak <- function(groupNumber, sampleNumber, EICs) {
# dataFrame <- as.data.frame(do.call(rbind, EICs@eic[[sampleNumber]][groupNumber]))
# rts <- paste(unlist(dataFrame['rt']), sep = '', collapse = ' ')
# ints <- paste(unlist(round(dataFrame['intensity'])), sep = '', collapse = ' ')
# eicData <- list('rts' = rts, 'ints' = ints)
# return(eicData)
#}
#
#getStandardPeakColumns <- function(xset, row) {
#
# # some peak edetection algos in xcms add extra columns to the xset@peaks matrix
# # don't want this to muck around with the output formatting
# # extract data from only the columns that are present in all groups
#
# result <- unlist(
# do.call(paste,
# as.data.frame(
# xset@peaks[row, c("mz", "mzmin", "mzmax", "rt", "rtmin", "rtmax", "into", "maxo")]
# )
# )
# )
# return (result)
#}
#
#
#getGroupNumberFromName <- function(name, xset) {
# return (which(groupnames(xset) == name))
#}
#getGroupNameFromNumber <- function(number, xset) {
# return(groupnames(xset)[number])
#}
#getSampleNumberFromPeakIndex <- function(peakIndex, xset) {
# return(xset@peaks[peakIndex, 'sample'])
#}
#getPeakMZFromIndex <- function(peakIndex, xset) {
# return(xset@peaks[peakIndex, 1])
#}
#
#
## each group contains the peak indices that have been aligned into a single feature
#for (groupNumber in groupIndices) {
#
# group <- allGroups[[groupNumber]]
# groupLen <- length(group)
#
# # loop through all peaks in the feature group
# for (peak in group) {
#
# sampleNumber <- getSampleNumberFromPeakIndex(peak, xset3)
# targetMZ <- getPeakMZFromIndex(peak, xset3)
#
# # check if peak in filled
# filledPeak <- peak %in% xset3@filled
## print(paste('filledPeak?', filledPeak))
#
# if (filledPeak == TRUE) {
# filled <- 1
# #targetMZ <- xset3@peaks[peak,1]
# #sample <- xset3@peaks[peak, 'sample']
# recoveredIndices <- getMissedPeaks(targetMZ, sampleNumber, peak, xset3)
# } else {
# filled <- 0
# recoveredIndices <- NULL
# }
#
# # filled = 0 > directly detected peak
# # filled = 1 > filled peak
# # filled = 2 > potential direct candidate for a filled peak
#
# eicData <- getEICdataForPeak(groupNumber, sampleNumber, EICs)
#
# # write data to file, this could be a regular or filled peak
# #data <- unlist(do.call(paste, as.data.frame(xset3@peaks[peak,])))
# data <- getStandardPeakColumns(xset3, peak)
#
# outStr <- paste(paste(data,collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, filled, 0, 0, eicData['rts'], eicData['ints'], collapse = ', ', sep = ', ')
## outStr <- paste(paste(data,collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, filled, 0, 0, 0, 0, collapse = ', ', sep = ', ')
# cat(outStr, file = of1, append = TRUE, sep = '\n')
#
# # if filled, write any other candidate peaks to file as well
# if (!is.null(recoveredIndices)) {
# if ( length(recoveredIndices) > 0) {
# for (recoveredIndex in recoveredIndices) {
# if (recoveredIndex %in% groupEntries == FALSE) {
# # special case, need to get EICs for ungrouped peaks separately
# # data <- unlist(do.call(paste, as.data.frame(xset3@peaks[recoveredIndex,])))
# data <- getStandardPeakColumns(xset3, recoveredIndex)
# outStr <- paste(paste(data, collapse = ', ', sep = ', '), sampleNumber, groupNumber, peak, 2, 0, 0, 0, 0, collapse = ', ', sep = ', ')
#
# cat(outStr, file = of1, append = TRUE, sep = '\n')
# }
# }
# }
# }
# }
#}
# Report total elapsed runtime; assumes start_time was captured with
# Sys.time() near the top of the script (not visible in this chunk) -- confirm.
end_time <- Sys.time()
print( end_time - start_time )
|
## Setting working directory, creating any needed folders, downloading the
## data set, and importing the data file.
## NOTE: some of these steps may not need to be run again.
setwd("C:/Users/bdfitzgerald/Desktop/Data Science Specialist")
if(!file.exists("exploratory_data_analysis")){dir.create("exploratory_data_analysis")}
setwd("C:/Users/bdfitzgerald/Desktop/Data Science Specialist/exploratory_data_analysis")
if(!file.exists("course_project_1")){dir.create("course_project_1")}
# The original path and URL literals were split across two source lines, which
# embedded a literal newline inside the strings (breaking setwd/download.file).
# Build them in one piece instead.
setwd(paste0("C:/Users/bdfitzgerald/Desktop/Data Science Specialist/",
             "exploratory_data_analysis/course_project_1"))
fileurl <- paste0("https://d396qusza40orc.cloudfront.net/",
                  "exdata%2Fdata%2Fhousehold_power_consumption.zip")
download.file(fileurl, destfile = "electric_power_consumption.zip", mode = "wb")
unzip("electric_power_consumption.zip")
files <- list.files(full.names = TRUE)
data_full <- read.csv(files[2], sep = ";", header = TRUE)

## Making data adjustments, and subsetting the data to 2007-02-01..2007-02-02.
data_full$Date <- as.Date(data_full$Date, format = "%d/%m/%Y")
data <- subset(data_full, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)

## Making plot and saving it.
## NOTE(review): this script writes plot3.png and plots the sub-metering
## series -- confirm the intended output filename for this assignment.
with(data, {
  plot(Sub_metering_1 ~ Datetime, type = "l",
       ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(Sub_metering_2 ~ Datetime, col = "Red")
  lines(Sub_metering_3 ~ Datetime, col = "Blue")
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
## Setting working directory, creating any needed folders, downloading the
## data set, and importing the data file.
## NOTE: some of these steps may not need to be run again.
setwd("C:/Users/bdfitzgerald/Desktop/Data Science Specialist")
if(!file.exists("exploratory_data_analysis")){dir.create("exploratory_data_analysis")}
setwd("C:/Users/bdfitzgerald/Desktop/Data Science Specialist/exploratory_data_analysis")
if(!file.exists("course_project_1")){dir.create("course_project_1")}
# The original path and URL literals were split across two source lines, which
# embedded a literal newline inside the strings (breaking setwd/download.file).
# Build them in one piece instead.
setwd(paste0("C:/Users/bdfitzgerald/Desktop/Data Science Specialist/",
             "exploratory_data_analysis/course_project_1"))
fileurl <- paste0("https://d396qusza40orc.cloudfront.net/",
                  "exdata%2Fdata%2Fhousehold_power_consumption.zip")
download.file(fileurl, destfile = "electric_power_consumption.zip", mode = "wb")
unzip("electric_power_consumption.zip")
files <- list.files(full.names = TRUE)
data_full <- read.csv(files[2], sep = ";", header = TRUE)

## Making data adjustments, and subsetting the data to 2007-02-01..2007-02-02.
data_full$Date <- as.Date(data_full$Date, format = "%d/%m/%Y")
data <- subset(data_full, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)

## Making plot and saving it.
## NOTE(review): this script writes plot3.png -- confirm the intended filename.
with(data, {
  plot(Sub_metering_1 ~ Datetime, type = "l",
       ylab = "Global Active Power (kilowatts)", xlab = "")
  lines(Sub_metering_2 ~ Datetime, col = "Red")
  lines(Sub_metering_3 ~ Datetime, col = "Blue")
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
# Workspace setup and data import.
setwd("C:/Users/enerc/OneDrive/Desktop/data science/sessions/r_training")
getwd()
customer <- read.csv("Customer_churn.csv")
placement <- read.csv("Placement.csv")
View(customer)
library(dplyr)  # NOTE(review): dplyr is loaded but not used in this script -- confirm.

# Using customer churn dataset:
# 1. Calculate the standard deviation of 'tenure' column and store it in sd_tenure.
# 2. Calculate the standard deviation of 'Monthly Charges' column and store it in
#    sd_MonthlyCharges.
# 3. Calculate the standard deviation of 'Total Charges' column and store it in
#    sd_TotalCharges.
sd_tenure <- sd(customer$tenure)
sd_tenure

sd_MonthlyCharges <- sd(customer$MonthlyCharges)
sd_MonthlyCharges

# Use TRUE rather than the reassignable shorthand T.
sd_TotalCharges <- sd(customer$TotalCharges, na.rm = TRUE)
sd_TotalCharges

# Using student's placement dataset:
# 1. Calculate the standard deviation of etest and store it in sd_etest.
# 2. Calculate the standard deviation of salary and store it in sd_salary.
# 3. Calculate the standard deviation of percentage score by students in MBA (mba_p)
#    and store it in sd_mba.
sd_etest <- sd(placement$etest_p)
sd_etest

sd_salary <- sd(placement$salary, na.rm = TRUE)
sd_salary

sd_mba <- sd(placement$mba_p)
sd_mba
| /Module - 2/Assignment5.R | no_license | nitinsingh27/DataScience-With-R | R | false | false | 1,199 | r | setwd("C:/Users/enerc/OneDrive/Desktop/data science/sessions/r_training")
# (Working directory is set on the preceding line.)
getwd()
customer <- read.csv("Customer_churn.csv")
placement <- read.csv("Placement.csv")
View(customer)
library(dplyr)  # NOTE(review): dplyr is loaded but not used in this script -- confirm.

# Using customer churn dataset:
# 1. Calculate the standard deviation of 'tenure' column and store it in sd_tenure.
# 2. Calculate the standard deviation of 'Monthly Charges' column and store it in
#    sd_MonthlyCharges.
# 3. Calculate the standard deviation of 'Total Charges' column and store it in
#    sd_TotalCharges.
sd_tenure <- sd(customer$tenure)
sd_tenure

sd_MonthlyCharges <- sd(customer$MonthlyCharges)
sd_MonthlyCharges

# Use TRUE rather than the reassignable shorthand T.
sd_TotalCharges <- sd(customer$TotalCharges, na.rm = TRUE)
sd_TotalCharges

# Using student's placement dataset:
# 1. Calculate the standard deviation of etest and store it in sd_etest.
# 2. Calculate the standard deviation of salary and store it in sd_salary.
# 3. Calculate the standard deviation of percentage score by students in MBA (mba_p)
#    and store it in sd_mba.
sd_etest <- sd(placement$etest_p)
sd_etest

sd_salary <- sd(placement$salary, na.rm = TRUE)
sd_salary

sd_mba <- sd(placement$mba_p)
sd_mba
|
#'
#' sumGroups.Spectra2D
#'
#' Summarize group membership of a Spectra2D object: one row per group with
#' its name, member count, and plotting color.  Groups with no members are
#' dropped from the result.
#'
#' @noRd
#' @export
#'
sumGroups.Spectra2D <- function(spectra) {
  .chkArgs(mode = 21L)
  chkSpectra(spectra)

  gr.l <- levels(spectra$group)

  # Build the summary in one pass rather than growing a data.frame with
  # rbind() inside a loop (quadratic copying, plus the original needed a
  # dummy NA first row that then had to be dropped).
  gi <- match(gr.l, spectra$groups)  # first occurrence -> color lookup
  counts <- unname(vapply(gr.l,
                          function(g) sum(spectra$groups == g, na.rm = TRUE),
                          integer(1)))
  g.sum <- data.frame(group = gr.l, no. = counts,
                      color = spectra$colors[gi],
                      stringsAsFactors = FALSE)

  g.sum <- subset(g.sum, no. > 0) # drop groups with no members
  # Renumber rows 1..n; unlike c(1:nrow(...)), this is also safe when the
  # result is empty (1:0 would produce c(1, 0)).
  rownames(g.sum) <- NULL
  return(g.sum)
}
| /R/sumGroups.Spectra2D.R | no_license | Tejasvigupta/ChemoSpecUtils | R | false | false | 635 | r | #'
#' sumGroups.Spectra2D
#'
#' Summarize group membership of a Spectra2D object: one row per group with
#' its name, member count, and plotting color.  Groups with no members are
#' dropped from the result.
#'
#' @noRd
#' @export
#'
sumGroups.Spectra2D <- function(spectra) {
  .chkArgs(mode = 21L)
  chkSpectra(spectra)

  gr.l <- levels(spectra$group)

  # Build the summary in one pass rather than growing a data.frame with
  # rbind() inside a loop (quadratic copying, plus the original needed a
  # dummy NA first row that then had to be dropped).
  gi <- match(gr.l, spectra$groups)  # first occurrence -> color lookup
  counts <- unname(vapply(gr.l,
                          function(g) sum(spectra$groups == g, na.rm = TRUE),
                          integer(1)))
  g.sum <- data.frame(group = gr.l, no. = counts,
                      color = spectra$colors[gi],
                      stringsAsFactors = FALSE)

  g.sum <- subset(g.sum, no. > 0) # drop groups with no members
  # Renumber rows 1..n; unlike c(1:nrow(...)), this is also safe when the
  # result is empty (1:0 would produce c(1, 0)).
  rownames(g.sum) <- NULL
  return(g.sum)
}
#### CREATE STRATA PILOT BENCHMARKING APP ####
## R script that renders a Shiny app to do cost benchmarking for Strata
## Winter 2018
## Civis Analytics
## R version 3.4.2
#### PREPARE WORKSPACE ####

# Packages installed for the app.  (NOTE(review): installing packages at app
# startup slows every launch; consider moving installation to a deploy step.)
app_packages <- c(
  "devtools",
  "shiny",
  "shinythemes",
  "shinyWidgets",
  "cowplot",
  "rlang",
  "civis",
  "DT",
  "data.table"
)
install.packages(app_packages, repos = "https://cran.rstudio.com/")
# devtools::install_github("civisanalytics/civis_deckR")

# Attach libraries in the same order as before (load order affects masking).
library(ggplot2)
library(tidyverse)
library(readr)
library(cowplot)
library(rlang)
library(civis)
library(shiny)
library(shinythemes)
library(shinyWidgets)
library(DT)
library(data.table)
library(stringr)
#### UI ####
# Top-level page layout: three tabs -- (1) build the benchmark cohort,
# (2) view cost-saving opportunities, (3) drill into cost benchmarks.
ui <- fluidPage(
  theme = shinythemes::shinytheme("lumen"),
  # headerPanel("PILOT DEMO PROTOTYPE: Strata Cost Benchmarking"), # title of app; remove because there's a title on Platform
  tabsetPanel(type = "tabs",
              ## -----------< 1. Create Benchmark >-----------
              tabPanel("Create Benchmark",
                       fluidRow(
                         ## -----------<< Column 1.1: Input Hospital and Benchmark Selections >>-----------
                         column(2,
                                # parameters for "Me"
                                h3("Hospital and APR-DRG to Benchmark"),
                                # select customer / hospital system
                                # NOTE(review): customer/entity choices are hard-coded here;
                                # presumably they mirror hospital_info -- confirm.
                                selectizeInput("customer_entity", "Select a customer and entity to benchmark:",
                                               choices = c("Customer 1, Entity 1", "Customer 1, Entity 8",
                                                           "Customer 3, Entity 2", "Customer 3, Entity 3",
                                                           "Customer 4, Entity 5", "Customer 4, Entity 26",
                                                           "Customer 4, Entity 6", "Customer 5, Entity 6",
                                                           "Customer 6, Entity 1", "Customer 7, Entity 2",
                                                           "Customer 9, Entity 2", "Customer 11, Entity 1",
                                                           "Customer 12, Entity 1")),
                                # select APRDRG to benchmark (options change based off which customer is selected)
                                # (rendered server-side via output$APRDRG_selector)
                                uiOutput("APRDRG_selector"),
                                h3(""),
                                # parameters for Baseline / hospitals to bencmark against
                                h3("Benchmark Hospitals"),
                                # select specific hospitals to compare against
                                uiOutput("benchmark_selector"),
                                # select hospital regions to compare against
                                selectizeInput("region", "Select region(s):",
                                               choices = c(ALL = "", "South", "Midwest", "West"),
                                               multiple = TRUE),
                                # select bedsizes to compare against
                                selectizeInput("size", "Select bedsize(s):",
                                               choices = c(ALL = "", "less than 200", "200+"),
                                               multiple = TRUE),
                                # select specialties to compare against
                                selectizeInput("specialty", "Select specialty(ies):",
                                               choices = c(ALL = "", "Pediatric"),
                                               multiple = TRUE),
                                selectizeInput("costmodel", "Select cost model(s):",
                                               choices = c(ALL = "",
                                                           "Hospitals with Strata Standardized Cost Models" = "standard",
                                                           "Hospitals without Strata Standardized Cost Models" = "non"),
                                               multiple = TRUE),
                                # h3(""),
                                # checkboxInput("group_individual", "Check this box to see the distribution of the Benchmark patient population/encounters broken down by the specific Customers/Entities.",
                                #               value = FALSE),
                                # button to update data, plot, and tables
                                actionButton("hospital_refresh", "Compare Patient Populations")),
                         ## -----------<< Column 1.2: Output Hospital and Benchmark Characteristics >>-----------
                         column(2,
                                h3("Hospital Characteristics"),
                                # output characteristics about the hospital selected as "Me"
                                htmlOutput("hospital_institution"), # hospital institution you're benchmarking
                                htmlOutput("hospital_region"), # hospital region (e.g. Midwest))
                                htmlOutput("hospital_size"), # hospital size (e.g. 200+ beds)
                                htmlOutput("hospital_specialty"), # specialty (e.g. Pediatric)
                                h3(""),
                                h3("Benchmark Characteristics"),
                                # output characteristics about the benchmark
                                htmlOutput("benchmark_institutions"), # institutions in the benchmark
                                htmlOutput("benchmark_region"), # benchmark region
                                htmlOutput("benchmark_size"), # benchmark size (e.g. 200+ beds)
                                htmlOutput("benchmark_specialty") # specialty (e.g. Pediatric)
                         ),
                         ## -----------<< Column 1.3: Highlighted Hospitals >>-----------
                         column(2,
                                h3("Highlighted Institutions:"),
                                strong("Select specific dots by dragging your cursor on the plot, and you can see which customer(s)/entity(ies) you've highlighted below."),
                                htmlOutput("plotbrush_output")),
                         ## -----------<< Column 1.4: Distribution Plots >>-----------
                         column(6,
                                # One distribution tab per patient attribute; brushing is
                                # horizontal-only (direction = "x") and feeds the
                                # "Highlighted Institutions" panel via the *_plotbrush ids.
                                tabsetPanel(type = "tabs",
                                            tabPanel("APR-DRG Codes", plotOutput("aprdrg_plot",
                                                                                 brush = brushOpts(id = "aprdrg_plotbrush", direction = "x"),
                                                                                 width = "100%", height = "800px")),
                                            tabPanel("ROM", plotOutput("rom_plot",
                                                                       brush = brushOpts(id = "rom_plotbrush", direction = "x"),
                                                                       width = "100%", height = "800px")),
                                            tabPanel("SOI", plotOutput("soi_plot",
                                                                       brush = brushOpts(id = "soi_plotbrush", direction = "x"),
                                                                       width = "100%", height = "800px")),
                                            tabPanel("Patient Age", plotOutput("age_plot",
                                                                               brush = brushOpts(id = "age_plotbrush", direction = "x"),
                                                                               width = "100%", height = "800px")),
                                            tabPanel("Patient Type", plotOutput("type_plot",
                                                                                brush = brushOpts(id = "type_plotbrush", direction = "x"),
                                                                                width = "100%", height = "800px")),
                                            tabPanel("Patient Discharge Status", plotOutput("discharge_plot",
                                                                                            brush = brushOpts(id = "discharge_plotbrush", direction = "x"),
                                                                                            width = "100%", height = "800px"))
                                )
                         )
                       )
              ),
              ## -----------< 2. Cost Saving Opportunities >-----------
              tabPanel("Cost Saving Opportunities -- APR-DRG Codes",
                       fluidRow(
                         # button to update plot
                         actionButton("view_opportunities", "View Cost Saving Opportunities")),
                       fluidRow(plotOutput("costsavings_plot", width = "100%", height = "800px"))),
              ## -----------< 3. View Benchmark >-----------
              tabPanel("Cost Benchmark Drill-Down",
                       fluidRow(
                         ## -----------<< Column 2.1: Benchmark and Cost Breakdowns >>-----------
                         column(2,
                                # breakdowns by benchmarking groups (changes y-axis)
                                h3("Benchmark Breakdowns"),
                                checkboxGroupInput("benchmarkbreakdowns", strong("Select variables to breakdown costs by:"),
                                                   choiceNames = c("Risk of Mortality (ROM)",
                                                                   "Severity of Illness (SOI)",
                                                                   "Patient Age Bucket",
                                                                   "Patient Type",
                                                                   "Patient Discharge Status"),
                                                   choiceValues = c("ROM",
                                                                    "SOI",
                                                                    "AgeBucket",
                                                                    "PatientTypeRollup",
                                                                    "DischargeStatusGroup")),
                                # Collapsible panel with ROM/SOI grouping options; the second
                                # group's choices (ROM_2/SOI_2) are rendered server-side to
                                # exclude categories already chosen for the first group.
                                dropdownButton(tags$h3("Risk of Mortality (ROM) Grouping Options"),
                                               selectizeInput(inputId = 'rom_1', label = 'Select the ROM categories for the first group:',
                                                              choices = c(1, 2, 3, 4), multiple = TRUE),
                                               uiOutput("ROM_2"),
                                               tags$h3("Severity of Illness (SOI) Grouping Options"),
                                               selectizeInput(inputId = 'soi_1', label = 'Select the SOI categories for the first group:',
                                                              choices = c(1, 2, 3, 4), multiple = TRUE),
                                               uiOutput("SOI_2"),
                                               circle = TRUE, status = "default", icon = icon("arrow-circle-down"), width = "300px",
                                               tooltip = tooltipOptions(title = "Options for Grouping Risk of Mortality and Severity of Illness")
                                ),
                                # breakdowns by cost (changes faceting)
                                h3("Cost Breakdowns"),
                                checkboxGroupInput("costbreakdowns", strong("Select how to breakdown costs:"),
                                                   choiceNames = c("Fixed/Variable",
                                                                   "Direct/Indirect",
                                                                   "Cost Drivers"),
                                                   choiceValues = c("FixedVariable",
                                                                    "DirectIndirect",
                                                                    "CostDriver")),
                                # other options for displaying / breaking down data
                                h3("Other Options"),
                                checkboxInput("scale", "Change x-axis (costs) to log scale? (default is normal)",
                                              value = FALSE)
                         ),
                         ## -----------<< Column 2.2: Data Filters >>-----------
                         column(3,
                                # options to remove / filter data
                                h3("Filter Data"),
                                selectizeInput("ROM", "Select Risk of Mortality (ROM) value(s):",
                                               choices = c(ALL = "", "1", "2", "3", "4"),
                                               multiple = TRUE),
                                selectizeInput("SOI", "Select Severity of Illness (SOI) value(s):",
                                               choices = c(ALL = "", "1", "2", "3", "4"),
                                               multiple = TRUE),
                                selectizeInput("age", "Select patient age(s):",
                                               choices = c(ALL = "",
                                                           "Infant (less than 1 yr)" = "Infant",
                                                           "Toddler (13 mos - 23 mos)" = "Toddler",
                                                           "Early Childhood (2 yrs - 5 yrs)" = "Early Childhood",
                                                           "Middle Childhood (6 yrs - 11 yrs)" = "Middle Childhood",
                                                           "Adolescence (12 yrs - 17 yrs)" = "Adolescence",
                                                           "Adult (18 years or older)" = "Adult"),
                                               multiple = TRUE),
                                selectizeInput("patienttype", "Select patient type(s):",
                                               choices = c(ALL = "",
                                                           "Inpatient",
                                                           "Outpatient",
                                                           "Emergency"),
                                               multiple = TRUE),
                                selectizeInput("dischargestatus", "Select patient discharge status(es):",
                                               choices = c(ALL = "",
                                                           "Still a Patient",
                                                           "Discharged to home or other self care",
                                                           "Discharged to home health services",
                                                           "Left against medical advice (AMA)",
                                                           "Died",
                                                           "Transferred to other facility",
                                                           "Transferred to other short-term care facility",
                                                           "Transferred to intermediate care facility",
                                                           "Not Specified"),
                                               multiple = TRUE),
                                selectizeInput("costs", "Select cost(s):",
                                               choices = list(ALL = "",
                                                              `Cost Types` = c("Fixed",
                                                                               "Variable",
                                                                               "Direct",
                                                                               "Indirect"),
                                                              `Cost Drivers` = c("Dialysis",
                                                                                 "Excluded",
                                                                                 "Imaging",
                                                                                 "Laboratory",
                                                                                 "LOS",
                                                                                 "OR Time",
                                                                                 "Other Diagnostic Services",
                                                                                 "Pharmacy",
                                                                                 "Supplies",
                                                                                 "Blood",
                                                                                 "Therapeutic Services",
                                                                                 "Cardiovascular")),
                                               multiple = TRUE),
                                selectizeInput("qltyincidents", "Select whether to keep/remove hospital-caused quality incidents:",
                                               choices = c(BOTH = "",
                                                           "Only Encounters without Hospital-Caused Quality Incidents" = "Remove",
                                                           "Only Encounters with Hospital-Caused Quality Incidents" = "Keep"),
                                               multiple = TRUE),
                                # option to remove data
                                checkboxGroupInput("otherfilteroptions", strong("Other data filters:"),
                                                   choiceNames = c("Remove Cost Outliers (based off interquartile range (IQR))",
                                                                   "Remove Cost Outliers (based off standard deviation (sd))",
                                                                   "Remove Length of Stay Outliers (based off interquartile range (IQR))",
                                                                   "Remove Length of Stay Outliers (based off standard deviation (sd))"
                                                   ),
                                                   choiceValues = c("cost_IQR",
                                                                    "cost_SD",
                                                                    "LOS_IQR",
                                                                    "LOS_SD")),
                                # button to update data, plot, and tables
                                actionButton("refresh", "Update")
                         ),
                         ## -----------<< Column 2.3: Output >>-----------
                         column(7,
                                "Select benchmarking parameters and hit the 'UPDATE' button at the bottom right to generate benchmarks.",
                                tabsetPanel(type = "tabs",
                                            # tab with the plot
                                            tabPanel("Plot", plotOutput("plot", width = "100%", height = "800px")),
                                            # tab with data tables
                                            tabPanel("Tables",
                                                     # baseline data / data for other hospitals
                                                     h4(strong("Baseline")),
                                                     dataTableOutput("summary_df_benchmark"),
                                                     # me data / data for hospital being benchmarked
                                                     h4(strong("Me")),
                                                     dataTableOutput("summary_df_me"),
                                                     # comparison data
                                                     h4(strong("Difference")),
                                                     dataTableOutput("compare_df"))
                                )
                         )
                       )
              )
  )
)
#### SERVER ####
server <- function(input, output, session){
## -----------< Load Helper Functions and Data >-----------
# Load shared helper functions used below (e.g. calcSummary, labelAPRDRG).
source("StrataFunctions.R", local = TRUE)
#source("/Users/cwang/Desktop/Strata/StrataPIlotPrototype/StrataFunctions.R")
# read in tables from S3 (deprecated; files have now been written to Platform)
# `full` = encounter/cost-line-level data; `hospital_info` = one row per customer/entity.
# NOTE(review): the numeric arguments are Civis file ids -- confirm they still point
# to the current extracts before deploying.
full <- read_civis(x = 10051504)
hospital_info <- read_civis(x = 10051505)
# full <- read_civis(x = "public.full", database = "Strata Decision Technologies", verbose = TRUE)
# names(full) <- c("Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "APRDRGCODE", "customer_entity",
#                  "IsStrataStandardCost", "EncounterID", "ROM", "SOI", "AgeBucket", "PatientTypeRollup", "DischargeStatusGroup",
#                  "CostDriver", "HospitalAcqCondition", "LengthOfStay", "CostKey", "Costs")
#
# hospital_info <- read_civis(x = "public.hospital_info", database = "Strata Decision Technologies", verbose = TRUE)
# names(hospital_info) <- c("CustomerID", "EntityID", "Beds", "City", "State", "Region", "Sub_Region", "Bedsize_Bucket",
#                  "IsStrataStandardCost", "EntityID_fixed", "Beds_fixed", "Specialty", "customer_entity")
## -----------< UI Inputs and Outputs >-----------
## Dependent UI Inputs
# APR-DRG Code -- input options change based off which Customer & Entity are selected
output$APRDRG_selector = renderUI({
  # APR-DRG choices are restricted to the codes present in the cost data for
  # the currently selected customer/entity
  is_me <- hospital_info$customer_entity == input$customer_entity
  me_customer <- hospital_info$CustomerID[is_me]
  me_entity <- hospital_info$EntityID_fixed[is_me]
  available_codes <- unique(full$APRDRGCODE[full$CustomerID == me_customer &
                                              full$EntityID == me_entity])
  selectizeInput(inputId = "APRDRG", "Select an APR-DRG to benchmark:",
                 choices = labelAPRDRG(available_codes))
})
# Customer ID and Entity ID
output$benchmark_selector = renderUI({
  # every hospital except the one being benchmarked is a candidate baseline
  other_hospitals <- hospital_info$customer_entity[hospital_info$customer_entity != input$customer_entity]
  selectizeInput(inputId = "customer_entity_benchmark",
                 "Select customer(s) and entity(ies) to benchmark against:",
                 choices = c(ALL = "", other_hospitals),
                 multiple = TRUE)
})
# ROM_2 -- input options change based off groups for rom_1
output$ROM_2 = renderUI({
  # the second ROM group may only use categories not already claimed by group 1
  remaining_rom <- setdiff(c(1, 2, 3, 4), input$rom_1)
  selectizeInput(inputId = "rom_2",
                 "Select the ROM categories for the second group:",
                 choices = remaining_rom,
                 multiple = TRUE)
})
# SOI_2 -- input options change based off groups for soi_1
output$SOI_2 = renderUI({
  # the second SOI group may only use categories not already claimed by group 1
  remaining_soi <- setdiff(c(1, 2, 3, 4), input$soi_1)
  selectizeInput(inputId = "soi_2",
                 "Select the SOI categories for the second group:",
                 choices = remaining_soi,
                 multiple = TRUE)
})
## UI output hospital information
# Hospital Institution -- outputs the institution you're benchmarking
output$hospital_institution = renderText({
  # HTML label followed by the customer/entity being benchmarked
  label <- "<b>Hospital Institution:</b><br/>"
  paste(label, input$customer_entity)
})
# Region -- outputs the region of the Customer & Entity selected
output$hospital_region = renderText({
  # look up the region of the selected customer/entity
  region <- hospital_info$Region[hospital_info$customer_entity == input$customer_entity]
  paste("<b>Hospital Region:</b><br/>", region)
})
# Size -- outputs the bedsize of the Customer & Entity selected
output$hospital_size = renderText({
  # look up the bed-size bucket of the selected customer/entity
  bed_size <- hospital_info$Beds_fixed[hospital_info$customer_entity == input$customer_entity]
  paste("<b>Hospital Bed Size:</b><br/>", bed_size)
})
# Specialty -- outputs the specialty of the Customer & Entity selected (e.g. Pediatric)
output$hospital_specialty = renderText({
  # look up the specialty (e.g. Pediatric) of the selected customer/entity
  specialty <- hospital_info$Specialty[hospital_info$customer_entity == input$customer_entity]
  paste("<b>Hospital Specialty:</b><br/>", specialty)
})
## UI output hospital benchmark information
# Customer and Entity -- outputs the Customers(s) and Entity(ies) that make up the benchmark
# Benchmark institutions -- lists the customer/entities that make up the baseline.
# The candidate pool is first restricted by the cost-model selection, then narrowed
# by explicit entity picks and/or the region/size/specialty criteria.
# The original contained the same ~15-line paste/ifelse expression three times,
# once per cost-model branch; only the candidate data frame differs, so the
# branches now share a single copy of that expression.
output$benchmark_institutions = renderText({
  # restrict the candidate pool by cost model
  # (selecting both options, or neither, means "no restriction")
  if(length(input$costmodel) == 1 & "standard" %in% input$costmodel){
    df <- hospital_info %>%
      filter(IsStrataStandardCost == TRUE)
  }
  else if(length(input$costmodel) == 1 & "non" %in% input$costmodel){
    df <- hospital_info %>%
      filter(IsStrataStandardCost == FALSE)
  }
  else {
    df <- hospital_info
  }
  paste("<b>Benchmark Institution(s):</b><br/>",
        ifelse(is.null(input$customer_entity_benchmark) & is.null(input$region) & is.null(input$size) & is.null(input$specialty),
               # if no inputs, then take all the hospitals that aren't the one selected
               paste(as.vector(unique(df$customer_entity[df$customer_entity != input$customer_entity])), collapse = "<br/>"),
               ifelse(is.null(input$region) & is.null(input$size) & is.null(input$specialty),
                      # only explicit entity picks: use just those
                      paste(as.vector(unique(df$customer_entity[df$customer_entity != input$customer_entity
                                                                & df$customer_entity %in% input$customer_entity_benchmark])), collapse = "<br/>"),
                      # entity picks OR hospitals matching every non-empty profile criterion
                      paste(as.vector(unique(df$customer_entity[df$customer_entity != input$customer_entity
                                                                & (df$customer_entity %in% input$customer_entity_benchmark
                                                                   | ((df$Region %in% input$region | is.null(input$region))
                                                                      & (df$Beds_fixed %in% input$size | is.null(input$size))
                                                                      & (df$Specialty %in% input$specialty | is.null(input$specialty))))])),
                            collapse = "<br/>")
               )))
})
# Region -- outputs the region of the Customer(s) & Entity(ies) selected
output$benchmark_region = renderText({
  # with no filters, list every other hospital's region; otherwise the regions
  # implied by the explicit entity picks plus the region filter itself
  if(is.null(input$customer_entity_benchmark) & is.null(input$region)){
    regions <- unique(hospital_info$Region[hospital_info$customer_entity != input$customer_entity])
  } else {
    regions <- unique(hospital_info$Region[hospital_info$customer_entity %in% input$customer_entity_benchmark
                                           | hospital_info$Region %in% input$region])
  }
  paste("<b>Benchmark Region(s):</b><br/>",
        paste(as.vector(regions), collapse = ", "))
})
# Size -- outputs the bedsize of the Customer(s) & Entity(ies) selected
output$benchmark_size = renderText({
  # with no filters, list every other hospital's bed size; otherwise the sizes
  # implied by the explicit entity picks plus the size filter itself
  if(is.null(input$customer_entity_benchmark) & is.null(input$size)){
    sizes <- unique(hospital_info$Beds_fixed[hospital_info$customer_entity != input$customer_entity])
  } else {
    sizes <- unique(hospital_info$Beds_fixed[hospital_info$customer_entity %in% input$customer_entity_benchmark
                                             | hospital_info$Beds_fixed %in% input$size])
  }
  paste("<b>Benchmark Bed Size(s):</b><br/>",
        paste(as.vector(sizes), collapse = ", "))
})
# Specialty -- outputs the specialty of the Customer(s) & Entity(ies) selected (e.g. Pediatric)
# Specialty -- the specialty(ies) implied by the current benchmark selection.
# NOTE(review): a second renderText further down (the "Cost Model" block) is ALSO
# assigned to output$benchmark_specialty; Shiny keeps only the last assignment,
# so this specialty text is never actually rendered. See the note on that block.
output$benchmark_specialty = renderText({
  paste("<b>Benchmark Specialty(ies):</b><br/>",
        ifelse(is.null(input$customer_entity_benchmark) & is.null(input$specialty),
               paste(as.vector(unique(hospital_info$Specialty[hospital_info$customer_entity != input$customer_entity])), collapse = ", "),
               paste(as.vector(unique(hospital_info$Specialty[hospital_info$customer_entity %in% input$customer_entity_benchmark | hospital_info$Specialty %in% input$specialty])), collapse = ", ")))
})
# Cost Model -- outputs the cost models of the hospitals
# Cost Model -- outputs the cost model(s) of the hospitals being benchmarked against.
# BUG(review): this render is assigned to output$benchmark_specialty, the SAME id as
# the specialty output above. Shiny keeps only this later assignment, so the UI slot
# bound to "benchmark_specialty" shows cost-model text instead of specialty, and no
# separate cost-model slot is populated. Likely fix: rename this to
# output$benchmark_costmodel -- confirm the matching uiOutput/htmlOutput id in the UI
# definition (outside this chunk) before changing.
output$benchmark_specialty = renderText({
  if(length(input$costmodel) == 1 & "standard" %in% input$costmodel){
    out <- c("Strata Standard Cost Model")
  }
  else if(length(input$costmodel) == 1 & "non" %in% input$costmodel){
    out <- c("Not Strata Standard Cost Model")
  }
  else {
    # none or both selected -> show both models
    out <- c("Strata Standard Cost Model", "Not Strata Standard Cost Model")
  }
  paste("<b>Benchmark Cost Model(s):</b><br/>",
        paste(out, collapse = ", "))
})
## -----------< Data Munging >-----------
## -----------<< hospital_df >>-----------
hospital_df <- eventReactive(input$hospital_refresh | input$view_opportunities, {
  # Encounter-level data for the hospital overview: flags each row as belonging
  # to the selected hospital ("Me") or the comparison pool ("Baseline"), then
  # rolls costs up to one row per encounter.
  df <- full
  ## flag rows matching the APR-DRG being benchmarked
  df$m1 <- df$APRDRGCODE == input$APRDRG
  ## flag rows belonging to the hospital being benchmarked
  df$h1 <- df$customer_entity == input$customer_entity
  ## comparison-pool profile filters; an unset input means "no restriction"
  df$c1 <- if(is.null(input$region)) TRUE else df$Region %in% input$region
  df$c2 <- if(is.null(input$size)) TRUE else df$Beds_fixed %in% input$size
  df$c3 <- if(is.null(input$specialty)) TRUE else df$Specialty %in% input$specialty
  ## explicit customer/entity picks
  df$c4 <- if(is.null(input$customer_entity_benchmark)) TRUE else df$customer_entity %in% input$customer_entity_benchmark
  ## cost-model filter: restrict only when exactly one of the two options is chosen
  if(length(input$costmodel) == 1){
    if("standard" %in% input$costmodel){
      df$c_costmodel <- df$IsStrataStandardCost == "TRUE"
    } else if("non" %in% input$costmodel){
      df$c_costmodel <- df$IsStrataStandardCost == "FALSE"
    }
  } else {
    # none or both selected -> keep all cost models
    df$c_costmodel <- TRUE
  }
  ## master pool filter:
  ##  - entity picks only    -> use the picks
  ##  - profile filters only -> region AND size AND specialty
  ##  - both                 -> profile match OR explicit pick
  ##  - neither              -> everything
  profile_filtered <- any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty))
  entities_picked <- !is.null(input$customer_entity_benchmark)
  if(!profile_filtered & entities_picked){
    df$c5 <- df$c4
  } else if(profile_filtered & !entities_picked){
    df$c5 <- df$c1 & df$c2 & df$c3
  } else if(profile_filtered & entities_picked){
    df$c5 <- (df$c1 & df$c2 & df$c3) | df$c4
  } else {
    df$c5 <- TRUE
  }
  ## keep the benchmarked hospital plus the (cost-model-restricted) comparison pool,
  ## then aggregate costs per encounter
  df <- df %>%
    filter(h1 | (c5 & c_costmodel)) %>%
    mutate("Group" = ifelse(h1, "Me", "Baseline"),
           "APRDRG_benchmark" = ifelse(m1, APRDRGCODE, NA)) %>%
    group_by(Region, Beds_fixed, Specialty, customer_entity, APRDRGCODE, EncounterID, ROM, SOI, AgeBucket, PatientTypeRollup, DischargeStatusGroup,
             Group, APRDRG_benchmark) %>%
    summarise("Count" = 1,
              "Costs" = sum(Costs)) %>% ungroup()
  return(df)
})
## -----------<< main_df >>-----------
# Encounter-level dataframe with benchmark grouping columns and cost grouping columns as well as columns with cost information;
# the code below filters the full dataframe of all cost data, based off user inputs about how to filter the data
# the data is also labeled as "Me" or "Baseline" to indicate which costs go towards the benchmark, and which go to the hospital of interest
main_df <- eventReactive(input$refresh, {
  ## Encounter-level data frame with benchmark/cost grouping columns, filtered per
  ## the user's inputs and labeled "Me" vs "Baseline".
  ## Fixes in this revision:
  ##  - the LOS standard-deviation outlier branch previously filtered against the
  ##    IQR cutoffs (copy-paste bug); it now uses sd_outlier_high/low,
  ##  - the final validate() compared nrow(main_df > 0) instead of nrow(main_df) > 0,
  ##    so the "zero rows after outlier filtering" message could never fire.
  ## grab full dataframe of customer data from global environment; summarised at the most granular level of grouping
  main_df <- full
  ## APRDRG code filter
  main_df$m1 <- ifelse(main_df$APRDRGCODE == input$APRDRG, TRUE, FALSE)
  ## "me" / hospital filter
  main_df$h1 <- ifelse(main_df$customer_entity == input$customer_entity, TRUE, FALSE) # filter for input Customer ID and Entity ID
  ## hospital comparison filters (an unset input means "no restriction")
  if(!is.null(input$region)){
    main_df$c1 <- ifelse(main_df$Region %in% input$region, TRUE, FALSE) # filter for hospital region
  } else {
    main_df$c1 <- TRUE
  }
  if(!is.null(input$size)){
    main_df$c2 <- ifelse(main_df$Beds_fixed %in% input$size, TRUE, FALSE) # filter for hospital size
  } else {
    main_df$c2 <- TRUE
  }
  if(!is.null(input$specialty)){
    main_df$c3 <- ifelse(main_df$Specialty %in% input$specialty, TRUE, FALSE) # filter for hospital specialty
  } else {
    main_df$c3 <- TRUE
  }
  # filter for specific hospital inputs
  if(!is.null(input$customer_entity_benchmark)){
    main_df$c4 <- ifelse(main_df$customer_entity %in% input$customer_entity_benchmark, TRUE, FALSE)
  } else {
    main_df$c4 <- TRUE
  }
  # if only select one of the two options for input costmodel, then it's standard or non-standard
  if(length(input$costmodel) == 1){
    if("standard" %in% input$costmodel){
      main_df$c_costmodel <- ifelse(main_df$IsStrataStandardCost == "TRUE", TRUE, FALSE)
    } else if("non" %in% input$costmodel){
      main_df$c_costmodel <- ifelse(main_df$IsStrataStandardCost == "FALSE", TRUE, FALSE)
    }
  }
  # if select none or both of the two options for input cost model, then it's all of them
  else {
    main_df$c_costmodel <- TRUE
  }
  # master hospital benchmark filter
  # if only input customers/entities to benchmark against, only use that column to filter
  if(all(is.null(input$region), is.null(input$size), is.null(input$specialty)) & !is.null(input$customer_entity_benchmark)){
    main_df$c5 <- ifelse(main_df$c4, TRUE, FALSE)
  }
  # if input region/size/specialty filters, but not customer entity filters, then only use those filters
  else if(any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty)) & is.null(input$customer_entity_benchmark)){
    main_df$c5 <- ifelse(main_df$c1 & main_df$c2 & main_df$c3, TRUE, FALSE)
  }
  # if input region/size/specialty filters and customer entity filters, then profile match OR explicit pick
  else if(any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty)) & !is.null(input$customer_entity_benchmark)){
    main_df$c5 <- ifelse((main_df$c1 & main_df$c2 & main_df$c3) | main_df$c4, TRUE, FALSE)
  }
  # if none selected; then else
  else {
    main_df$c5 <- TRUE
  }
  ## benchmark filters
  if(!is.null(input$ROM)){
    main_df$m2 <- ifelse(main_df$ROM %in% input$ROM, TRUE, FALSE) # filter ROM
  } else {
    main_df$m2 <- TRUE
  }
  if(!is.null(input$SOI)){
    main_df$m3 <- ifelse(main_df$SOI %in% input$SOI, TRUE, FALSE) # filter SOI
  } else {
    main_df$m3 <- TRUE
  }
  if(!is.null(input$age)){
    main_df$m4 <- ifelse(main_df$AgeBucket %in% input$age, TRUE, FALSE) # filter patient age buckets
  } else {
    main_df$m4 <- TRUE
  }
  if(!is.null(input$patienttype)){
    main_df$m5 <- ifelse(main_df$PatientTypeRollup %in% input$patienttype, TRUE, FALSE) # filter patient types
  } else {
    main_df$m5 <- TRUE
  }
  if(!is.null(input$dischargestatus)){
    main_df$m6 <- ifelse(main_df$DischargeStatusGroup %in% input$dischargestatus, TRUE, FALSE) # filter patient discharge statuses
  } else {
    main_df$m6 <- TRUE
  }
  ## cost filters: a row passes when it matches as many of the three cost columns
  ## (Fixed/Variable, Direct/Indirect, CostDriver) as any row can match
  if(length(input$costs) > 0){
    main_df$temp1 <- ifelse(main_df$FixedVariable %in% input$costs, 1, 0) # if filtering Fixed/Variable costs, mark with 1
    main_df$temp2 <- ifelse(main_df$DirectIndirect %in% input$costs, 1, 0) # if filtering Direct/Indirect costs, mark with 1
    main_df$temp3 <- ifelse(main_df$CostDriver %in% input$costs, 1, 0) # if filtering CostDrivers, mark with 1
    main_df$temp_all <- main_df$temp1 + main_df$temp2 + main_df$temp3 # create column with sum of all cost filters (min 0, max 3)
    if(max(main_df$temp_all) == 1){
      main_df$m7 <- ifelse(main_df$temp_all == 1, TRUE, FALSE) # filtering by 1 of the 3 cost column filters
    }
    if(max(main_df$temp_all) == 2){
      main_df$m7 <- ifelse(main_df$temp_all == 2, TRUE, FALSE) # filtering by 2 of the 3 cost column filters
    }
    if(max(main_df$temp_all) == 3){
      main_df$m7 <- ifelse(main_df$temp_all == 3, TRUE, FALSE) # filtering by 3 of the 3 cost column filters
    }
  } else {
    main_df$m7 <- TRUE
  }
  ## hospital-acquired quality incident filters
  # if only one quality incident filter selected
  if(length(input$qltyincidents) == 1){
    # if only have "Only Keep Encounters without Hospital-Acquired Quality Incidents"
    if(min(input$qltyincidents) == "Remove"){
      main_df$m8 <- ifelse(main_df$HospitalAcqCondition == "0", TRUE, FALSE) # filter out hospital-acquired conditions/hospital-caused quality incidents
    }
    # if only have "Only Keep Encounters with Hospital-Acquired Quality Incidents"
    else if(min(input$qltyincidents) == "Keep"){
      main_df$m8 <- ifelse(main_df$HospitalAcqCondition == "1", TRUE, FALSE) # filter out NON hospital-acquired conditions/hospital-caused quality incidents
    }
  }
  # if both selected, or neither selected, keep both
  else if(is.null(input$qltyincidents) | length(input$qltyincidents) == 2){
    main_df$m8 <- TRUE
  }
  ## set conditions for filtering
  hospital_conditions <- c((main_df$c5 & main_df$c_costmodel) | main_df$h1) # filters for "Me" and "Baseline"
  filter_conditions <- c(main_df$m1 & main_df$m2 & main_df$m3 & main_df$m4 & main_df$m5 & main_df$m6 & main_df$m7 & main_df$m8) # parameter filters
  ## filter data frame
  main_df <- main_df[hospital_conditions & filter_conditions,] %>%
    mutate(Group = factor(ifelse(h1, "Me", "Baseline"), # create variable to facet Baseline vs. Hospital to Compare (Me)
                          levels = c("Baseline", "Me"),
                          ordered = TRUE),
           Name = "Benchmark")
  ## only check that both ROM groupings are filled in if one of them is specified
  if(!is.null(input$rom_1) | !is.null(input$rom_2)){
    validate(
      need(all(c(input$rom_1, input$rom_2) %in% unique(main_df$ROM)), "You're missing some of the Risk of Mortality (ROM) values you're grouping by. You probably filtered them out. Please add them back in.")
    )
    # check to see that the ROM values that we're custom grouping by haven't been filtered out
    if("ROM" %in% input$benchmarkbreakdowns){
      validate(
        need(!is.null(input$rom_1) & !is.null(input$rom_2), "You've custom selected Risk of Mortality (ROM) values for only one group. Please select values for both groups.")
      )
    }
  }
  ## only check that both SOI groupings are filled in if one of them is specified
  if(!is.null(input$soi_1) | !is.null(input$soi_2)){
    validate(
      need(all(c(input$soi_1, input$soi_2) %in% unique(main_df$SOI)), "You're missing some of the Severity of Illness (SOI) values you're grouping by. You probably filtered them out. Please add them back in.")
    )
    # check to see that the SOI values that we're custom grouping by haven't been filtered out
    if("SOI" %in% input$benchmarkbreakdowns){
      validate(
        need(!is.null(input$soi_1) & !is.null(input$soi_2), "You've custom selected Severity of Illness (SOI) values for only one group. Please select values for both groups.")
      )
    }
  }
  if("ROM" %in% input$benchmarkbreakdowns){
    main_df <- main_df %>%
      # if have custom groupings for ROM, create column with custom grouping for ROM Group 1
      mutate(ROM_group1 = ifelse(!is.null(input$rom_1) & ROM %in% input$rom_1,
                                 paste0("ROM ", paste0(input$rom_1, collapse = "/")), # e.g. "ROM 1/2"
                                 NA),
             # if have custom groupings for ROM, create column with custom grouping for ROM Group 2
             ROM_group2 = ifelse(!is.null(input$rom_2) & ROM %in% input$rom_2,
                                 paste0("ROM ", paste0(input$rom_2, collapse = "/")), # e.g. "ROM 3/4"
                                 NA),
             # coalesce custom groupings into one column
             ROM_group = coalesce(ROM_group1, ROM_group2))
  }
  if("SOI" %in% input$benchmarkbreakdowns){
    main_df <- main_df %>%
      # if have custom groupings for SOI, create column with custom groupings for SOI Group 1
      mutate(SOI_group1 = ifelse(!is.null(input$soi_1) & SOI %in% input$soi_1,
                                 paste0("SOI ", paste0(input$soi_1, collapse = "/")),
                                 NA),
             # if have custom groupings for SOI, create column with custom groupings for SOI Group 2
             SOI_group2 = ifelse(!is.null(input$soi_2) & SOI %in% input$soi_2,
                                 paste0("SOI ", paste0(input$soi_2, collapse = "/")),
                                 NA),
             # coalesce custom groupings into one column
             SOI_group = coalesce(SOI_group1, SOI_group2))
  }
  ## check to see if there's still data after filtering
  validate(
    need(nrow(main_df) > 0, "The data has zero rows due to filtering. Please adjust your filters.")
  )
  ## add column for benchmark breakdowns; if multiple benchmark breakdowns selected, concatenate columns with "&" in between columns
  if(!is.null(input$benchmarkbreakdowns)){
    # initialize empty variables to indicate keep/remove ROM/SOI for custom grouping option
    remove_ROM <- c()
    keep_ROM <- c()
    remove_SOI <- c()
    keep_SOI <- c()
    # if have custom grouping for ROM, remove ROM column from grouping and add custom grouping column for ROM
    if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
      remove_ROM <- c("ROM")
      keep_ROM <- c("ROM_group")
    }
    # if have custom grouping for SOI, remove SOI column from grouping and add custom grouping column for SOI
    if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
      remove_SOI <- c("SOI")
      keep_SOI <- ("SOI_group")
    }
    # grab all of the possible groupings
    all_groupings <- c(input$benchmarkbreakdowns, keep_ROM, keep_SOI)
    # remove unwanted groupings if specified
    keep_benchmarkgroups <- setdiff(all_groupings, c(remove_ROM, remove_SOI))
    # coalesce all of the grouping column values into one column for axis naming purposes
    # NOTE: tidyr::unite_ is deprecated but kept for behavioral compatibility
    main_df <- tidyr::unite_(main_df, "BenchmarkGrouping", keep_benchmarkgroups, sep = " & ", remove = FALSE)
  } else {
    main_df$BenchmarkGrouping <- main_df$APRDRGCODE # if no breakdowns selected, use APRDRGCODE as default y-axis
  }
  ## add column for cost breakdowns; if multiple cost breakdowns selected, concatenate columns with "&" in between columns
  if(!is.null(input$costbreakdowns)){
    main_df <- tidyr::unite_(main_df, "CostGrouping", input$costbreakdowns, sep = " & ", remove = FALSE)
  } else {
    main_df$CostGrouping <- NA # if no cost breakdowns selected, default is NA
  }
  ## group data based off benchmark breakdowns and cost breakdowns
  # if inputs for both benchmark and cost breakdowns
  if(!is.null(input$benchmarkbreakdowns) & !is.null(input$costbreakdowns)){
    groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
                   "CostGrouping", "BenchmarkGrouping", "EncounterID", keep_benchmarkgroups, input$costbreakdowns)
    outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", keep_benchmarkgroups, input$costbreakdowns)
  }
  # if inputs for only benchmark breakdowns
  if(!is.null(input$benchmarkbreakdowns) & is.null(input$costbreakdowns)){
    groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
                   "CostGrouping", "BenchmarkGrouping", "EncounterID", keep_benchmarkgroups)
    outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", keep_benchmarkgroups)
  }
  # if inputs for only cost breakdowns
  if(!is.null(input$costbreakdowns) & is.null(input$benchmarkbreakdowns)){
    groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
                   "CostGrouping", "BenchmarkGrouping", "EncounterID", input$costbreakdowns)
    outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", input$costbreakdowns)
  }
  # if no inputs for both benchmark and cost breakdowns
  if(is.null(input$costbreakdowns) & is.null(input$benchmarkbreakdowns)){
    groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
                   "CostGrouping", "BenchmarkGrouping", "EncounterID")
    outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping")
  }
  ## group by input groupings and re-calculate so that data is at the most granular grouping specified by the user
  # if no grouping parameters specified (e.g. no benchmark or cost breakdowns), most granular level is Encounter level
  main_df <- main_df %>%
    group_by(.dots = groupings) %>%
    summarise(Costs = sum(Costs)) %>% ungroup()
  ## remove length of stay outliers if selected
  if(TRUE %in% (grepl("LOS", input$otherfilteroptions))){
    # grab current column names of main df; will use these later to select original columns
    # (to avoid duplicate column name issues if user selects to remove both LOS and cost outliers)
    save <- colnames(main_df)
    # calculate LOS summary statistics and outlier cutoffs based off IQR and standard deviation
    LOS_filters <- main_df %>%
      calcSummary(df = ., summary_var = "LengthOfStay", outlier_threshold = 2, grouping_vars = outlier_groupings)
    # join summary statistics and outlier cutoffs to main df
    main_df <- main_df %>%
      left_join(LOS_filters, by = outlier_groupings)
    # remove LOS IQR outliers if selected
    if("LOS_IQR" %in% input$otherfilteroptions){
      main_df$o1_los <- case_when(
        main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
        main_df$LengthOfStay > main_df$IQR_outlier_high | main_df$LengthOfStay < main_df$IQR_outlier_low ~ FALSE, # IQR outliers
        TRUE ~ TRUE) # keep non-outliers
    }
    else {
      main_df$o1_los <- TRUE
    }
    # remove LOS standard deviation outliers if selected
    if("LOS_SD" %in% input$otherfilteroptions){
      main_df$o2_los <- case_when(
        main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
        # standard deviation outliers (fixed: this branch previously reused the IQR cutoffs)
        main_df$LengthOfStay > main_df$sd_outlier_high | main_df$LengthOfStay < main_df$sd_outlier_low ~ FALSE,
        TRUE ~ TRUE) # keep non-outliers
    }
    else {
      main_df$o2_los <- TRUE
    }
    # remove LOS outliers
    main_df <- main_df[c(main_df$o1_los & main_df$o2_los), save]
  }
  ## remove cost outliers if selected
  if(TRUE %in% (grepl("cost", input$otherfilteroptions))){
    # grab current column names of main df; will use these later to select original columns
    # (to avoid duplicate column name issues if user selects to remove both LOS and cost outliers)
    save <- colnames(main_df)
    # calculate cost summary statistics and outlier cutoffs based off IQR and standard deviation
    cost_filters <- main_df %>%
      calcSummary(df = ., summary_var = "Costs", outlier_threshold = 2, grouping_vars = outlier_groupings)
    # join summary statistics and outlier cutoffs to main df
    main_df <- main_df %>%
      left_join(cost_filters, by = outlier_groupings)
    # remove cost IQR outliers if selected
    if("cost_IQR" %in% input$otherfilteroptions){
      main_df$o1_cost <- case_when(
        main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
        main_df$Costs > main_df$IQR_outlier_high | main_df$Costs < main_df$IQR_outlier_low ~ FALSE, # IQR outliers
        TRUE ~ TRUE) # keep non-outliers
    }
    else {
      main_df$o1_cost <- TRUE
    }
    # remove cost standard deviation outliers if selected
    if("cost_SD" %in% input$otherfilteroptions){
      main_df$o2_cost <- case_when(
        main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
        main_df$Costs > main_df$sd_outlier_high | main_df$Costs < main_df$sd_outlier_low ~ FALSE, # standard deviation outliers
        TRUE ~ TRUE) # keep non-outliers
    }
    else {
      main_df$o2_cost <- TRUE
    }
    # remove cost outliers
    main_df <- main_df[c(main_df$o1_cost & main_df$o2_cost), save]
  }
  ## check to see if there's still data after filtering for outliers
  ## (fixed: was need(nrow(main_df > 0), ...), which always passed)
  validate(
    need(nrow(main_df) > 0, "The data has zero rows due to outlier filtering. Please adjust your filters.")
  )
  return(main_df)
})
## -----------<< summary_df_benchmark >>-----------
# data frame with summary statistics for all the baseline hospitals
# this data frame is used to create the labels for the boxplots, as well as the data tables
summary_df_benchmark <- eventReactive(input$refresh, {
  # Summary statistics per grouping for the "Baseline" rows of main_df; feeds
  # the boxplot labels and the baseline data table.
  # When custom ROM/SOI groupings are active, group by the custom
  # ROM_group/SOI_group columns instead of the raw ROM/SOI columns.
  drop_groups <- c()
  add_groups <- c()
  if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
    drop_groups <- c(drop_groups, "ROM")
    add_groups <- c(add_groups, "ROM_group")
  }
  if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
    drop_groups <- c(drop_groups, "SOI")
    add_groups <- c(add_groups, "SOI_group")
  }
  keep_benchmarkgroups <- setdiff(c(input$benchmarkbreakdowns, add_groups), drop_groups)
  groups <- c("Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", keep_benchmarkgroups, input$costbreakdowns)
  out <- main_df() %>%
    filter(Group == "Baseline") %>%
    calcSummary(df = ., summary_var = "Costs", outlier_threshold = 2, grouping_vars = groups)
  ## make sure filtering left some baseline data to benchmark against
  validate(
    need(nrow(out) > 0, "There is no baseline data due to filtering (i.e. there is no data for the 'Baseline'). Please adjust your data filters.")
  )
  return(out)
})
## -----------<< summary_df_me >>-----------
# data frame with summary statistics for the hospital of interest
# this data frame is used to create the labels for the boxplots, as well as the data tables
summary_df_me <- eventReactive(input$refresh, {
  # Summary statistics per grouping for the "Me" rows of main_df (the hospital
  # being benchmarked); feeds the boxplot labels and the "Me" data table.
  # When custom ROM/SOI groupings are active, group by the custom
  # ROM_group/SOI_group columns instead of the raw ROM/SOI columns.
  drop_groups <- c()
  add_groups <- c()
  if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
    drop_groups <- c(drop_groups, "ROM")
    add_groups <- c(add_groups, "ROM_group")
  }
  if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
    drop_groups <- c(drop_groups, "SOI")
    add_groups <- c(add_groups, "SOI_group")
  }
  keep_benchmarkgroups <- setdiff(c(input$benchmarkbreakdowns, add_groups), drop_groups)
  groups <- c("Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", keep_benchmarkgroups, input$costbreakdowns)
  out <- main_df() %>%
    filter(Group == "Me") %>%
    calcSummary(df = ., summary_var = "Costs", outlier_threshold = 2, grouping_vars = groups)
  ## make sure filtering left some "Me" data to benchmark
  validate(
    need(nrow(out) > 0, "There is no data to benchmark due to filtering (i.e. there is no data for 'Me'). Please adjust your data filters.")
  )
  return(out)
})
## -----------<< compare_df >>-----------
# data frame with the summary information for "Me" and the "Baseline" next to each other in order to calculate differences
# used to create labels for the difference barplots, as well as the comparison data table
compare_df <- eventReactive(input$refresh, {
  # Joins the "Baseline" and "Me" summary frames side by side on the grouping
  # columns and computes their differences; used for the difference barplot
  # labels and the comparison data table.
  # initialize empty variables to indicate keep/remove ROM/SOI for custom grouping option
  remove_ROM <- c()
  keep_ROM <- c()
  remove_SOI <- c()
  keep_SOI <- c()
  # if have custom grouping for ROM, remove ROM column from grouping and add custom grouping column for ROM
  if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
    remove_ROM <- c("ROM")
    keep_ROM <- c("ROM_group")
  }
  # if have custom grouping for SOI, remove SOI column from grouping and add custom grouping column for SOI
  if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
    remove_SOI <- c("SOI")
    keep_SOI <- ("SOI_group")
  }
  # grab all of the possible groupings
  all_groupings <- c(input$benchmarkbreakdowns, keep_ROM, keep_SOI)
  # remove unwanted groupings if specified
  keep_benchmarkgroups <- setdiff(all_groupings, c(remove_ROM, remove_SOI))
  # join keys: note Group is NOT a key here (one side is "Me", the other "Baseline")
  groups <- c("APRDRGCODE", "BenchmarkGrouping", "CostGrouping", keep_benchmarkgroups, input$costbreakdowns)
  # grab summary df of "Me"
  me <- summary_df_me()
  # append "_ME" to end of summary column names (excluding join keys (i.e. groups))
  colnames(me)[!(colnames(me) %in% groups)] <- paste0(colnames(me)[!(colnames(me) %in% groups)], "_ME")
  # full join the summary df of all the benchmark hospitals, and the summary df of "Me"
  # (full join keeps groupings that exist on only one side; those rows get NA
  # summaries and are flagged via empty_flag below)
  compare_out <- summary_df_benchmark() %>%
    full_join(me, by = groups)
  compare_out <- compare_out %>%
    mutate(diff_median = round(median_ME - median, 2), # difference in medians b/w "Me" and benchmark
           diff_mean = round(mean_ME - mean, 2), # difference in mean b/w "Me" and benchmark
           # proportional difference in median, relative to the "Me" median
           # (divide by 1 when "Me" is zero to avoid Inf; coalesce maps NA to 0)
           proport_diff_median = ifelse(is.infinite(diff_median/median_ME),
                                        round(coalesce(diff_median / 1, 0), 2), # if division by zero, divide by 1 instead
                                        round(coalesce(diff_median / median_ME, 0), 2)),
           # proportional difference in mean, relative to the "Me" mean (same fallback)
           proport_diff_mean = ifelse(is.infinite(diff_mean/mean_ME),
                                      round(coalesce(diff_mean / 1, 0), 2), # if division by zero, divide by 1 instead
                                      round(coalesce(diff_mean / mean_ME, 0), 2)),
           Difference = "Difference", # column to indicate this is the "difference" data frame; used for faceting
           empty_flag = ifelse(is.na(min) | is.na(min_ME), 1, 0)) # flag rows where one side of the join had no data
  return(compare_out)
})
## -----------< Reactive Plotting >-----------
## -----------<< Patient/Benchmark Comparison Plots >>-----------
## -----------<<< APR-DRG Code Volume Distribution >>>-----------
aprdrg_plot <- eventReactive(input$hospital_refresh, {
  # Encounter volume per institution for each APR-DRG code; code labels are
  # wrapped so long descriptions fit inside the facet strips.
  counts <- hospital_df() %>%
    group_by(Group, APRDRGCODE, customer_entity) %>%
    summarise(Count = sum(Count)) %>%
    ungroup() %>%
    mutate(APRDRGCODE = str_wrap(labelAPRDRG(APRDRGCODE, values = TRUE), width = 20))
  me_rows <- counts[counts$Group == "Me", ]
  baseline_rows <- counts[counts$Group == "Baseline", ]
  ggplot() +
    # thick vertical bar marks the benchmarked hospital's own volume
    geom_vline(data = me_rows,
               aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
    # one dot per benchmark institution
    geom_dotplot(data = baseline_rows,
                 aes(x = Count, fill = Group, color = Group), dotsize = 1) +
    scale_fill_manual(values = c("Baseline" = "#1f78b4",  # blue
                                 "Me" = "#ff7f00"),       # orange
                      name = "") +
    scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
                                  "Me" = "#ff7f00"),      # orange
                       guide = FALSE) +
    scale_y_continuous(expand = c(0, 0)) +
    facet_wrap("APRDRGCODE") +
    labs(x = "# of Encounters",
         y = "# of Benchmark Institutions") +
    theme(axis.text.y = element_blank(),
          axis.ticks.x = element_blank())
})
## -----------<<< SOI Distribution >>>-----------
soi_plot <- eventReactive(input$hospital_refresh, {
  # Encounter volume per institution at each SOI level, restricted to
  # encounters matching the benchmarked APR-DRG.
  counts <- hospital_df() %>%
    filter(!is.na(APRDRG_benchmark)) %>%
    group_by(Group, SOI, customer_entity) %>%
    summarise(Count = sum(Count)) %>%
    ungroup()
  me_rows <- counts[counts$Group == "Me", ]
  baseline_rows <- counts[counts$Group == "Baseline", ]
  ggplot() +
    # thick vertical bar marks the benchmarked hospital's own volume
    geom_vline(data = me_rows,
               aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
    # one dot per benchmark institution
    geom_dotplot(data = baseline_rows,
                 aes(x = Count, fill = Group, color = Group), dotsize = 1) +
    scale_fill_manual(values = c("Baseline" = "#1f78b4",  # blue
                                 "Me" = "#ff7f00"),       # orange
                      name = "") +
    scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
                                  "Me" = "#ff7f00"),      # orange
                       guide = FALSE) +
    scale_y_continuous(expand = c(0, 0)) +
    facet_wrap("SOI") +
    labs(x = "# of Encounters",
         y = "# of Benchmark Institutions") +
    theme(axis.text.y = element_blank(),
          axis.ticks.x = element_blank())
})
## -----------<<< ROM Distribution >>>-----------
rom_plot <- eventReactive(input$hospital_refresh, {
  # Encounter volume per institution at each ROM level, restricted to
  # encounters matching the benchmarked APR-DRG.
  counts <- hospital_df() %>%
    filter(!is.na(APRDRG_benchmark)) %>%
    group_by(Group, ROM, customer_entity) %>%
    summarise(Count = sum(Count)) %>%
    ungroup()
  me_rows <- counts[counts$Group == "Me", ]
  baseline_rows <- counts[counts$Group == "Baseline", ]
  ggplot() +
    # thick vertical bar marks the benchmarked hospital's own volume
    geom_vline(data = me_rows,
               aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
    # one dot per benchmark institution
    geom_dotplot(data = baseline_rows,
                 aes(x = Count, fill = Group, color = Group), dotsize = 1) +
    scale_fill_manual(values = c("Baseline" = "#1f78b4",  # blue
                                 "Me" = "#ff7f00"),       # orange
                      name = "") +
    scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
                                  "Me" = "#ff7f00"),      # orange
                       guide = FALSE) +
    scale_y_continuous(expand = c(0, 0)) +
    facet_wrap("ROM") +
    labs(x = "# of Encounters",
         y = "# of Benchmark Institutions") +
    theme(axis.text.y = element_blank(),
          axis.ticks.x = element_blank())
})
## -----------<<< Patient Age Distribution >>>-----------
age_plot <- eventReactive(input$hospital_refresh, {
  # Encounter volume per institution for each patient age bucket; buckets are
  # relabeled with their explicit age ranges for display.
  counts <- hospital_df() %>%
    filter(!is.na(APRDRG_benchmark)) %>%
    group_by(Group, AgeBucket, customer_entity) %>%
    summarise(Count = sum(Count)) %>%
    ungroup() %>%
    mutate(AgeBucket = case_when(AgeBucket == "Infant" ~ "Infant (less than 1 yr)",
                                 AgeBucket == "Toddler" ~ "Toddler (13 mos - 23 mos)",
                                 AgeBucket == "Early Childhood" ~ "Early Childhood (2 yrs - 5 yrs)",
                                 AgeBucket == "Middle Childhood" ~ "Middle Childhood (6 yrs - 11 yrs)",
                                 AgeBucket == "Adolescence" ~ "Adolescence (12 yrs - 17 yrs)",
                                 AgeBucket == "Adult" ~ "Adult (18 years or older)"))
  me_rows <- counts[counts$Group == "Me", ]
  baseline_rows <- counts[counts$Group == "Baseline", ]
  ggplot() +
    # thick vertical bar marks the benchmarked hospital's own volume
    geom_vline(data = me_rows,
               aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
    # one dot per benchmark institution
    geom_dotplot(data = baseline_rows,
                 aes(x = Count, fill = Group, color = Group), dotsize = 1) +
    scale_fill_manual(values = c("Baseline" = "#1f78b4",  # blue
                                 "Me" = "#ff7f00"),       # orange
                      name = "") +
    scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
                                  "Me" = "#ff7f00"),      # orange
                       guide = FALSE) +
    scale_y_continuous(expand = c(0, 0)) +
    facet_wrap("AgeBucket") +
    labs(x = "# of Encounters",
         y = "# of Benchmark Institutions") +
    theme(axis.text.y = element_blank(),
          axis.ticks.x = element_blank())
})
## -----------<<< Patient Type Distribution >>>-----------
type_plot <- eventReactive(input$hospital_refresh, {
  # Encounter volume per institution for each patient type (inpatient,
  # outpatient, emergency), restricted to the benchmarked APR-DRG.
  counts <- hospital_df() %>%
    filter(!is.na(APRDRG_benchmark)) %>%
    group_by(Group, PatientTypeRollup, customer_entity) %>%
    summarise(Count = sum(Count)) %>%
    ungroup()
  me_rows <- counts[counts$Group == "Me", ]
  baseline_rows <- counts[counts$Group == "Baseline", ]
  ggplot() +
    # thick vertical bar marks the benchmarked hospital's own volume
    geom_vline(data = me_rows,
               aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
    # one dot per benchmark institution
    geom_dotplot(data = baseline_rows,
                 aes(x = Count, fill = Group, color = Group), dotsize = 1) +
    scale_fill_manual(values = c("Baseline" = "#1f78b4",  # blue
                                 "Me" = "#ff7f00"),       # orange
                      name = "") +
    scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
                                  "Me" = "#ff7f00"),      # orange
                       guide = FALSE) +
    scale_y_continuous(expand = c(0, 0)) +
    facet_wrap("PatientTypeRollup") +
    labs(x = "# of Encounters",
         y = "# of Benchmark Institutions") +
    theme(axis.text.y = element_blank(),
          axis.ticks.x = element_blank())
})
## -----------<<< Patient Discharge Status Distribution >>>-----------
discharge_plot <- eventReactive(input$hospital_refresh, {
  # Encounter volume per institution by discharge status, restricted to the
  # benchmarked APR-DRG; "Still a Patient" is displayed as "Inhouse".
  counts <- hospital_df() %>%
    filter(!is.na(APRDRG_benchmark)) %>%
    group_by(Group, DischargeStatusGroup, customer_entity) %>%
    summarise(Count = sum(Count)) %>%
    ungroup() %>%
    mutate(DischargeStatusGroup = ifelse(DischargeStatusGroup == "Still a Patient", "Inhouse", DischargeStatusGroup))
  me_rows <- counts[counts$Group == "Me", ]
  baseline_rows <- counts[counts$Group == "Baseline", ]
  ggplot() +
    # thick vertical bar marks the benchmarked hospital's own volume
    geom_vline(data = me_rows,
               aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
    # one dot per benchmark institution
    geom_dotplot(data = baseline_rows,
                 aes(x = Count, fill = Group, color = Group), dotsize = 1) +
    scale_fill_manual(values = c("Baseline" = "#1f78b4",  # blue
                                 "Me" = "#ff7f00"),       # orange
                      name = "") +
    scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
                                  "Me" = "#ff7f00"),      # orange
                       guide = FALSE) +
    scale_y_continuous(expand = c(0, 0)) +
    facet_wrap("DischargeStatusGroup") +
    labs(x = "# of Encounters",
         y = "# of Benchmark Institutions") +
    theme(axis.text.y = element_blank(),
          axis.ticks.x = element_blank())
})
## -----------<< Benchmark Plot >>-----------
plot <- eventReactive(input$refresh, {
# Reactive: the main benchmark figure — a boxplot panel ("Baseline" vs. "Me"
# cost distributions) combined side by side with a bar panel of the percent
# differences in medians. Recomputed when the "Update" button fires.
## grab all reactive data frames
main_df <- main_df()
benchmark <- summary_df_benchmark()
me <- summary_df_me()
comparison <- compare_df()
## stack together "Baseline" summary df and "Me" summary df for boxplot labels
all <- union_all(benchmark, me)
## -----------<<< Set Plotting Parameters >>>-----------
# if no cost grouping, don't facet
if(is.null(input$costbreakdowns)){
facet_group <- as.formula(".~Name") # faceting for "gg"
facet_diff_group <- as.formula(".~Difference") # faceting for "diff"
}
# if there is cost grouping, facet by cost grouping
else {
facet_group <- as.formula("CostGrouping~Name") # faceting for "gg"
facet_diff_group <- as.formula("CostGrouping~Difference") # faceting for "diff"
}
# if no benchmark grouping, set axis name as "APR-DRG Code" for default
if(is.null(input$benchmarkbreakdowns)){
axis_name <- "APR-DRG Code"
}
# if benchmark grouping, set axis name as combo of all the grouping column names
else {
axis_name <- paste0(input$benchmarkbreakdowns, collapse = " & ")
}
## -----------<<< gg -- "Baseline" vs. "Me" plot >>>-----------
# horizontal (via coord_flip) boxplots of per-encounter costs, with each
# group's median printed as a bold dollar label next to its box
gg <- ggplot(main_df) +
geom_boxplot(aes(x = BenchmarkGrouping, y = Costs, color = Group), position = "dodge") +
geom_text(data = all,
aes(x = BenchmarkGrouping, y = median, label = paste0("$", scales::comma(median)), group = Group,
hjust = -0.2, vjust = -0.5,
fontface = "bold"),
position = position_dodge(width = 0.75), size = 5) +
coord_flip() +
facet_grid(facet_group) +
scale_x_discrete(name = axis_name) +
scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
name = "") +
# lines that separate different groupings
# remove first value in sequence (0.5) because don't want one between panel border and first plot
geom_vline(xintercept = seq(from = 0.5, to = length(unique(comparison[["BenchmarkGrouping"]])) -0.5, by = 1)[-1],
color = "black") +
theme_bw() +
theme(plot.title = element_text(size = 18, face = "bold"),
panel.background = element_rect(fill = "white"),
panel.grid.minor = element_line(color = "lightgray"),
strip.background = element_blank(),
strip.text.y = element_blank(),
axis.ticks = element_blank(),
axis.text = element_text(size = 15),
strip.text.x = element_text(size = 15),
axis.title = element_text(size = 15),
legend.position = "bottom",
legend.text = element_text(size = 24)) +
guides(colour = guide_legend(override.aes = list(size = 2))) +
labs(title = paste0("APR-DRG: ",
case_when(input$APRDRG == "221" ~ "221 - Major Small and Large Bowel Procedures",
input$APRDRG == "225" ~ "225 - Appendectomy",
input$APRDRG == "303" ~ "303 - Dorsal and Lumbar Fusion Proc for Curvature of Back",
input$APRDRG == "420" ~ "420 - Diabetes",
input$APRDRG == "693" ~ "693 - Chemotherapy",
input$APRDRG == "696" ~ "696 - Other Chemotherapy")))
## set axis for costs to be either normal or log based on user input
# (input$scale is a checkbox, so exactly one of the two branches below runs)
if(input$scale == TRUE){
gg <- gg +
scale_y_log10(name = "Cost per Encounter\n($)",
labels = scales::dollar)
}
# use normal scale if no input or input is "normal" (default)
if(input$scale == FALSE){
gg <- gg +
scale_y_continuous(name = "Cost per Encounter\n($)",
labels = scales::dollar)
}
## -----------<<< diff -- plot showing differences between "Baseline" and "Me" >>>-----------
# bar chart of the % difference in medians per group, colored by sign
diff <- ggplot(comparison,
aes(fill = ifelse(proport_diff_median > 0, 'pos', 'neg'))) + # if % difference positive, then "pos", else "neg" (for setting colors)
geom_bar(aes(x = BenchmarkGrouping, y = proport_diff_median),
stat = 'identity', width = .95) +
# line at 0 mark
geom_hline(color = 'black', yintercept = 0) +
# lines that separate different groupings
# remove first value in sequence (0.5) because don't want one between panel border and first plot
geom_vline(xintercept = seq(from = 0.5, to = length(unique(comparison[["BenchmarkGrouping"]]))-0.5, by = 1)[-1],
color = "black") +
geom_text(aes(label = ifelse(empty_flag == 1,
" NA", # if NA, label "NA" (extra spaces for aesthetic purposes to move it right of vertical line)
paste0(round(proport_diff_median*100, 2), "%")), # label with %, round to 2 decimal places
x = BenchmarkGrouping,
y = case_when(diff_median >= 0 ~ 0.12*max(abs(proport_diff_median)), # if positive %, put it to the right
diff_median < 0 ~ -0.4*max(abs(proport_diff_median)), # if negative %, put it to the left
is.na(diff_median) ~ 0), # if NA because no comparisons, put it at zero and should have "NA" label
fontface = "bold"), size = 5,
hjust = 0.15) +
scale_y_continuous(name = "Difference\n(%)",
labels = scales::percent,
breaks = scales::pretty_breaks(2),
# symmetric limits so zero sits in the middle of the axis
limits = c(-max(abs(comparison$proport_diff_median)), max(abs(comparison$proport_diff_median)))) +
scale_fill_manual(values = c("neg" = "#33a02c", # green
"pos" = "#e31a1c"), # red
guide = FALSE) +
scale_color_manual(values = c("big" = 'white', "small" = 'grey20'), guide = FALSE) +
coord_flip() +
facet_grid(facet_diff_group) +
theme_bw() +
theme(panel.background = element_rect(fill = "white"),
panel.grid = element_blank(),
strip.background = element_blank(),
axis.title.y = element_blank(),
axis.ticks = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_text(size = 15),
strip.text.x = element_text(size = 15),
axis.text.x = element_text(size = 15))
## -----------<<< full -- gg and diff plots together >>>-----------
# place the boxplot panel and the difference panel side by side, axis-aligned,
# with the boxplot panel three times as wide
full <- plot_grid(gg, diff, ncol = 2, align = "h", axis = "bt", rel_widths = c(1.5, 0.5))
return(full)
})
## -----------<< Cost Savings Plot >>-----------
costsavings_plot <- eventReactive(input$view_opportunities, {
# Reactive: per-APR-DRG cost-saving opportunity chart. For each APR-DRG,
# Impact = (my median cost per encounter - benchmark median cost per
# encounter) * my encounter volume; positive bars are savings opportunities.
df <- hospital_df()
# median per-encounter cost and total encounter volume per APR-DRG and group
df <- df %>%
group_by(APRDRGCODE, Group) %>%
summarise(MedianCost = median(Costs),
N = sum(Count)) %>% ungroup()
# widen so "Me" and "Baseline" become side-by-side columns
# (setDT converts df to a data.table by reference before dcast)
df <- data.table::dcast(setDT(df), APRDRGCODE ~ Group, value.var = c("MedianCost", "N")) %>%
mutate(MedianCost_Diff = MedianCost_Me - MedianCost_Baseline, # per-encounter cost gap ("Me" minus benchmark)
N_Diff = N_Me - N_Baseline,
Impact = N_Me * MedianCost_Diff, # total dollar impact = my volume * per-encounter gap
Direction = ifelse(Impact < 0, "Below the Benchmark", "Cost Savings Opportunity"),
APRDRGCODE = labelAPRDRG(APRDRGCODE, values = TRUE)) %>%
filter(!is.na(MedianCost_Diff)) # drop APR-DRGs missing a median on either side
# order bars by total impact so coord_flip() sorts them visually
df$APRDRGCODE <- factor(df$APRDRGCODE, levels = df$APRDRGCODE[order(df$Impact)], ordered = TRUE)
costsavings_plot <- ggplot(df) +
geom_bar(aes(x = APRDRGCODE, y = Impact, fill = Direction),
stat = 'identity', width = .95) +
# line at 0 mark
geom_hline(color = 'black', yintercept = 0) +
# lines that separate different groupings
# remove first value in sequence (0.5) because don't want one between panel border and first plot
geom_vline(xintercept = seq(from = 0.5, to = length(unique(df[["APRDRGCODE"]]))-0.5, by = 1)[-1],
color = "black") +
# multi-line label per bar: dollar impact, my encounter count, and the
# per-encounter cost difference; placed right of zero for positive bars
# and left of zero for negative ones
geom_text(aes(label = ifelse(Impact >= 0,
paste0("Potential Savings: ", scales::dollar(Impact), "\n# of Encounters: ", N_Me, "\nCost Difference per Encounter: ", scales::dollar(MedianCost_Diff)),
paste0("Current Savings: ", scales::dollar(Impact), "\n# of Encounters: ", N_Me, "\nCost Difference per Encounter: ", scales::dollar(MedianCost_Diff))),
x = APRDRGCODE,
y = case_when(is.na(MedianCost_Diff) ~ 0, # if NA because no comparisons, put it at zero and should have "NA" label
Impact >= 0 ~ 0.15*(min(abs(coalesce(df$Impact, 0)))),
Impact < 0 ~ -0.15*(min(abs(coalesce(df$Impact, 0))))),
fontface = "bold"), size = 5, hjust = ifelse(df$Impact >= 0, 0, 1)) +
scale_fill_manual(values = c("Below the Benchmark" = "#dadaeb", # light purple
"Cost Savings Opportunity" = "#807dba"), # darker purple
guide = FALSE) +
scale_y_continuous(name = paste0("Cost Difference between Me and the Benchmark\n(My Cost per Encounter - Benchmark Cost per Encounter) * My Volume of Encounters"),
labels = scales::dollar,
# wide expansion leaves room for the multi-line text labels
expand = c(0.8, 0.8)
) +
coord_flip() +
labs(x = "APR-DRG Code")
return(costsavings_plot)
})
## -----------< Reactive Tables >-----------
# table for comparison hospitals / benchmark
benchmark_table <- eventReactive(input$refresh, {
  # Summary table for the benchmark ("Baseline") hospitals, shown on the
  # Tables tab. Resolve the active grouping columns first: when a custom
  # ROM/SOI grouping is defined AND that dimension is selected, the raw
  # ROM/SOI column is swapped for its corresponding "*_group" column.
  grouping_cols <- input$benchmarkbreakdowns
  if (!is.null(input$rom_1) && !is.null(input$rom_2) && "ROM" %in% grouping_cols) {
    grouping_cols <- c(setdiff(grouping_cols, "ROM"), "ROM_group")
  }
  if (!is.null(input$soi_1) && !is.null(input$soi_2) && "SOI" %in% grouping_cols) {
    grouping_cols <- c(setdiff(grouping_cols, "SOI"), "SOI_group")
  }
  # keep only grouping parameters plus the summary statistics
  out <- summary_df_benchmark()[, c(grouping_cols, input$costbreakdowns, "median", "mean", "obs")]
  # user-friendly column headers
  colnames(out) <- c(grouping_cols, input$costbreakdowns, "Median", "Mean", "N")
  out
})
# table for hospital to benchmark / "Me"
me_table <- eventReactive(input$refresh, {
  # Summary table for the hospital being benchmarked ("Me"), shown on the
  # Tables tab. Resolve the active grouping columns first: when a custom
  # ROM/SOI grouping is defined AND that dimension is selected, the raw
  # ROM/SOI column is swapped for its corresponding "*_group" column.
  grouping_cols <- input$benchmarkbreakdowns
  if (!is.null(input$rom_1) && !is.null(input$rom_2) && "ROM" %in% grouping_cols) {
    grouping_cols <- c(setdiff(grouping_cols, "ROM"), "ROM_group")
  }
  if (!is.null(input$soi_1) && !is.null(input$soi_2) && "SOI" %in% grouping_cols) {
    grouping_cols <- c(setdiff(grouping_cols, "SOI"), "SOI_group")
  }
  # keep only grouping parameters plus the summary statistics
  out <- summary_df_me()[, c(grouping_cols, input$costbreakdowns, "median", "mean", "obs")]
  # user-friendly column headers
  colnames(out) <- c(grouping_cols, input$costbreakdowns, "Median", "Mean", "N")
  out
})
# table with comparison information between benchmarks and "Me"
compare_table <- eventReactive(input$refresh, {
  # Comparison table between the benchmark and "Me", shown on the Tables tab.
  # Resolve the active grouping columns first: when a custom ROM/SOI grouping
  # is defined AND that dimension is selected, the raw ROM/SOI column is
  # swapped for its corresponding "*_group" column.
  grouping_cols <- input$benchmarkbreakdowns
  if (!is.null(input$rom_1) && !is.null(input$rom_2) && "ROM" %in% grouping_cols) {
    grouping_cols <- c(setdiff(grouping_cols, "ROM"), "ROM_group")
  }
  if (!is.null(input$soi_1) && !is.null(input$soi_2) && "SOI" %in% grouping_cols) {
    grouping_cols <- c(setdiff(grouping_cols, "SOI"), "SOI_group")
  }
  # express the proportional differences as percentages
  out <- compare_df() %>%
    mutate(proport_diff_median = proport_diff_median * 100,
           proport_diff_mean = proport_diff_mean * 100)
  # keep only grouping parameters plus the difference statistics
  out <- out[, c(grouping_cols, input$costbreakdowns,
                 "diff_median", "proport_diff_median", "diff_mean", "proport_diff_mean")]
  # user-friendly column headers
  colnames(out) <- c(grouping_cols, input$costbreakdowns,
                     "Difference in Medians", "% Difference in Median",
                     "Difference in Means", "% Difference in Mean")
  out
})
## -----------< Stable Outputs >-----------
# Register the patient-population comparison plots. renderPlot() quotes its
# argument, so the braceless form below is equivalent to wrapping in { }.
output$aprdrg_plot <- renderPlot(aprdrg_plot())
output$rom_plot <- renderPlot(rom_plot())
output$soi_plot <- renderPlot(soi_plot())
output$age_plot <- renderPlot(age_plot())
output$type_plot <- renderPlot(type_plot())
output$discharge_plot <- renderPlot(discharge_plot())
output$plotbrush_output <- renderText({
# Lists (as HTML, <br/>-separated) the customer/entity names of the benchmark
# institutions whose dots were brushed on any of the six distribution plots.
# Each branch below rebuilds the exact aggregation used to draw its plot so
# brushedPoints() can match rows against the brush's x-range on "Count".
# if haven't created benchmark, can't select points so output empty string
# (actionButton values start at 0, so 0 == FALSE holds before the first click)
if(input$hospital_refresh == FALSE){
out <- ""
}
# if created benchmark, can start selecting points
else {
df <- hospital_df()
if(any(!is.null(input$aprdrg_plotbrush), !is.null(input$rom_plotbrush), !is.null(input$soi_plotbrush),
!is.null(input$age_plotbrush), !is.null(input$type_plotbrush), !is.null(input$discharge_plotbrush))){
# per-plot accumulators of selected customer/entity names
out_aprdrg <- c()
out_rom <- c()
out_soi <- c()
out_age <- c()
out_type <- c()
out_discharge <- c()
# if brushed over APRDRG distribution plot
if(!is.null(input$aprdrg_plotbrush)){
df1 <- df %>%
filter(Group == "Baseline") %>%
group_by(Group, APRDRGCODE, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(APRDRGCODE = str_wrap(labelAPRDRG(APRDRGCODE, values = TRUE), width = 20))
out_aprdrg <- brushedPoints(df = df1, brush = input$aprdrg_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over ROM distribution plot
if(!is.null(input$rom_plotbrush)){
df2 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, ROM, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup()
out_rom <- brushedPoints(df = df2, brush = input$rom_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over SOI distribution plot
if(!is.null(input$soi_plotbrush)){
df3 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, SOI, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup()
out_soi <- brushedPoints(df = df3, brush = input$soi_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over age distribution plot
if(!is.null(input$age_plotbrush)){
df4 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, AgeBucket, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(AgeBucket = case_when(AgeBucket == "Infant" ~ "Infant (less than 1 yr)",
AgeBucket == "Toddler" ~ "Toddler (13 mos - 23 mos)",
AgeBucket == "Early Childhood" ~ "Early Childhood (2 yrs - 5 yrs)",
AgeBucket == "Middle Childhood" ~ "Middle Childhood (6 yrs - 11 yrs)",
AgeBucket == "Adolescence" ~ "Adolescence (12 yrs - 17 yrs)",
AgeBucket == "Adult" ~ "Adult (18 years or older)"))
out_age <- brushedPoints(df = df4, brush = input$age_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over patient type distribution plot
if(!is.null(input$type_plotbrush)){
df5 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, PatientTypeRollup, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup()
out_type <- brushedPoints(df = df5, brush = input$type_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over patient discharge status distribution plot
if(!is.null(input$discharge_plotbrush)){
df6 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, DischargeStatusGroup, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(DischargeStatusGroup = ifelse(DischargeStatusGroup == "Still a Patient", "Inhouse", DischargeStatusGroup))
out_discharge <- brushedPoints(df = df6, brush = input$discharge_plotbrush, xvar = "Count")$customer_entity
}
# de-duplicate across plots and join with HTML line breaks for renderText
out <- paste0(unique(c(out_aprdrg, out_rom, out_soi, out_age, out_type, out_discharge)), collapse = "<br/>")
}
# if all are null
else {
out <- ""
}
}
return(out)
})
# NOTE: a redundant second registration of output$soi_plot was removed here.
# output$soi_plot is already assigned the identical renderPlot(soi_plot())
# in the "Stable Outputs" section above; the duplicate assignment simply
# overwrote that registration with the same expression, so removing it does
# not change behavior.
# Register the remaining plots and data tables. renderPlot()/renderDataTable()
# quote their argument, so the braceless form is equivalent to wrapping in { }.
output$costsavings_plot <- renderPlot(costsavings_plot())
# benchmarking plot (boxplots plus difference bars)
output$plot <- renderPlot(plot())
# table for the benchmark hospitals
output$summary_df_benchmark <- renderDataTable(benchmark_table())
# table for "Me"
output$summary_df_me <- renderDataTable(me_table())
# table comparing the benchmark against "Me"
output$compare_df <- renderDataTable(compare_table())
## -----------< Session >-----------
# let the client silently reconnect after a dropped connection instead of
# greying out the app ("force" also enables this in local development)
session$allowReconnect("force")
}
#### RUN APP ####
# launch the Shiny application with the UI and server defined above
shinyApp(ui = ui, server = server)
| /CostBenchmarkingForChildren/app.R | no_license | lmebrennan/benchwarmers | R | false | false | 90,677 | r | #### CREATE STRATA PILOT BENCHMARKING APP ####
## R script that renders a Shiny app to do cost benchmarking for Strata
## Winter 2018
## Civis Analytics
## R version 3.4.2
#### PREPARE WORKSPACE ####
install.packages(c('devtools',
'shiny',
'shinythemes',
'shinyWidgets',
# 'ggplot2',
# 'tidyverse',
# 'readr',
'cowplot',
# 'lazyeval',
'rlang',
'civis',
# 'rsconnect',
'DT',
'data.table'
),
repos='https://cran.rstudio.com/')
#devtools::install_github("civisanalytics/civis_deckR")
library(ggplot2)
library(tidyverse)
library(readr)
library(cowplot)
#library(lazyeval)
library(rlang)
library(civis)
#library(civis.deckR)
library(shiny)
library(shinythemes)
library(shinyWidgets)
#library(plotly)
#library(viridis)
#library(rsconnect)
library(DT)
library(data.table)
library(stringr)
#### UI ####
ui <- fluidPage(
theme = shinythemes::shinytheme("lumen"),
# headerPanel("PILOT DEMO PROTOTYPE: Strata Cost Benchmarking"), # title of app; remove because there's a title on Platform
tabsetPanel(type = "tabs",
## -----------< 1. Create Benchmark >-----------
tabPanel("Create Benchmark",
fluidRow(
## -----------<< Column 1.1: Input Hospital and Benchmark Selections >>-----------
column(2,
# parameters for "Me"
h3("Hospital and APR-DRG to Benchmark"),
# select customer / hospital system
selectizeInput("customer_entity", "Select a customer and entity to benchmark:",
choices = c("Customer 1, Entity 1", "Customer 1, Entity 8",
"Customer 3, Entity 2", "Customer 3, Entity 3",
"Customer 4, Entity 5", "Customer 4, Entity 26",
"Customer 4, Entity 6", "Customer 5, Entity 6",
"Customer 6, Entity 1", "Customer 7, Entity 2",
"Customer 9, Entity 2", "Customer 11, Entity 1",
"Customer 12, Entity 1")),
# select APRDRG to benchmark (options change based off which customer is selected)
uiOutput("APRDRG_selector"),
h3(""),
# parameters for Baseline / hospitals to bencmark against
h3("Benchmark Hospitals"),
# select specific hospitals to compare against
uiOutput("benchmark_selector"),
# select hospital regions to compare against
selectizeInput("region", "Select region(s):",
choices = c(ALL = "", "South", "Midwest", "West"),
multiple = TRUE),
# select bedsizes to compare against
selectizeInput("size", "Select bedsize(s):",
choices = c(ALL = "", "less than 200", "200+"),
multiple = TRUE),
# select specialties to compare against
selectizeInput("specialty", "Select specialty(ies):",
choices = c(ALL = "", "Pediatric"),
multiple = TRUE),
selectizeInput("costmodel", "Select cost model(s):",
choices = c(ALL = "",
"Hospitals with Strata Standardized Cost Models" = "standard",
"Hospitals without Strata Standardized Cost Models" = "non"),
multiple = TRUE),
# h3(""),
# checkboxInput("group_individual", "Check this box to see the distribution of the Benchmark patient population/encounters broken down by the specific Customers/Entities.",
# value = FALSE),
# button to update data, plot, and tables
actionButton("hospital_refresh", "Compare Patient Populations")),
## -----------<< Column 1.2: Output Hospital and Benchmark Characteristics >>-----------
column(2,
h3("Hospital Characteristics"),
# output characteristics about the hospital selected as "Me"
htmlOutput("hospital_institution"), # hospital institution you're benchmarking
htmlOutput("hospital_region"), # hospital region (e.g. Midwest))
htmlOutput("hospital_size"), # hospital size (e.g. 200+ beds)
htmlOutput("hospital_specialty"), # specialty (e.g. Pediatric)
h3(""),
h3("Benchmark Characteristics"),
# output characteristics about the benchmark
htmlOutput("benchmark_institutions"), # institutions in the benchmark
htmlOutput("benchmark_region"), # benchmark region
htmlOutput("benchmark_size"), # benchmark size (e.g. 200+ beds)
htmlOutput("benchmark_specialty") # specialty (e.g. Pediatric)
),
## -----------<< Column 1.3: Highlighted Hospitals >>-----------
column(2,
h3("Highlighted Institutions:"),
strong("Select specific dots by dragging your cursor on the plot, and you can see which customer(s)/entity(ies) you've highlighted below."),
htmlOutput("plotbrush_output")),
## -----------<< Column 1.4: Distribution Plots >>-----------
column(6,
tabsetPanel(type = "tabs",
tabPanel("APR-DRG Codes", plotOutput("aprdrg_plot",
brush = brushOpts(id = "aprdrg_plotbrush", direction = "x"),
width = "100%", height = "800px")),
tabPanel("ROM", plotOutput("rom_plot",
brush = brushOpts(id = "rom_plotbrush", direction = "x"),
width = "100%", height = "800px")),
tabPanel("SOI", plotOutput("soi_plot",
brush = brushOpts(id = "soi_plotbrush", direction = "x"),
width = "100%", height = "800px")),
tabPanel("Patient Age", plotOutput("age_plot",
brush = brushOpts(id = "age_plotbrush", direction = "x"),
width = "100%", height = "800px")),
tabPanel("Patient Type", plotOutput("type_plot",
brush = brushOpts(id = "type_plotbrush", direction = "x"),
width = "100%", height = "800px")),
tabPanel("Patient Discharge Status", plotOutput("discharge_plot",
brush = brushOpts(id = "discharge_plotbrush", direction = "x"),
width = "100%", height = "800px"))
)
)
)
),
## -----------< 2. Cost Saving Opportunities >-----------
tabPanel("Cost Saving Opportunities -- APR-DRG Codes",
fluidRow(
# button to update plot
actionButton("view_opportunities", "View Cost Saving Opportunities")),
fluidRow(plotOutput("costsavings_plot", width = "100%", height = "800px"))),
## -----------< 3. View Benchmark >-----------
tabPanel("Cost Benchmark Drill-Down",
fluidRow(
## -----------<< Column 2.1: Benchmark and Cost Breakdowns >>-----------
column(2,
# breakdowns by benchmarking groups (changes y-axis)
h3("Benchmark Breakdowns"),
checkboxGroupInput("benchmarkbreakdowns", strong("Select variables to breakdown costs by:"),
choiceNames = c("Risk of Mortality (ROM)",
"Severity of Illness (SOI)",
"Patient Age Bucket",
"Patient Type",
"Patient Discharge Status"),
choiceValues = c("ROM",
"SOI",
"AgeBucket",
"PatientTypeRollup",
"DischargeStatusGroup")),
dropdownButton(tags$h3("Risk of Mortality (ROM) Grouping Options"),
selectizeInput(inputId = 'rom_1', label = 'Select the ROM categories for the first group:',
choices = c(1, 2, 3, 4), multiple = TRUE),
uiOutput("ROM_2"),
tags$h3("Severity of Illness (SOI) Grouping Options"),
selectizeInput(inputId = 'soi_1', label = 'Select the SOI categories for the first group:',
choices = c(1, 2, 3, 4), multiple = TRUE),
uiOutput("SOI_2"),
circle = TRUE, status = "default", icon = icon("arrow-circle-down"), width = "300px",
tooltip = tooltipOptions(title = "Options for Grouping Risk of Mortality and Severity of Illness")
),
# breakdowns by cost (changes faceting)
h3("Cost Breakdowns"),
checkboxGroupInput("costbreakdowns", strong("Select how to breakdown costs:"),
choiceNames = c("Fixed/Variable",
"Direct/Indirect",
"Cost Drivers"),
choiceValues = c("FixedVariable",
"DirectIndirect",
"CostDriver")),
# other options for displaying / breaking down data
h3("Other Options"),
checkboxInput("scale", "Change x-axis (costs) to log scale? (default is normal)",
value = FALSE)
),
## -----------<< Column 2.2: Data Filters >>-----------
column(3,
# options to remove / filter data
h3("Filter Data"),
selectizeInput("ROM", "Select Risk of Mortality (ROM) value(s):",
choices = c(ALL = "", "1", "2", "3", "4"),
multiple = TRUE),
selectizeInput("SOI", "Select Severity of Illness (SOI) value(s):",
choices = c(ALL = "", "1", "2", "3", "4"),
multiple = TRUE),
selectizeInput("age", "Select patient age(s):",
choices = c(ALL = "",
"Infant (less than 1 yr)" = "Infant",
"Toddler (13 mos - 23 mos)" = "Toddler",
"Early Childhood (2 yrs - 5 yrs)" = "Early Childhood",
"Middle Childhood (6 yrs - 11 yrs)" = "Middle Childhood",
"Adolescence (12 yrs - 17 yrs)" = "Adolescence",
"Adult (18 years or older)" = "Adult"),
multiple = TRUE),
selectizeInput("patienttype", "Select patient type(s):",
choices = c(ALL = "",
"Inpatient",
"Outpatient",
"Emergency"),
multiple = TRUE),
selectizeInput("dischargestatus", "Select patient discharge status(es):",
choices = c(ALL = "",
"Still a Patient",
"Discharged to home or other self care",
"Discharged to home health services",
"Left against medical advice (AMA)",
"Died",
"Transferred to other facility",
"Transferred to other short-term care facility",
"Transferred to intermediate care facility",
"Not Specified"),
multiple = TRUE),
selectizeInput("costs", "Select cost(s):",
choices = list(ALL = "",
`Cost Types` = c("Fixed",
"Variable",
"Direct",
"Indirect"),
`Cost Drivers` = c("Dialysis",
"Excluded",
"Imaging",
"Laboratory",
"LOS",
"OR Time",
"Other Diagnostic Services",
"Pharmacy",
"Supplies",
"Blood",
"Therapeutic Services",
"Cardiovascular")),
multiple = TRUE),
selectizeInput("qltyincidents", "Select whether to keep/remove hospital-caused quality incidents:",
choices = c(BOTH = "",
"Only Encounters without Hospital-Caused Quality Incidents" = "Remove",
"Only Encounters with Hospital-Caused Quality Incidents" = "Keep"),
multiple = TRUE),
# option to remove data
checkboxGroupInput("otherfilteroptions", strong("Other data filters:"),
choiceNames = c("Remove Cost Outliers (based off interquartile range (IQR))",
"Remove Cost Outliers (based off standard deviation (sd))",
"Remove Length of Stay Outliers (based off interquartile range (IQR))",
"Remove Length of Stay Outliers (based off standard deviation (sd))"
),
choiceValues = c("cost_IQR",
"cost_SD",
"LOS_IQR",
"LOS_SD")),
# button to update data, plot, and tables
actionButton("refresh", "Update")
),
## -----------<< Column 2.3: Output >>-----------
column(7,
"Select benchmarking parameters and hit the 'UPDATE' button at the bottom right to generate benchmarks.",
tabsetPanel(type = "tabs",
# tab with the plot
tabPanel("Plot", plotOutput("plot", width = "100%", height = "800px")),
# tab with data tables
tabPanel("Tables",
# baseline data / data for other hospitals
h4(strong("Baseline")),
dataTableOutput("summary_df_benchmark"),
# me data / data for hospital being benchmarked
h4(strong("Me")),
dataTableOutput("summary_df_me"),
# comparison data
h4(strong("Difference")),
dataTableOutput("compare_df"))
)
)
)
)
)
)
#### SERVER ####
server <- function(input, output, session){
## -----------< Load Helper Functions and Data >-----------
# Load helper functions (e.g. calcSummary, labelAPRDRG) into the server environment.
source("StrataFunctions.R", local = TRUE)
#source("/Users/cwang/Desktop/Strata/StrataPIlotPrototype/StrataFunctions.R")
# read in tables from S3 (deprecated; files have now been written to Platform)
# NOTE(review): 10051504 / 10051505 are Civis Platform file IDs -- presumably the
# encounter-level cost table and the hospital metadata table respectively; the
# expected column layouts are documented in the commented-out reads below. Confirm
# the IDs still point at current extracts before deploying.
full <- read_civis(x = 10051504)
hospital_info <- read_civis(x = 10051505)
# full <- read_civis(x = "public.full", database = "Strata Decision Technologies", verbose = TRUE)
# names(full) <- c("Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "APRDRGCODE", "customer_entity",
# "IsStrataStandardCost", "EncounterID", "ROM", "SOI", "AgeBucket", "PatientTypeRollup", "DischargeStatusGroup",
# "CostDriver", "HospitalAcqCondition", "LengthOfStay", "CostKey", "Costs")
#
# hospital_info <- read_civis(x = "public.hospital_info", database = "Strata Decision Technologies", verbose = TRUE)
# names(hospital_info) <- c("CustomerID", "EntityID", "Beds", "City", "State", "Region", "Sub_Region", "Bedsize_Bucket",
# "IsStrataStandardCost", "EntityID_fixed", "Beds_fixed", "Specialty", "customer_entity")
## -----------< UI Inputs and Outputs >-----------
## Dependent UI Inputs
# APR-DRG Code -- the APR-DRG choices track whichever Customer & Entity is currently selected
output$APRDRG_selector = renderUI({
  me <- hospital_info$customer_entity == input$customer_entity
  codes <- full$APRDRGCODE[full$CustomerID == hospital_info$CustomerID[me] &
    full$EntityID == hospital_info$EntityID_fixed[me]]
  selectizeInput(inputId = "APRDRG", "Select an APR-DRG to benchmark:",
    choices = labelAPRDRG(unique(codes)))
})
# Customer ID and Entity ID -- offer every hospital except the one being benchmarked
output$benchmark_selector = renderUI({
  candidates <- hospital_info$customer_entity[hospital_info$customer_entity != input$customer_entity]
  selectizeInput(inputId = "customer_entity_benchmark",
    "Select customer(s) and entity(ies) to benchmark against:",
    choices = c(ALL = "", candidates),
    multiple = TRUE)
})
# ROM_2 -- only offer the ROM categories not already claimed by the first group
output$ROM_2 = renderUI({
  available <- setdiff(c(1, 2, 3, 4), input$rom_1)
  selectizeInput(inputId = "rom_2",
    "Select the ROM categories for the second group:",
    choices = available,
    multiple = TRUE)
})
# SOI_2 -- only offer the SOI categories not already claimed by the first group
output$SOI_2 = renderUI({
  available <- setdiff(c(1, 2, 3, 4), input$soi_1)
  selectizeInput(inputId = "soi_2",
    "Select the SOI categories for the second group:",
    choices = available,
    multiple = TRUE)
})
## UI output hospital information
# Hospital Institution -- echoes the institution being benchmarked
output$hospital_institution = renderText({
  paste("<b>Hospital Institution:</b><br/>", input$customer_entity)
})
# Region -- region of the selected Customer & Entity
output$hospital_region = renderText({
  region <- hospital_info$Region[hospital_info$customer_entity == input$customer_entity]
  paste("<b>Hospital Region:</b><br/>", region)
})
# Size -- bed-size bucket of the selected Customer & Entity
output$hospital_size = renderText({
  beds <- hospital_info$Beds_fixed[hospital_info$customer_entity == input$customer_entity]
  paste("<b>Hospital Bed Size:</b><br/>", beds)
})
# Specialty -- specialty of the selected Customer & Entity (e.g. Pediatric)
output$hospital_specialty = renderText({
  specialty <- hospital_info$Specialty[hospital_info$customer_entity == input$customer_entity]
  paste("<b>Hospital Specialty:</b><br/>", specialty)
})
## UI output hospital benchmark information
# Customer and Entity -- outputs the Customers(s) and Entity(ies) that make up the benchmark.
# The candidate pool depends on the cost-model selection, but the selection logic itself is
# identical for all three cases, so it is applied once to the chosen pool (the original code
# triplicated the entire paste()/ifelse() expression inside each branch).
output$benchmark_institutions = renderText({
# restrict the candidate hospitals by cost model when exactly one model is selected
if(length(input$costmodel) == 1 & "standard" %in% input$costmodel){
df <- hospital_info %>%
filter(IsStrataStandardCost == TRUE)
}
else if(length(input$costmodel) == 1 & "non" %in% input$costmodel){
df <- hospital_info %>%
filter(IsStrataStandardCost == FALSE)
}
else {
df <- hospital_info
}
paste("<b>Benchmark Institution(s):</b><br/>",
ifelse(is.null(input$customer_entity_benchmark) & is.null(input$region) & is.null(input$size) & is.null(input$specialty),
# if no inputs, then take all the hospitals that aren't the one selected
paste(as.vector(unique(df$customer_entity[df$customer_entity != input$customer_entity])), collapse = "<br/>"),
ifelse(is.null(input$region) & is.null(input$size) & is.null(input$specialty),
# only specific hospitals selected: keep exactly those
paste(as.vector(unique(df$customer_entity[df$customer_entity != input$customer_entity
& df$customer_entity %in% input$customer_entity_benchmark])), collapse = "<br/>"),
# region/size/specialty filters selected (optionally plus specific hospitals):
# a hospital qualifies by name OR by matching every non-empty attribute filter
paste(as.vector(unique(df$customer_entity[df$customer_entity != input$customer_entity
& (df$customer_entity %in% input$customer_entity_benchmark
| ((df$Region %in% input$region | is.null(input$region))
& (df$Beds_fixed %in% input$size | is.null(input$size))
& (df$Specialty %in% input$specialty | is.null(input$specialty))))])),
collapse = "<br/>")
)))
})
# Region -- regions represented in the benchmark pool
output$benchmark_region = renderText({
  if(is.null(input$customer_entity_benchmark) & is.null(input$region)){
    regions <- hospital_info$Region[hospital_info$customer_entity != input$customer_entity]
  } else {
    regions <- hospital_info$Region[hospital_info$customer_entity %in% input$customer_entity_benchmark |
      hospital_info$Region %in% input$region]
  }
  paste("<b>Benchmark Region(s):</b><br/>", paste(as.vector(unique(regions)), collapse = ", "))
})
# Size -- bed-size buckets represented in the benchmark pool
output$benchmark_size = renderText({
  if(is.null(input$customer_entity_benchmark) & is.null(input$size)){
    beds <- hospital_info$Beds_fixed[hospital_info$customer_entity != input$customer_entity]
  } else {
    beds <- hospital_info$Beds_fixed[hospital_info$customer_entity %in% input$customer_entity_benchmark |
      hospital_info$Beds_fixed %in% input$size]
  }
  paste("<b>Benchmark Bed Size(s):</b><br/>", paste(as.vector(unique(beds)), collapse = ", "))
})
# Specialty -- specialties represented in the benchmark pool (e.g. Pediatric)
# NOTE(review): output$benchmark_specialty is assigned again further down (the cost-model
# renderer); in Shiny the later registration wins, so this renderer is currently dead code.
output$benchmark_specialty = renderText({
  if(is.null(input$customer_entity_benchmark) & is.null(input$specialty)){
    specialties <- hospital_info$Specialty[hospital_info$customer_entity != input$customer_entity]
  } else {
    specialties <- hospital_info$Specialty[hospital_info$customer_entity %in% input$customer_entity_benchmark |
      hospital_info$Specialty %in% input$specialty]
  }
  paste("<b>Benchmark Specialty(ies):</b><br/>", paste(as.vector(unique(specialties)), collapse = ", "))
})
# Cost Model -- outputs the cost models of the hospitals
# NOTE(review): this renderer is registered under output$benchmark_specialty, the SAME
# output ID as the specialty renderer directly above; Shiny keeps only the later
# registration, so the specialty text is never rendered and no cost-model text reaches
# the UI under its intended name. This was presumably meant to be a distinct ID such as
# output$benchmark_costmodel with a matching htmlOutput/uiOutput slot in the UI -- the UI
# definition is not visible here, so confirm the slot name before renaming.
output$benchmark_specialty = renderText({
if(length(input$costmodel) == 1 & "standard" %in% input$costmodel){
out <- c("Strata Standard Cost Model")
}
else if(length(input$costmodel) == 1 & "non" %in% input$costmodel){
out <- c("Not Strata Standard Cost Model")
}
# none or both selected -> report both models
else {
out <- c("Strata Standard Cost Model", "Not Strata Standard Cost Model")
}
paste("<b>Benchmark Cost Model(s):</b><br/>",
paste(out, collapse = ", "))
})
## -----------< Data Munging >-----------
## -----------<< hospital_df >>-----------
# Encounter-level data for the opportunity view. Flags every row of `full` as either the
# hospital of interest ("Me") or a benchmark hospital ("Baseline") using the same
# hospital/cost-model filters as main_df, but applies none of the clinical or cost filters.
# Recomputed when either the hospital refresh button or the "view opportunities" button fires.
hospital_df <- eventReactive(input$hospital_refresh | input$view_opportunities, {
hospital_df <- full
## APRDRG code filter
# m1: TRUE for rows matching the APR-DRG being benchmarked (used only to tag APRDRG_benchmark below)
hospital_df$m1 <- ifelse(hospital_df$APRDRGCODE == input$APRDRG, TRUE, FALSE)
## "me" / hospital filter
hospital_df$h1 <- ifelse(hospital_df$customer_entity == input$customer_entity, TRUE, FALSE) # filter for input Customer ID and Entity ID
## hospital comparison filters
# c1..c4 each default to TRUE (no-op) when their input is empty
if(!is.null(input$region)){
hospital_df$c1 <- ifelse(hospital_df$Region %in% input$region, TRUE, FALSE) # filter for hospital region
} else {
hospital_df$c1 <- TRUE
}
if(!is.null(input$size)){
hospital_df$c2 <- ifelse(hospital_df$Beds_fixed %in% input$size, TRUE, FALSE) # filter for hospital size
} else {
hospital_df$c2 <- TRUE
}
if(!is.null(input$specialty)){
hospital_df$c3 <- ifelse(hospital_df$Specialty %in% input$specialty, TRUE, FALSE) # filter for hospital specialty
} else {
hospital_df$c3 <- TRUE
}
# filter for specific hospital inputs
if(!is.null(input$customer_entity_benchmark)){
hospital_df$c4 <- ifelse(hospital_df$customer_entity %in% input$customer_entity_benchmark, TRUE, FALSE)
} else {
hospital_df$c4 <- TRUE
}
# if only select one of the two options for input costmodel, then it's standard or non-standard
# (note: IsStrataStandardCost is compared as the string "TRUE"/"FALSE" here)
if(length(input$costmodel) == 1){
if("standard" %in% input$costmodel){
hospital_df$c_costmodel <- ifelse(hospital_df$IsStrataStandardCost == "TRUE", TRUE, FALSE)
} else if("non" %in% input$costmodel){
hospital_df$c_costmodel <- ifelse(hospital_df$IsStrataStandardCost == "FALSE", TRUE, FALSE)
}
}
# if select none or both of the two options for input cost model, then it's all of them
else {
hospital_df$c_costmodel <- TRUE
}
# master hospital benchmark filter (c5) -- combines c1..c4 depending on which inputs were given
# if only input customers/entities to benchmark against, only use that column to filter
# all of them need to meet the hospital_df$c_costmodel condition
if(all(is.null(input$region), is.null(input$size), is.null(input$specialty)) & !is.null(input$customer_entity_benchmark)){
hospital_df$c5 <- ifelse(hospital_df$c4, TRUE, FALSE)
}
# if input region/size/specialty filters, but not customer entity filters, then only use those filters
else if(any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty)) & is.null(input$customer_entity_benchmark)){
hospital_df$c5 <- ifelse(hospital_df$c1 & hospital_df$c2 & hospital_df$c3, TRUE, FALSE)
}
# if input region/size/specialty filters and customer entity filters, then
# a hospital qualifies either by name (c4) or by matching all attribute filters (c1-c3)
else if(any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty)) & !is.null(input$customer_entity_benchmark)){
hospital_df$c5 <- ifelse((hospital_df$c1 & hospital_df$c2 & hospital_df$c3) | hospital_df$c4, TRUE, FALSE)
}
# if none selected; then else
else {
hospital_df$c5 <- TRUE
}
# filter for only hospital to benchmark & benchmark hospitals,
# then collapse cost rows to one Costs total per encounter-level grouping
hospital_df <- hospital_df %>%
filter(h1 | (c5 & c_costmodel)) %>%
mutate("Group" = ifelse(h1, "Me", "Baseline"),
"APRDRG_benchmark" = ifelse(m1, APRDRGCODE, NA)) %>%
group_by(Region, Beds_fixed, Specialty, customer_entity, APRDRGCODE, EncounterID, ROM, SOI, AgeBucket, PatientTypeRollup, DischargeStatusGroup,
Group, APRDRG_benchmark) %>%
summarise("Count" = 1,
"Costs" = sum(Costs)) %>% ungroup()
return(hospital_df)
})
## -----------<< main_df >>-----------
# Encounter-level dataframe with benchmark grouping columns and cost grouping columns as well as columns with cost information;
# the code below filters the full dataframe of all cost data, based off user inputs about how to filter the data
# the data is also labeled as "Me" or "Baseline" to indicate which costs go towards the benchmark, and which go to the hospital of interest
# FIXES vs previous version: (1) the LOS standard-deviation outlier branch now uses the
# sd_outlier_* bounds (it was copy-pasted from the IQR branch and re-used IQR bounds);
# (2) the final zero-row guard uses nrow(main_df) > 0 (was nrow(main_df > 0), which never
# detects an empty frame); (3) m7 is handled when no row matches any selected cost filter
# (previously left unassigned, crashing the reactive).
main_df <- eventReactive(input$refresh, {
## grab full dataframe of customer data from global environment; summarised at the most granular level of grouping
main_df <- full
## APRDRG code filter
main_df$m1 <- ifelse(main_df$APRDRGCODE == input$APRDRG, TRUE, FALSE)
## "me" / hospital filter
main_df$h1 <- ifelse(main_df$customer_entity == input$customer_entity, TRUE, FALSE) # filter for input Customer ID and Entity ID
## hospital comparison filters (c1..c4 default to TRUE when their input is empty)
if(!is.null(input$region)){
main_df$c1 <- ifelse(main_df$Region %in% input$region, TRUE, FALSE) # filter for hospital region
} else {
main_df$c1 <- TRUE
}
if(!is.null(input$size)){
main_df$c2 <- ifelse(main_df$Beds_fixed %in% input$size, TRUE, FALSE) # filter for hospital size
} else {
main_df$c2 <- TRUE
}
if(!is.null(input$specialty)){
main_df$c3 <- ifelse(main_df$Specialty %in% input$specialty, TRUE, FALSE) # filter for hospital specialty
} else {
main_df$c3 <- TRUE
}
# filter for specific hospital inputs
if(!is.null(input$customer_entity_benchmark)){
main_df$c4 <- ifelse(main_df$customer_entity %in% input$customer_entity_benchmark, TRUE, FALSE)
} else {
main_df$c4 <- TRUE
}
# if only select one of the two options for input costmodel, then it's standard or non-standard
if(length(input$costmodel) == 1){
if("standard" %in% input$costmodel){
main_df$c_costmodel <- ifelse(main_df$IsStrataStandardCost == "TRUE", TRUE, FALSE)
} else if("non" %in% input$costmodel){
main_df$c_costmodel <- ifelse(main_df$IsStrataStandardCost == "FALSE", TRUE, FALSE)
}
}
# if select none or both of the two options for input cost model, then it's all of them
else {
main_df$c_costmodel <- TRUE
}
# master hospital benchmark filter
# if only input customers/entities to benchmark against, only use that column to filter
if(all(is.null(input$region), is.null(input$size), is.null(input$specialty)) & !is.null(input$customer_entity_benchmark)){
main_df$c5 <- ifelse(main_df$c4, TRUE, FALSE)
}
# if input region/size/specialty filters, but not customer entity filters, then only use those filters
else if(any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty)) & is.null(input$customer_entity_benchmark)){
main_df$c5 <- ifelse(main_df$c1 & main_df$c2 & main_df$c3, TRUE, FALSE)
}
# if input region/size/specialty filters and customer entity filters, then qualify by name OR all attributes
else if(any(!is.null(input$region), !is.null(input$size), !is.null(input$specialty)) & !is.null(input$customer_entity_benchmark)){
main_df$c5 <- ifelse((main_df$c1 & main_df$c2 & main_df$c3) | main_df$c4, TRUE, FALSE)
}
# if none selected; then else
else {
main_df$c5 <- TRUE
}
## benchmark filters (m2..m6 default to TRUE when their input is empty)
if(!is.null(input$ROM)){
main_df$m2 <- ifelse(main_df$ROM %in% input$ROM, TRUE, FALSE) # filter ROM
} else {
main_df$m2 <- TRUE
}
if(!is.null(input$SOI)){
main_df$m3 <- ifelse(main_df$SOI %in% input$SOI, TRUE, FALSE) # filter SOI
} else {
main_df$m3 <- TRUE
}
if(!is.null(input$age)){
main_df$m4 <- ifelse(main_df$AgeBucket %in% input$age, TRUE, FALSE) # filter patient age buckets
} else {
main_df$m4 <- TRUE
}
if(!is.null(input$patienttype)){
main_df$m5 <- ifelse(main_df$PatientTypeRollup %in% input$patienttype, TRUE, FALSE) # filter patient types
} else {
main_df$m5 <- TRUE
}
if(!is.null(input$dischargestatus)){
main_df$m6 <- ifelse(main_df$DischargeStatusGroup %in% input$dischargestatus, TRUE, FALSE) # filter patient discharge statuses
} else {
main_df$m6 <- TRUE
}
## cost filters: a row must match the selected value in EVERY cost dimension the user
## touched; since we can't tell directly which of the three dimensions a selection came
## from, rows matching the maximum number of dimensions are kept
if(length(input$costs) > 0){
main_df$temp1 <- ifelse(main_df$FixedVariable %in% input$costs, 1, 0) # if filtering Fixed/Variable costs, mark with 1
main_df$temp2 <- ifelse(main_df$DirectIndirect %in% input$costs, 1, 0) # if filtering Direct/Indirect costs, mark with 1
main_df$temp3 <- ifelse(main_df$CostDriver %in% input$costs, 1, 0) # if filtering CostDrivers, mark with 1
main_df$temp_all <- main_df$temp1 + main_df$temp2 + main_df$temp3 # create column with sum of all cost filters (min 0, max 3)
matched_filters <- max(main_df$temp_all)
if(matched_filters > 0){
main_df$m7 <- main_df$temp_all == matched_filters # keep rows matching the max number of cost dimensions (1, 2, or 3)
} else {
main_df$m7 <- FALSE # cost filters selected but no row matches any of them (previously m7 was left unassigned and the reactive crashed)
}
} else {
main_df$m7 <- TRUE
}
## hospital-acquired quality incident filters
# if only one quality incident filter selected
if(length(input$qltyincidents) == 1){
# if only have "Only Keep Encounters without Hospital-Acquired Quality Incidents"
if(min(input$qltyincidents) == "Remove"){
main_df$m8 <- ifelse(main_df$HospitalAcqCondition == "0", TRUE, FALSE) # filter out hospital-acquired conditions/hospital-caused quality incidents
}
# if only have "Only Keep Encounters with Hospital-Acquired Quality Incidents"
else if(min(input$qltyincidents) == "Keep"){
main_df$m8 <- ifelse(main_df$HospitalAcqCondition == "1", TRUE, FALSE) # filter out NON hospital-acquired conditions/hospital-caused quality incidents
}
}
# if both selected, or neither selected, keep both
else if(is.null(input$qltyincidents) | length(input$qltyincidents) == 2){
main_df$m8 <- TRUE
}
## set conditions for filtering
hospital_conditions <- c((main_df$c5 & main_df$c_costmodel) | main_df$h1) # filters for "Me" and "Baseline"
filter_conditions <- c(main_df$m1 & main_df$m2 & main_df$m3 & main_df$m4 & main_df$m5 & main_df$m6 & main_df$m7 & main_df$m8) # parameter filters
## filter data frame
main_df <- main_df[hospital_conditions & filter_conditions,] %>%
mutate(Group = factor(ifelse(h1, "Me", "Baseline"), # create variable to facet Baseline vs. Hospital to Compare (Me)
levels = c("Baseline", "Me"),
ordered = TRUE),
Name = "Benchmark")
## only check that both ROM groupings are filled in if one of them is specified
if(!is.null(input$rom_1) | !is.null(input$rom_2)){
validate(
need(all(c(input$rom_1, input$rom_2) %in% unique(main_df$ROM)), "You're missing some of the Risk of Mortality (ROM) values you're grouping by. You probably filtered them out. Please add them back in.")
)
# check to see that the ROM values that we're custom grouping by haven't been filtered out
if("ROM" %in% input$benchmarkbreakdowns){
validate(
need(!is.null(input$rom_1) & !is.null(input$rom_2), "You've custom selected Risk of Mortality (ROM) values for only one group. Please select values for both groups.")
)
}
}
## only check that both SOI groupings are filled in if one of them is specified
if(!is.null(input$soi_1) | !is.null(input$soi_2)){
validate(
need(all(c(input$soi_1, input$soi_2) %in% unique(main_df$SOI)), "You're missing some of the Severity of Illness (SOI) values you're grouping by. You probably filtered them out. Please add them back in.")
)
# check to see that the SOI values that we're custom grouping by haven't been filtered out
if("SOI" %in% input$benchmarkbreakdowns){
validate(
need(!is.null(input$soi_1) & !is.null(input$soi_2), "You've custom selected Severity of Illness (SOI) values for only one group. Please select values for both groups.")
)
}
}
if("ROM" %in% input$benchmarkbreakdowns){
main_df <- main_df %>%
# if have custom groupings for ROM, create column with custom grouping for ROM Group 1
mutate(ROM_group1 = ifelse(!is.null(input$rom_1) & ROM %in% input$rom_1,
paste0("ROM ", paste0(input$rom_1, collapse = "/")), # e.g. "ROM 1/2"
NA),
# if have custom groupings for ROM, create column with custom grouping for ROM Group 2
ROM_group2 = ifelse(!is.null(input$rom_2) & ROM %in% input$rom_2,
paste0("ROM ", paste0(input$rom_2, collapse = "/")), # e.g. "ROM 3/4"
NA),
# coalesce custom groupings into one column
ROM_group = coalesce(ROM_group1, ROM_group2))
}
if("SOI" %in% input$benchmarkbreakdowns){
main_df <- main_df %>%
# if have custom groupings for SOI, create column with custom groupings for SOI Group 1
mutate(SOI_group1 = ifelse(!is.null(input$soi_1) & SOI %in% input$soi_1,
paste0("SOI ", paste0(input$soi_1, collapse = "/")),
NA),
# if have custom groupings for SOI, create column with custom groupings for SOI Group 2
SOI_group2 = ifelse(!is.null(input$soi_2) & SOI %in% input$soi_2,
paste0("SOI ", paste0(input$soi_2, collapse = "/")),
NA),
# coalesce custom groupings into one column
SOI_group = coalesce(SOI_group1, SOI_group2))
}
## check to see if there's still data after filtering
validate(
need(nrow(main_df) > 0, "The data has zero rows due to filtering. Please adjust your filters.")
)
## add column for benchmark breakdowns; if multiple benchmark breakdowns selected, concatenate columns with "&" in between columns
if(!is.null(input$benchmarkbreakdowns)){
# initialize empty variables to indicate keep/remove ROM/SOI for custom grouping option
remove_ROM <- c()
keep_ROM <- c()
remove_SOI <- c()
keep_SOI <- c()
# if have custom grouping for ROM, remove ROM column from grouping and add custom grouping column for ROM
if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
remove_ROM <- c("ROM")
keep_ROM <- c("ROM_group")
}
# if have custom grouping for SOI, remove SOI column from grouping and add custom grouping column for SOI
if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
remove_SOI <- c("SOI")
keep_SOI <- ("SOI_group")
}
# grab all of the possible groupings
all_groupings <- c(input$benchmarkbreakdowns, keep_ROM, keep_SOI)
# remove unwanted groupings if specified
keep_benchmarkgroups <- setdiff(all_groupings, c(remove_ROM, remove_SOI))
# coalesce all of the grouping column values into one column for axis naming purposes
# (tidyr::unite_ is deprecated in current tidyr -- works here, but consider unite() + all_of())
main_df <- tidyr::unite_(main_df, "BenchmarkGrouping", keep_benchmarkgroups, sep = " & ", remove = FALSE)
} else {
main_df$BenchmarkGrouping <- main_df$APRDRGCODE # if no breakdowns selected, use APRDRGCODE as default y-axis
}
## add column for cost breakdowns; if multiple cost breakdowns selected, concatenate columns with "&" in between columns
if(!is.null(input$costbreakdowns)){
main_df <- tidyr::unite_(main_df, "CostGrouping", input$costbreakdowns, sep = " & ", remove = FALSE)
} else {
main_df$CostGrouping <- NA # if no cost breakdowns selected, default is NA
}
## group data based off benchmark breakdowns and cost breakdowns
# if inputs for both benchmark and cost breakdowns
if(!is.null(input$benchmarkbreakdowns) & !is.null(input$costbreakdowns)){
groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
"CostGrouping", "BenchmarkGrouping", "EncounterID", keep_benchmarkgroups, input$costbreakdowns)
outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", keep_benchmarkgroups, input$costbreakdowns)
}
# if inputs for only benchmark breakdowns
if(!is.null(input$benchmarkbreakdowns) & is.null(input$costbreakdowns)){
groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
"CostGrouping", "BenchmarkGrouping", "EncounterID", keep_benchmarkgroups)
outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", keep_benchmarkgroups)
}
# if inputs for only cost breakdowns
if(!is.null(input$costbreakdowns) & is.null(input$benchmarkbreakdowns)){
groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
"CostGrouping", "BenchmarkGrouping", "EncounterID", input$costbreakdowns)
outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", input$costbreakdowns)
}
# if no inputs for both benchmark and cost breakdowns
if(is.null(input$costbreakdowns) & is.null(input$benchmarkbreakdowns)){
groupings <- c("Name", "Region", "Beds_fixed", "Specialty", "CustomerID", "EntityID", "Group", "APRDRGCODE", "LengthOfStay",
"CostGrouping", "BenchmarkGrouping", "EncounterID")
outlier_groupings <- c("Name", "Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping")
}
## group by input groupings and re-calculate so that data is at the most granular grouping specified by the user
# if no grouping parameters specified (e.g. no benchmark or cost breakdowns), most granular level is Encounter level
main_df <- main_df %>%
group_by(.dots = groupings) %>%
summarise(Costs = sum(Costs)) %>% ungroup()
## remove length of stay outliers if selected
if(TRUE %in% (grepl("LOS", input$otherfilteroptions))){
# grab current column names of main df; will use these later to select original columns
# (to avoid duplicate column name issues if user selects to remove both LOS and cost outliers)
save <- colnames(main_df)
# calculate LOS summary statistics and outlier cutoffs based off IQR and standard deviation
LOS_filters <- main_df %>%
calcSummary(df = ., summary_var = "LengthOfStay", outlier_threshold = 2, grouping_vars = outlier_groupings)
# join summary statistics and outlier cutoffs to main df
main_df <- main_df %>%
left_join(LOS_filters, by = outlier_groupings)
# remove LOS IQR outliers if selected
if("LOS_IQR" %in% input$otherfilteroptions){
main_df$o1_los <- case_when(
main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
main_df$LengthOfStay > main_df$IQR_outlier_high | main_df$LengthOfStay < main_df$IQR_outlier_low ~ FALSE, # IQR outliers
TRUE ~ TRUE) # keep non-outliers
}
else {
main_df$o1_los <- TRUE
}
# remove LOS standard deviation outliers if selected
if("LOS_SD" %in% input$otherfilteroptions){
main_df$o2_los <- case_when(
main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
main_df$LengthOfStay > main_df$sd_outlier_high | main_df$LengthOfStay < main_df$sd_outlier_low ~ FALSE, # standard deviation outliers (fixed: previously tested IQR bounds)
TRUE ~ TRUE) # keep non-outliers
}
else {
main_df$o2_los <- TRUE
}
# remove LOS outliers
main_df <- main_df[c(main_df$o1_los & main_df$o2_los), save]
}
## remove cost outliers if selected
if(TRUE %in% (grepl("cost", input$otherfilteroptions))){
# grab current column names of main df; will use these later to select original columns
# (to avoid duplicate column name issues if user selects to remove both LOS and cost outliers)
save <- colnames(main_df)
# calculate cost summary statistics and outlier cutoffs based off IQR and standard deviation
cost_filters <- main_df %>%
calcSummary(df = ., summary_var = "Costs", outlier_threshold = 2, grouping_vars = outlier_groupings)
# join summary statistics and outlier cutoffs to main df
main_df <- main_df %>%
left_join(cost_filters, by = outlier_groupings)
# remove cost IQR outliers if selected
if("cost_IQR" %in% input$otherfilteroptions){
main_df$o1_cost <- case_when(
main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
main_df$Costs > main_df$IQR_outlier_high | main_df$Costs < main_df$IQR_outlier_low ~ FALSE, # IQR outliers
TRUE ~ TRUE) # keep non-outliers
}
else {
main_df$o1_cost <- TRUE
}
# remove cost standard deviation outliers if selected
if("cost_SD" %in% input$otherfilteroptions){
main_df$o2_cost <- case_when(
main_df$obs == 1 ~ TRUE, # if only one observation, keep (can't be an outlier if you're solo)
main_df$Costs > main_df$sd_outlier_high | main_df$Costs < main_df$sd_outlier_low ~ FALSE, # standard deviation outliers
TRUE ~ TRUE) # keep non-outliers
}
else {
main_df$o2_cost <- TRUE
}
# remove cost outliers
main_df <- main_df[c(main_df$o1_cost & main_df$o2_cost), save]
}
## check to see if there's still data after filtering for outliers
## (fixed: was nrow(main_df > 0), which compares the whole frame to 0 and never detects emptiness)
validate(
need(nrow(main_df) > 0, "The data has zero rows due to outlier filtering. Please adjust your filters.")
)
return(main_df)
})
## -----------<< summary_df_benchmark >>-----------
# Summary statistics (via calcSummary) for the "Baseline" rows of main_df();
# feeds the boxplot labels and the "Baseline" data table.
summary_df_benchmark <- eventReactive(input$refresh, {
  # start from the selected breakdowns; when a custom two-group split is fully specified,
  # swap the raw ROM/SOI column for its custom-group counterpart
  bench_groups <- input$benchmarkbreakdowns
  if("ROM" %in% input$benchmarkbreakdowns & !is.null(input$rom_1) & !is.null(input$rom_2)){
    bench_groups <- c(setdiff(bench_groups, "ROM"), "ROM_group")
  }
  if("SOI" %in% input$benchmarkbreakdowns & !is.null(input$soi_1) & !is.null(input$soi_2)){
    bench_groups <- c(setdiff(bench_groups, "SOI"), "SOI_group")
  }
  groups <- c("Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", bench_groups, input$costbreakdowns)
  summary_df_benchmark <- main_df() %>%
    filter(Group == "Baseline") %>%
    calcSummary(df = ., summary_var = "Costs", outlier_threshold = 2, grouping_vars = groups)
  ## check to see there's still data to benchmark against after filtering main_df for just the baseline data
  validate(
    need(nrow(summary_df_benchmark) > 0, "There is no baseline data due to filtering (i.e. there is no data for the 'Baseline'). Please adjust your data filters.")
  )
  return(summary_df_benchmark)
})
## -----------<< summary_df_me >>-----------
# Summary statistics (via calcSummary) for the "Me" rows of main_df();
# feeds the boxplot labels and the "Me" data table.
summary_df_me <- eventReactive(input$refresh, {
  # start from the selected breakdowns; when a custom two-group split is fully specified,
  # swap the raw ROM/SOI column for its custom-group counterpart
  grouping_cols <- input$benchmarkbreakdowns
  if("ROM" %in% input$benchmarkbreakdowns & !is.null(input$rom_1) & !is.null(input$rom_2)){
    grouping_cols <- c(setdiff(grouping_cols, "ROM"), "ROM_group")
  }
  if("SOI" %in% input$benchmarkbreakdowns & !is.null(input$soi_1) & !is.null(input$soi_2)){
    grouping_cols <- c(setdiff(grouping_cols, "SOI"), "SOI_group")
  }
  groups <- c("Group", "APRDRGCODE", "CostGrouping", "BenchmarkGrouping", grouping_cols, input$costbreakdowns)
  summary_df_me <- main_df() %>%
    filter(Group == "Me") %>%
    calcSummary(df = ., summary_var = "Costs", outlier_threshold = 2, grouping_vars = groups)
  ## check to see there's still data to benchmark after filtering main_df for just the "Me" data
  validate(
    need(nrow(summary_df_me) > 0, "There is no data to benchmark due to filtering (i.e. there is no data for 'Me'). Please adjust your data filters.")
  )
  return(summary_df_me)
})
## -----------<< compare_df >>-----------
# data frame with the summary information for "Me" and the "Baseline" next to each other in order to calculate differences
# used to create labels for the difference barplots, as well as the comparison data table
compare_df <- eventReactive(input$refresh, {
# Rebuild the same grouping-key vector used by summary_df_benchmark/summary_df_me so
# the two summaries can be joined on identical keys (minus "Group", which differs).
# initialize empty variables to indicate keep/remove ROM/SOI for custom grouping option
remove_ROM <- c()
keep_ROM <- c()
remove_SOI <- c()
keep_SOI <- c()
# if have custom grouping for ROM, remove ROM column from grouping and add custom grouping column for ROM
if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
remove_ROM <- c("ROM")
keep_ROM <- c("ROM_group")
}
# if have custom grouping for SOI, remove SOI column from grouping and add custom grouping column for SOI
if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
remove_SOI <- c("SOI")
keep_SOI <- ("SOI_group")
}
# grab all of the possible groupings
all_groupings <- c(input$benchmarkbreakdowns, keep_ROM, keep_SOI)
# remove unwanted groupings if specified
keep_benchmarkgroups <- setdiff(all_groupings, c(remove_ROM, remove_SOI))
groups <- c("APRDRGCODE", "BenchmarkGrouping", "CostGrouping", keep_benchmarkgroups, input$costbreakdowns)
# grab summary df of "Me"
me <- summary_df_me()
# append "_ME" to end of summary column names (excluding join keys (i.e. groups))
colnames(me)[!(colnames(me) %in% groups)] <- paste0(colnames(me)[!(colnames(me) %in% groups)], "_ME")
# full join the summary df of all the benchmark hospitals, and the summary df of "Me"
# use the groupings as join keys (full join so groups present on only one side still appear;
# the missing side's columns come back NA and are flagged via empty_flag below)
compare_out <- summary_df_benchmark() %>%
full_join(me, by = groups)
compare_out <- compare_out %>%
mutate(diff_median = round(median_ME - median, 2), # difference in medians b/w "Me" and benchmark
diff_mean = round(mean_ME - mean, 2), # difference in mean b/w "Me" and benchmark
# percent difference in median b/w "Me" and benchmark
# (Inf arises when median_ME == 0 with a nonzero diff; fall back to dividing by 1.
# coalesce() also maps the NaN from 0/0 to 0, since is.na(NaN) is TRUE)
proport_diff_median = ifelse(is.infinite(diff_median/median_ME),
round(coalesce(diff_median / 1, 0), 2), # if division by zero, divide by 1 instead
round(coalesce(diff_median / median_ME, 0), 2)),
# percent difference in mean b/w "Me" and benchmark
proport_diff_mean = ifelse(is.infinite(diff_mean/mean_ME),
round(coalesce(diff_mean / 1, 0), 2), # if division by zero, divide by 1 instead
round(coalesce(diff_mean / mean_ME, 0), 2)),
Difference = "Difference", # column to indicate this is the "difference" data frame; used for faceting
empty_flag = ifelse(is.na(min) | is.na(min_ME), 1, 0)) # flag for if missing data
return(compare_out)
})
## -----------< Reactive Plotting >-----------
## -----------<< Patient/Benchmark Comparison Plots >>-----------
## -----------<<< APR-DRG Code Volume Distribution >>>-----------
# Dotplot of encounter counts per APR-DRG code: one dot per benchmark
# institution, plus a vertical bar marking "Me"'s own volume.
# NOTE(review): unlike the other distribution plots below, this one does NOT
# filter on !is.na(APRDRG_benchmark) -- presumably intentional so that all
# APR-DRG volumes are shown; confirm with the app owner.
aprdrg_plot <- eventReactive(input$hospital_refresh, {
hospital_df <- hospital_df()
hospital_df <- hospital_df %>%
group_by(Group, APRDRGCODE, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(APRDRGCODE = str_wrap(labelAPRDRG(APRDRGCODE, values = TRUE), width = 20))
aprdrg_plot <- ggplot() +
geom_vline(data = hospital_df[hospital_df$Group == "Me", ],
aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
geom_dotplot(data = hospital_df[hospital_df$Group == "Baseline", ],
aes(x = Count, fill = Group, color = Group), dotsize = 1) +
scale_fill_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
name = "") +
scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
guide = FALSE) +
scale_y_continuous(expand = c(0,0)) +
facet_wrap("APRDRGCODE") +
labs(x = "# of Encounters",
y = "# of Benchmark Institutions") +
theme(axis.text.y = element_blank(),
axis.ticks.x = element_blank())
return(aprdrg_plot)
})
## -----------<<< SOI Distribution >>>-----------
# Dotplot of encounter counts per SOI level: one dot per benchmark
# institution, with a vertical bar marking "Me"'s volume.
soi_plot <- eventReactive(input$hospital_refresh, {
counts <- hospital_df() %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, SOI, customer_entity) %>%
summarise(Count = sum(Count)) %>%
ungroup()
# shared palette: blue for the benchmark institutions, orange for "Me"
palette <- c("Baseline" = "#1f78b4", "Me" = "#ff7f00")
ggplot() +
# vertical bar marking "Me"'s encounter volume
geom_vline(data = counts[counts$Group == "Me", ],
aes(xintercept = Count, color = Group, fill = Group),
size = 3, alpha = 0.75) +
# one dot per benchmark institution
geom_dotplot(data = counts[counts$Group == "Baseline", ],
aes(x = Count, fill = Group, color = Group), dotsize = 1) +
scale_fill_manual(values = palette, name = "") +
scale_color_manual(values = palette, guide = FALSE) +
scale_y_continuous(expand = c(0, 0)) +
facet_wrap("SOI") +
labs(x = "# of Encounters",
y = "# of Benchmark Institutions") +
theme(axis.text.y = element_blank(),
axis.ticks.x = element_blank())
})
## -----------<<< ROM Distribution >>>-----------
# Dotplot of encounter counts per ROM level: one dot per benchmark
# institution, with a vertical bar marking "Me"'s volume.
rom_plot <- eventReactive(input$hospital_refresh, {
counts <- hospital_df() %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, ROM, customer_entity) %>%
summarise(Count = sum(Count)) %>%
ungroup()
# shared palette: blue for the benchmark institutions, orange for "Me"
palette <- c("Baseline" = "#1f78b4", "Me" = "#ff7f00")
ggplot() +
# vertical bar marking "Me"'s encounter volume
geom_vline(data = counts[counts$Group == "Me", ],
aes(xintercept = Count, color = Group, fill = Group),
size = 3, alpha = 0.75) +
# one dot per benchmark institution
geom_dotplot(data = counts[counts$Group == "Baseline", ],
aes(x = Count, fill = Group, color = Group), dotsize = 1) +
scale_fill_manual(values = palette, name = "") +
scale_color_manual(values = palette, guide = FALSE) +
scale_y_continuous(expand = c(0, 0)) +
facet_wrap("ROM") +
labs(x = "# of Encounters",
y = "# of Benchmark Institutions") +
theme(axis.text.y = element_blank(),
axis.ticks.x = element_blank())
})
## -----------<<< Patient Age Distribution >>>-----------
# Dotplot of encounter counts per age bucket: one dot per benchmark
# institution, plus a vertical bar marking "Me"'s volume. Bucket labels are
# expanded to include the age ranges for display.
age_plot <- eventReactive(input$hospital_refresh, {
hospital_df <- hospital_df()
hospital_df <- hospital_df %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, AgeBucket, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(AgeBucket = case_when(AgeBucket == "Infant" ~ "Infant (less than 1 yr)",
AgeBucket == "Toddler" ~ "Toddler (13 mos - 23 mos)",
AgeBucket == "Early Childhood" ~ "Early Childhood (2 yrs - 5 yrs)",
AgeBucket == "Middle Childhood" ~ "Middle Childhood (6 yrs - 11 yrs)",
AgeBucket == "Adolescence" ~ "Adolescence (12 yrs - 17 yrs)",
AgeBucket == "Adult" ~ "Adult (18 years or older)"))
age_plot <- ggplot() +
geom_vline(data = hospital_df[hospital_df$Group == "Me", ],
aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
geom_dotplot(data = hospital_df[hospital_df$Group == "Baseline", ],
aes(x = Count, fill = Group, color = Group), dotsize = 1) +
scale_fill_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
name = "") +
scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
guide = FALSE) +
scale_y_continuous(expand = c(0,0)) +
facet_wrap("AgeBucket") +
labs(x = "# of Encounters",
y = "# of Benchmark Institutions") +
theme(axis.text.y = element_blank(),
axis.ticks.x = element_blank())
return(age_plot)
})
## -----------<<< Patient Type Distribution >>>-----------
# Dotplot of encounter counts per patient type rollup: one dot per benchmark
# institution, with a vertical bar marking "Me"'s volume.
type_plot <- eventReactive(input$hospital_refresh, {
counts <- hospital_df() %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, PatientTypeRollup, customer_entity) %>%
summarise(Count = sum(Count)) %>%
ungroup()
# shared palette: blue for the benchmark institutions, orange for "Me"
palette <- c("Baseline" = "#1f78b4", "Me" = "#ff7f00")
ggplot() +
# vertical bar marking "Me"'s encounter volume
geom_vline(data = counts[counts$Group == "Me", ],
aes(xintercept = Count, color = Group, fill = Group),
size = 3, alpha = 0.75) +
# one dot per benchmark institution
geom_dotplot(data = counts[counts$Group == "Baseline", ],
aes(x = Count, fill = Group, color = Group), dotsize = 1) +
scale_fill_manual(values = palette, name = "") +
scale_color_manual(values = palette, guide = FALSE) +
scale_y_continuous(expand = c(0, 0)) +
facet_wrap("PatientTypeRollup") +
labs(x = "# of Encounters",
y = "# of Benchmark Institutions") +
theme(axis.text.y = element_blank(),
axis.ticks.x = element_blank())
})
## -----------<<< Patient Discharge Status Distribution >>>-----------
# Dotplot of encounter counts per discharge status group: one dot per
# benchmark institution, plus a vertical bar marking "Me"'s volume.
discharge_plot <- eventReactive(input$hospital_refresh, {
hospital_df <- hospital_df()
hospital_df <- hospital_df %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, DischargeStatusGroup, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
# display label: "Still a Patient" is shown as "Inhouse"
mutate(DischargeStatusGroup = ifelse(DischargeStatusGroup == "Still a Patient", "Inhouse", DischargeStatusGroup))
discharge_plot <- ggplot() +
geom_vline(data = hospital_df[hospital_df$Group == "Me", ],
aes(xintercept = Count, color = Group, fill = Group), size = 3, alpha = 0.75) +
geom_dotplot(data = hospital_df[hospital_df$Group == "Baseline", ],
aes(x = Count, fill = Group, color = Group), dotsize = 1) +
scale_fill_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
name = "") +
scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
guide = FALSE) +
scale_y_continuous(expand = c(0,0)) +
facet_wrap("DischargeStatusGroup") +
labs(x = "# of Encounters",
y = "# of Benchmark Institutions") +
theme(axis.text.y = element_blank(),
axis.ticks.x = element_blank())
return(discharge_plot)
})
## -----------<< Benchmark Plot >>-----------
# Side-by-side benchmark view: "gg" shows "Baseline" vs "Me" cost boxplots
# with median labels; "diff" shows the % difference in medians as a diverging
# barplot; the two are combined with cowplot::plot_grid.
# NOTE(review): this reactive is named `plot`, which shadows base::plot inside
# the server scope; consider renaming if it ever causes confusion.
plot <- eventReactive(input$refresh, {
## grab all reactive data frames
main_df <- main_df()
benchmark <- summary_df_benchmark()
me <- summary_df_me()
comparison <- compare_df()
## stack together "Baseline" summary df and "Me" summary df for boxplot labels
all <- union_all(benchmark, me)
## -----------<<< Set Plotting Parameters >>>-----------
# if no cost grouping, don't facet
if(is.null(input$costbreakdowns)){
facet_group <- as.formula(".~Name") # faceting for "gg"
facet_diff_group <- as.formula(".~Difference") # faceting for "diff"
}
# if there is cost grouping, facet by cost grouping
else {
facet_group <- as.formula("CostGrouping~Name") # faceting for "gg
facet_diff_group <- as.formula("CostGrouping~Difference") # faceting for "diff"
}
# if no benchmark grouping, set axis name as "APR-DRG Code" for default
if(is.null(input$benchmarkbreakdowns)){
axis_name <- "APR-DRG Code"
}
# if benchmark grouping, set axis name as combo of all the grouping column names
else {
axis_name <- paste0(input$benchmarkbreakdowns, collapse = " & ")
}
## -----------<<< gg -- "Baseline" vs. "Me" plot >>>-----------
gg <- ggplot(main_df) +
geom_boxplot(aes(x = BenchmarkGrouping, y = Costs, color = Group), position = "dodge") +
# median labels, dodged so they line up with the matching boxplot
geom_text(data = all,
aes(x = BenchmarkGrouping, y = median, label = paste0("$", scales::comma(median)), group = Group,
hjust = -0.2, vjust = -0.5,
fontface = "bold"),
position = position_dodge(width = 0.75), size = 5) +
coord_flip() +
facet_grid(facet_group) +
scale_x_discrete(name = axis_name) +
scale_color_manual(values = c("Baseline" = "#1f78b4", # blue
"Me" = "#ff7f00"), # orange
name = "") +
# lines that separate different groupings
# remove first value in sequence (0.5) because don't want one between panel border and first plot
geom_vline(xintercept = seq(from = 0.5, to = length(unique(comparison[["BenchmarkGrouping"]])) -0.5, by = 1)[-1],
color = "black") +
theme_bw() +
theme(plot.title = element_text(size = 18, face = "bold"),
panel.background = element_rect(fill = "white"),
panel.grid.minor = element_line(color = "lightgray"),
strip.background = element_blank(),
strip.text.y = element_blank(),
axis.ticks = element_blank(),
axis.text = element_text(size = 15),
strip.text.x = element_text(size = 15),
axis.title = element_text(size = 15),
legend.position = "bottom",
legend.text = element_text(size = 24)) +
guides(colour = guide_legend(override.aes = list(size = 2))) +
# NOTE(review): only these six APR-DRG codes get a descriptive title; any
# other input$APRDRG value makes case_when return NA -- confirm the UI only
# offers these six choices.
labs(title = paste0("APR-DRG: ",
case_when(input$APRDRG == "221" ~ "221 - Major Small and Large Bowel Procedures",
input$APRDRG == "225" ~ "225 - Appendectomy",
input$APRDRG == "303" ~ "303 - Dorsal and Lumbar Fusion Proc for Curvature of Back",
input$APRDRG == "420" ~ "420 - Diabetes",
input$APRDRG == "693" ~ "693 - Chemotherapy",
input$APRDRG == "696" ~ "696 - Other Chemotherapy")))
## set axis for costs to be either normal or log based on user input
if(input$scale == TRUE){
gg <- gg +
scale_y_log10(name = "Cost per Encounter\n($)",
labels = scales::dollar)
}
# use normal scale if no input or input is "normal" (default)
if(input$scale == FALSE){
gg <- gg +
scale_y_continuous(name = "Cost per Encounter\n($)",
labels = scales::dollar)
}
## -----------<<< diff -- plot showing differences between "Baseline" and "Me" >>>-----------
diff <- ggplot(comparison,
aes(fill = ifelse(proport_diff_median > 0, 'pos', 'neg'))) + # if % difference positive, then "pos", else "neg" (for setting colors)
geom_bar(aes(x = BenchmarkGrouping, y = proport_diff_median),
stat = 'identity', width = .95) +
# line at 0 mark
geom_hline(color = 'black', yintercept = 0) +
# lines that separate different groupings
# remove first value in sequence (0.5) because don't want one between panel border and first plot
geom_vline(xintercept = seq(from = 0.5, to = length(unique(comparison[["BenchmarkGrouping"]]))-0.5, by = 1)[-1],
color = "black") +
geom_text(aes(label = ifelse(empty_flag == 1,
" NA", # if NA, label "NA" (extra spaces for aesthetic purposes to move it right of vertical line)
paste0(round(proport_diff_median*100, 2), "%")), # label with %, round to 2 decimal places
x = BenchmarkGrouping,
y = case_when(diff_median >= 0 ~ 0.12*max(abs(proport_diff_median)), # if positive %, put it to the right
diff_median < 0 ~ -0.4*max(abs(proport_diff_median)), # if negative %, put it to the left
is.na(diff_median) ~ 0), # if NA because no comparisons, put it at zero and should have "NA" label
fontface = "bold"), size = 5,
hjust = 0.15) +
# symmetric limits so positive and negative bars are visually comparable
scale_y_continuous(name = "Difference\n(%)",
labels = scales::percent,
breaks = scales::pretty_breaks(2),
limits = c(-max(abs(comparison$proport_diff_median)), max(abs(comparison$proport_diff_median)))) +
scale_fill_manual(values = c("neg" = "#33a02c", # green
"pos" = "#e31a1c"), # red
guide = FALSE) +
scale_color_manual(values = c("big" = 'white', "small" = 'grey20'), guide = FALSE) +
coord_flip() +
facet_grid(facet_diff_group) +
theme_bw() +
theme(panel.background = element_rect(fill = "white"),
panel.grid = element_blank(),
strip.background = element_blank(),
axis.title.y = element_blank(),
axis.ticks = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_text(size = 15),
strip.text.x = element_text(size = 15),
axis.text.x = element_text(size = 15))
## -----------<<< full -- gg and diff plots together >>>-----------
full <- plot_grid(gg, diff, ncol = 2, align = "h", axis = "bt", rel_widths = c(1.5, 0.5))
return(full)
})
## -----------<< Cost Savings Plot >>-----------
# Diverging barplot of potential cost savings per APR-DRG code:
# Impact = (my median cost - benchmark median cost) * my encounter volume.
# Positive bars are savings opportunities; negative bars mean "Me" is already
# below the benchmark.
costsavings_plot <- eventReactive(input$view_opportunities, {
df <- hospital_df()
df <- df %>%
group_by(APRDRGCODE, Group) %>%
summarise(MedianCost = median(Costs),
N = sum(Count)) %>% ungroup()
# reshape long -> wide so Me and Baseline stats sit in separate columns
df <- data.table::dcast(setDT(df), APRDRGCODE ~ Group, value.var = c("MedianCost", "N")) %>%
mutate(MedianCost_Diff = MedianCost_Me - MedianCost_Baseline,
N_Diff = N_Me - N_Baseline,
Impact = N_Me * MedianCost_Diff,
Direction = ifelse(Impact < 0, "Below the Benchmark", "Cost Savings Opportunity"),
APRDRGCODE = labelAPRDRG(APRDRGCODE, values = TRUE)) %>%
filter(!is.na(MedianCost_Diff))
# order the bars by Impact so the biggest opportunity is at the top after coord_flip
df$APRDRGCODE <- factor(df$APRDRGCODE, levels = df$APRDRGCODE[order(df$Impact)], ordered = TRUE)
costsavings_plot <- ggplot(df) +
geom_bar(aes(x = APRDRGCODE, y = Impact, fill = Direction),
stat = 'identity', width = .95) +
# line at 0 mark
geom_hline(color = 'black', yintercept = 0) +
# lines that separate different groupings
# remove first value in sequence (0.5) because don't want one between panel border and first plot
geom_vline(xintercept = seq(from = 0.5, to = length(unique(df[["APRDRGCODE"]]))-0.5, by = 1)[-1],
color = "black") +
geom_text(aes(label = ifelse(Impact >= 0,
paste0("Potential Savings: ", scales::dollar(Impact), "\n# of Encounters: ", N_Me, "\nCost Difference per Encounter: ", scales::dollar(MedianCost_Diff)),
paste0("Current Savings: ", scales::dollar(Impact), "\n# of Encounters: ", N_Me, "\nCost Difference per Encounter: ", scales::dollar(MedianCost_Diff))),
x = APRDRGCODE,
y = case_when(is.na(MedianCost_Diff) ~ 0, # if NA because no comparisons, put it at zero and should have "NA" label
Impact >= 0 ~ 0.15*(min(abs(coalesce(df$Impact, 0)))),
Impact < 0 ~ -0.15*(min(abs(coalesce(df$Impact, 0))))),
fontface = "bold"), size = 5, hjust = ifelse(df$Impact >= 0, 0, 1)) +
scale_fill_manual(values = c("Below the Benchmark" = "#dadaeb", # light purple
"Cost Savings Opportunity" = "#807dba"), # darker purple
guide = FALSE) +
# wide expansion leaves room for the multi-line text labels on each bar
scale_y_continuous(name = paste0("Cost Difference between Me and the Benchmark\n(My Cost per Encounter - Benchmark Cost per Encounter) * My Volume of Encounters"),
labels = scales::dollar,
expand = c(0.8, 0.8)
) +
coord_flip() +
labs(x = "APR-DRG Code")
return(costsavings_plot)
})
## -----------< Reactive Tables >-----------
# table for comparison hospitals / benchmark
# Displays the grouping columns plus median / mean / N for the benchmark
# summary. The ROM/SOI keep/remove logic mirrors compare_df and summary_df_me.
benchmark_table <- eventReactive(input$refresh, {
# initialize empty variables to indicate keep/remove ROM/SOI for custom grouping option
remove_ROM <- c()
keep_ROM <- c()
remove_SOI <- c()
keep_SOI <- c()
# if have custom grouping for ROM, remove ROM column from grouping and add custom grouping column for ROM
if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
remove_ROM <- c("ROM")
keep_ROM <- c("ROM_group")
}
# if have custom grouping for SOI, remove SOI column from grouping and add custom grouping column for SOI
if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
remove_SOI <- c("SOI")
keep_SOI <- ("SOI_group")
}
# grab all of the possible groupings
all_groupings <- c(input$benchmarkbreakdowns, keep_ROM, keep_SOI)
# remove unwanted groupings if specified
keep_benchmarkgroups <- setdiff(all_groupings, c(remove_ROM, remove_SOI))
benchmark_df <- summary_df_benchmark()
# only select grouping parameters, median, mean, and number of observations
# (note: `select` here is a plain character vector, not dplyr::select)
select <- c(keep_benchmarkgroups, input$costbreakdowns, "median", "mean", "obs")
benchmark_df <- benchmark_df[,select]
# rename columns
colnames(benchmark_df) <- c(keep_benchmarkgroups, input$costbreakdowns, "Median", "Mean", "N")
return(benchmark_df)
})
# table for hospital to benchmark / "Me"
# Displays the grouping columns plus Median / Mean / N for the "Me" summary.
me_table <- eventReactive(input$refresh, {
# Work out which grouping columns to display: when a custom ROM/SOI split is
# defined, the raw column is swapped for its *_group counterpart.
drop_cols <- c()
add_cols <- c()
if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
drop_cols <- c(drop_cols, "ROM")
add_cols <- c(add_cols, "ROM_group")
}
if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
drop_cols <- c(drop_cols, "SOI")
add_cols <- c(add_cols, "SOI_group")
}
group_cols <- setdiff(c(input$benchmarkbreakdowns, add_cols), drop_cols)
# keep only the grouping columns plus the summary statistics of interest
keep_cols <- c(group_cols, input$costbreakdowns, "median", "mean", "obs")
me_df <- summary_df_me()[, keep_cols]
# human-friendly column headers for the rendered table
colnames(me_df) <- c(group_cols, input$costbreakdowns, "Median", "Mean", "N")
me_df
})
# table with comparison information between benchmarks and "Me"
# Displays differences and % differences (converted from proportions to
# percentages here) in medians and means. ROM/SOI logic mirrors compare_df.
compare_table <- eventReactive(input$refresh, {
# initialize empty variables to indicate keep/remove ROM/SOI for custom grouping option
remove_ROM <- c()
keep_ROM <- c()
remove_SOI <- c()
keep_SOI <- c()
# if have custom grouping for ROM, remove ROM column from grouping and add custom grouping column for ROM
if((!is.null(input$rom_1) & !is.null(input$rom_2)) & "ROM" %in% input$benchmarkbreakdowns){
remove_ROM <- c("ROM")
keep_ROM <- c("ROM_group")
}
# if have custom grouping for SOI, remove SOI column from grouping and add custom grouping column for SOI
if((!is.null(input$soi_1) & !is.null(input$soi_2)) & "SOI" %in% input$benchmarkbreakdowns){
remove_SOI <- c("SOI")
keep_SOI <- ("SOI_group")
}
# grab all of the possible groupings
all_groupings <- c(input$benchmarkbreakdowns, keep_ROM, keep_SOI)
# remove unwanted groupings if specified
keep_benchmarkgroups <- setdiff(all_groupings, c(remove_ROM, remove_SOI))
compare_df <- compare_df() %>%
mutate(proport_diff_median = proport_diff_median*100, # calculate % diff in medians
proport_diff_mean = proport_diff_mean*100) # calculate % diff in means
# only select grouping parameters, difference in medians, % difference in medians, difference in means, and % difference in means
select <- c(keep_benchmarkgroups, input$costbreakdowns, "diff_median", "proport_diff_median", "diff_mean", "proport_diff_mean")
compare_df <- compare_df[,select]
# rename columns
colnames(compare_df) <- c(keep_benchmarkgroups, input$costbreakdowns,
"Difference in Medians", "% Difference in Median", "Difference in Means", "% Difference in Mean")
return(compare_df)
})
## -----------< Stable Outputs >-----------
# Render the six benchmark-composition distribution plots built above.
output$aprdrg_plot <- renderPlot({
aprdrg_plot()
})
output$rom_plot <- renderPlot({
rom_plot()
})
output$soi_plot <- renderPlot({
soi_plot()
})
output$age_plot <- renderPlot({
age_plot()
})
output$type_plot <- renderPlot({
type_plot()
})
output$discharge_plot <- renderPlot({
discharge_plot()
})
# Text listing the benchmark institutions selected by brushing any of the six
# distribution plots. Each df_i pipeline below must mirror the data prep of
# the corresponding plot exactly, otherwise brushedPoints() will not match
# the plotted coordinates.
output$plotbrush_output <- renderText({
# if haven't created benchmark, can't select points so output empty string
# NOTE(review): this relies on the action button's initial value (0) comparing
# equal to FALSE before the first click -- confirm that is the intent.
if(input$hospital_refresh == FALSE){
out <- ""
}
# if created benchmark, can start selecting points
else {
df <- hospital_df()
if(any(!is.null(input$aprdrg_plotbrush), !is.null(input$rom_plotbrush), !is.null(input$soi_plotbrush),
!is.null(input$age_plotbrush), !is.null(input$type_plotbrush), !is.null(input$discharge_plotbrush))){
# accumulate selected institutions from each brushed plot
out_aprdrg <- c()
out_rom <- c()
out_soi <- c()
out_age <- c()
out_type <- c()
out_discharge <- c()
# if brushed over APRDRG distribution plot
if(!is.null(input$aprdrg_plotbrush)){
df1 <- df %>%
filter(Group == "Baseline") %>%
group_by(Group, APRDRGCODE, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(APRDRGCODE = str_wrap(labelAPRDRG(APRDRGCODE, values = TRUE), width = 20))
out_aprdrg <- brushedPoints(df = df1, brush = input$aprdrg_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over ROM distribution plot
if(!is.null(input$rom_plotbrush)){
df2 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, ROM, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup()
out_rom <- brushedPoints(df = df2, brush = input$rom_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over SOI distribution plot
if(!is.null(input$soi_plotbrush)){
df3 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, SOI, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup()
out_soi <- brushedPoints(df = df3, brush = input$soi_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over age distribution plot
if(!is.null(input$age_plotbrush)){
df4 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, AgeBucket, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(AgeBucket = case_when(AgeBucket == "Infant" ~ "Infant (less than 1 yr)",
AgeBucket == "Toddler" ~ "Toddler (13 mos - 23 mos)",
AgeBucket == "Early Childhood" ~ "Early Childhood (2 yrs - 5 yrs)",
AgeBucket == "Middle Childhood" ~ "Middle Childhood (6 yrs - 11 yrs)",
AgeBucket == "Adolescence" ~ "Adolescence (12 yrs - 17 yrs)",
AgeBucket == "Adult" ~ "Adult (18 years or older)"))
out_age <- brushedPoints(df = df4, brush = input$age_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over patient type distribution plot
if(!is.null(input$type_plotbrush)){
df5 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, PatientTypeRollup, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup()
out_type <- brushedPoints(df = df5, brush = input$type_plotbrush, xvar = "Count")$customer_entity
}
# if brushed over patient discharge status distribution plot
if(!is.null(input$discharge_plotbrush)){
df6 <- df %>%
filter(Group == "Baseline") %>%
filter(!is.na(APRDRG_benchmark)) %>%
group_by(Group, DischargeStatusGroup, customer_entity) %>%
summarise(Count = sum(Count)) %>% ungroup() %>%
mutate(DischargeStatusGroup = ifelse(DischargeStatusGroup == "Still a Patient", "Inhouse", DischargeStatusGroup))
out_discharge <- brushedPoints(df = df6, brush = input$discharge_plotbrush, xvar = "Count")$customer_entity
}
# de-duplicate and join with HTML line breaks for renderText display
out <- paste0(unique(c(out_aprdrg, out_rom, out_soi, out_age, out_type, out_discharge)), collapse = "<br/>")
}
# if all are null
else {
out <- ""
}
}
return(out)
})
# NOTE(review): a second, identical registration of output$soi_plot used to
# live here; it silently overwrote the one defined with the other
# distribution plots above, so the redundant duplicate has been removed.
# cost savings opportunities plot
output$costsavings_plot <- renderPlot({
costsavings_plot()
})
# benchmarking plot (`plot` is the eventReactive defined above, not base::plot)
output$plot <- renderPlot({
plot()
})
# table for benchmarks
output$summary_df_benchmark <- renderDataTable({
benchmark_table()
})
# table for "Me"
output$summary_df_me <- renderDataTable({
me_table()
})
# table with comparisons between benchmark and "Me"
output$compare_df <- renderDataTable({
compare_table()
})
## -----------< Session >-----------
session$allowReconnect("force")
}
#### RUN APP ####
# Construct and launch the Shiny application from the ui and server objects defined above.
shinyApp(ui = ui, server = server)
|
######################################################################################
# Modified based on the supplemental materials of Yu, Downes, Carter, and O'Boyle (2018)
######################################################################################
library(metaSEM);require('matrixcalc');library(OpenMx);library(Matrix);library(MASS)
# Root mean squared residual between an observed matrix (oC) and a
# model-implied matrix (mC): the sum of squared residuals over the full
# p x p matrix, normalised by p*(p+1), then square-rooted. For correlation
# matrices (zero diagonal residuals) this equals the usual SRMR.
mySRMR <- function(oC, mC) {
  sq_resid <- (oC - mC)^2
  p <- nrow(oC)
  sqrt(sum(sq_resid) / (p * (p + 1)))
}
# Data preparation
#----------------------------------------------------------
## Remove studies that did not report bivariate correlations
index <- Gnambs18$CorMat==1
Gnambs18 <- lapply(Gnambs18, function(x) x[index])
Ni = Gnambs18$n # sample sizes within primary studies
NpS = 1/mean(1/Ni) # harmonic mean of sample sizes within primary studies
#Nps = mean(Ni)
k = length(Gnambs18$data) # number of primary studies
reps <- 10000 # number of replications
#Reformat the data for TSSEM input
# NOTE(review): paste('I',1:10) yields names like "I 1" (with a space);
# internally consistent here, but confirm paste0 wasn't intended.
vnames <- paste('I',1:10)
# reorder items so the two latent factors' indicators are contiguous
cormats <- lapply(Gnambs18$data,function(x) x = x[c(1,3,4,7,10,2,5,6,8,9),c(1,3,4,7,10,2,5,6,8,9)])
# Conduct multivariate FIMASEM
#----------------------------------------------------------
# Stage 1: Run TSSEM to obtain mean correlation vector and its covariance matrix
step.one <- tssem1(cormats,Ni,method="REM",RE.type="Diag")
# rebuild the pooled correlation matrix from the fixed-effect estimates
# NOTE(review): assumes coef(select="fixed") is ordered to match R's
# column-major lower-triangle filling -- confirm against metaSEM conventions.
rho.mult <- diag(1,nrow=length(vnames),ncol=length(vnames))
rho.mult[lower.tri(rho.mult)] <- (coef(step.one,select="fixed"))
temp <- t(rho.mult)
rho.mult[upper.tri(rho.mult)] <- temp[upper.tri(temp)]
# between-study (co)variance matrix; diagonal because RE.type="Diag"
sigma.mult <- diag(coef(step.one,select="random"))
dimnames(rho.mult) <- list(vnames,vnames)
# Stage 2
# Step 1: Simulated a large numbers of correlation vectors
# oversample (reps+500) then keep the first `reps` positive-definite draws
# NOTE(review): assumes at least `reps` PD matrices among reps+500 draws;
# otherwise [1:reps] introduces NA entries -- confirm.
matrices.mult <- rCorPop(rho.mult,sigma.mult,corr=T,k=reps+500,nonPD.pop="nearPD")
matrices.mult <- matrices.mult[which(sapply(matrices.mult,is.positive.definite))][1:reps]
# Step 2: Fit the studied SEM model to each of the simulated correlation vectors
# CFA formulation: two correlated factors, items 1-5 on factor 1 and 6-10 on factor 2
# Factor loading matrix (10 items x 2 factors), starting values 0.6
L.values = matrix(c(rep(0.6,5),rep(0,10),rep(0.6,5)),10,2,byrow = F)
# first loading of each factor bounded below at 0 to fix the factor's sign
L.lbound = matrix(c(0,rep(-1,4),rep(0,11),rep(-1,4)),10,2,byrow = F)
L.ubound = matrix(c(rep(1,5),rep(0,10),rep(1,5)),10,2,byrow = F)
L.labels = matrix(c(paste('L',1:5,sep=''),rep(NA,10),paste('L',6:10,sep='')),10,2,byrow = F)
L.free = L.values!=0
L <- mxMatrix(type = 'Full',free = L.free,values = L.values,labels = L.labels,
lbound = L.lbound,ubound = L.ubound,name="L")
# Factor correlation matrix: unit variances fixed, correlation 'rho' free
Phi <- mxMatrix(type = 'Symm',nrow = 2,ncol = 2,free = c(FALSE,TRUE,FALSE),values = c(1,.3,1),
labels = c(NA,'rho',NA),name = 'Phi',lbound = rep(-1,3),ubound = rep(1,3))
# Uniqueness (residual variances), one free diagonal element per item
U.values = diag(0.1,10)
U.lbound = diag(0,10)
U.ubound = diag(1,10)
U.labels = matrix(NA,10,10)
diag(U.labels) = paste('s2e',1:10,sep='')
U.free = U.values!=0
U <- mxMatrix(type = 'Diag',free = U.free,values = U.values,labels = U.labels,
lbound = U.lbound,ubound = U.ubound, name="U")
# CFA model-implied covariance matrix: Sigma = L Phi L' + U
ecov <- mxAlgebra(L%*%Phi%*%t(L) + U , name="expCov")
expectation <- mxExpectationNormal(cov="expCov",dimnames = vnames)
# Run SEM on those random matrices
# For each simulated matrix: fit the CFA with ML, retry once on optimizer
# status 6 (not-converged/red flag), and collect coefficients, SRMR, CFI and
# the final optimizer status code.
# NOTE(review): the fit function is passed via the named argument `funML=`;
# confirm mxModel() accepts arbitrary names for model objects in `...`.
coefs.fits.multivariate.FIMASEM <- as.data.frame(t(sapply(1:reps,function(i) {
openmxmodel <- mxModel("temp",mxData(matrices.mult[[i]],type="cov",numObs = NpS),
L,Phi,U,ecov, expectation,funML=mxFitFunctionML());
openmxfit <- mxRun(openmxmodel,silent=T);
# retry once from the previous solution if the optimizer flagged status 6
if (openmxfit$output$status[[1]] == 6) { openmxfit <- mxRun(openmxfit,silent=T) }
modelsummary <- summary(openmxfit);
coefs <- coef(openmxfit)
coef.names <- names(coefs)
mC <- openmxfit$expCov$result
oC <- matrices.mult[[i]]
output <- c(coefs,mySRMR(oC,mC),modelsummary$CFI,
openmxfit$output$status[[1]]);
names(output) <- c(names(coefs),'SRMR','CFI','openMxStatus')
output
}))) #returns a dataframe of SEM parameter estimates (i.e., fit indices and path coefficients)
# Get results
#----------------------------------------------------------
# Summarise parameter estimates and fit indices over the replications that
# converged cleanly (optimizer status 0). Indexing by the kept rows rather
# than x[-del.id] fixes the edge case where no replication failed:
# which() then returns integer(0) and x[-integer(0)] drops *all* rows,
# turning every summary statistic into NaN/NA. The status column is also
# referenced by name instead of the magic index [,24].
del.id <- which(coefs.fits.multivariate.FIMASEM$openMxStatus > 0)
keep.id <- which(coefs.fits.multivariate.FIMASEM$openMxStatus <= 0)
coefs.keep <- coefs.fits.multivariate.FIMASEM[keep.id, ]
m <- sapply(coefs.keep, mean)
sdv <- sapply(coefs.keep, sd)
UL <- m + qnorm(.90)*sdv # upper limits
LL <- m - qnorm(.90)*sdv # lower limits
# SEM parameter means, sds, and lower & upper limits of 80% credibility intervals
round(cbind(m,sdv,LL,UL),3)
print('% SRMR < .10')
print(sum(coefs.keep$SRMR < .1)/length(keep.id))
print('% CFI > .90')
print(sum(coefs.keep$CFI > .90)/length(keep.id))
# proportion of replications discarded for a non-zero optimizer status
length(del.id)/reps
| /CFA/FIMASEM/FIMASEM_CorrelatedTraits.R | no_license | zijunke/Compare-3-MASEM-Approches | R | false | false | 4,675 | r | ######################################################################################
# Modified based on the supplemental materials of Yu, Downes, Carter, and O'Boyle (2018)
######################################################################################
library(metaSEM);require('matrixcalc');library(OpenMx);library(Matrix);library(MASS)
mySRMR <- function(oC,mC){
p = nrow(oC)
return(sqrt(sum((oC-mC)^2)/p/(p+1)))
}
# Data preparation
#----------------------------------------------------------
## Remove studies that did not report bivariate correlations
# Gnambs18 is a list of parallel components (data, n, CorMat, ...);
# subset every component with the same index so they stay aligned.
index <- Gnambs18$CorMat==1
Gnambs18 <- lapply(Gnambs18, function(x) x[index])
Ni = Gnambs18$n # sample sizes within primary studies
NpS = 1/mean(1/Ni) # harmonic mean of sample sizes within primary studies
#Nps = mean(Ni)
k = length(Gnambs18$data) # number of primary studies
reps <- 10000 # number of replications
#Reformat the data for TSSEM input
# Variable labels "I 1" ... "I 10" (paste() inserts a space by default)
vnames <- paste('I',1:10)
# Reorder rows/columns so the five indicators of each latent factor are
# contiguous (items 1,3,4,7,10 first, then items 2,5,6,8,9)
cormats <- lapply(Gnambs18$data,function(x) x = x[c(1,3,4,7,10,2,5,6,8,9),c(1,3,4,7,10,2,5,6,8,9)])
# Conduct multivariate FIMASEM
#----------------------------------------------------------
# Stage 1: Run TSSEM to obtain mean correlation vector and its covariance matrix
step.one <- tssem1(cormats,Ni,method="REM",RE.type="Diag")
# Rebuild the pooled correlation matrix from the fixed-effect estimates:
# fill the lower triangle, then mirror it into the upper triangle.
rho.mult <- diag(1,nrow=length(vnames),ncol=length(vnames))
rho.mult[lower.tri(rho.mult)] <- (coef(step.one,select="fixed"))
temp <- t(rho.mult)
rho.mult[upper.tri(rho.mult)] <- temp[upper.tri(temp)]
# Between-study heterogeneity variances (RE.type="Diag" => diagonal matrix)
sigma.mult <- diag(coef(step.one,select="random"))
dimnames(rho.mult) <- list(vnames,vnames)
# Stage 2
# Step 1: Simulate a large number of correlation vectors.
# Draw reps + 500 matrices so that, after discarding non-positive-definite
# draws, at least `reps` usable matrices remain (the first `reps` are kept).
matrices.mult <- rCorPop(rho.mult,sigma.mult,corr=T,k=reps+500,nonPD.pop="nearPD")
matrices.mult <- matrices.mult[which(sapply(matrices.mult,is.positive.definite))][1:reps]
# Step 2: Fit the studied SEM model to each of the simulated correlation vectors
# CFA formulation
# Factor loading matrix: 10 items x 2 factors. Items 1-5 load on factor 1,
# items 6-10 on factor 2; cross-loadings are fixed to 0. Start values 0.6.
L.values = matrix(c(rep(0.6,5),rep(0,10),rep(0.6,5)),10,2,byrow = F)
# First loading of each factor is bounded at [0, 1] to fix the factor's sign;
# the remaining loadings may range over [-1, 1].
L.lbound = matrix(c(0,rep(-1,4),rep(0,11),rep(-1,4)),10,2,byrow = F)
L.ubound = matrix(c(rep(1,5),rep(0,10),rep(1,5)),10,2,byrow = F)
L.labels = matrix(c(paste('L',1:5,sep=''),rep(NA,10),paste('L',6:10,sep='')),10,2,byrow = F)
# Only the nonzero start values are free parameters.
L.free = L.values!=0
L <- mxMatrix(type = 'Full',free = L.free,values = L.values,labels = L.labels,
lbound = L.lbound,ubound = L.ubound,name="L")
# Factor correlation matrix: variances fixed at 1 (standardized factors);
# the single free parameter is the inter-factor correlation 'rho'.
Phi <- mxMatrix(type = 'Symm',nrow = 2,ncol = 2,free = c(FALSE,TRUE,FALSE),values = c(1,.3,1),
labels = c(NA,'rho',NA),name = 'Phi',lbound = rep(-1,3),ubound = rep(1,3))
# Uniqueness (residual) variances: one free diagonal element per item,
# bounded to [0, 1], start value 0.1.
U.values = diag(0.1,10)
U.lbound = diag(0,10)
U.ubound = diag(1,10)
U.labels = matrix(NA,10,10)
diag(U.labels) = paste('s2e',1:10,sep='')
U.free = U.values!=0
U <- mxMatrix(type = 'Diag',free = U.free,values = U.values,labels = U.labels,
lbound = U.lbound,ubound = U.ubound, name="U")
# CFA model-implied covariance matrix: Sigma = L Phi L' + U
ecov <- mxAlgebra(L%*%Phi%*%t(L) + U , name="expCov")
expectation <- mxExpectationNormal(cov="expCov",dimnames = vnames)
# Run SEM on those random matrices
coefs.fits.multivariate.FIMASEM <- as.data.frame(t(sapply(1:reps,function(i) {
# Fit the two-factor CFA to the i-th simulated correlation matrix, treating
# it as a covariance matrix observed on NpS (harmonic-mean) cases.
openmxmodel <- mxModel("temp",mxData(matrices.mult[[i]],type="cov",numObs = NpS),
L,Phi,U,ecov, expectation,funML=mxFitFunctionML());
openmxfit <- mxRun(openmxmodel,silent=T);
# Optimizer status 6 ("red" status): re-run once from the previous solution
# to give the optimizer a second chance at convergence.
if (openmxfit$output$status[[1]] == 6) { openmxfit <- mxRun(openmxfit,silent=T) }
modelsummary <- summary(openmxfit);
coefs <- coef(openmxfit)
coef.names <- names(coefs)
# Model-implied vs. simulated ("observed") covariance matrices for SRMR.
mC <- openmxfit$expCov$result
oC <- matrices.mult[[i]]
# One row per replication: free parameters, SRMR, CFI, and the final
# optimizer status code (used downstream to screen out bad replications).
output <- c(coefs,mySRMR(oC,mC),modelsummary$CFI,
openmxfit$output$status[[1]]);
names(output) <- c(names(coefs),'SRMR','CFI','openMxStatus')
output
}))) #returns a dataframe of SEM parameter estimates (i.e., fit indices and path coefficients)
# Get results
#----------------------------------------------------------
# Flag replications whose optimizer status code is > 0 (questionable fit).
# Use the named 'openMxStatus' column instead of the hard-coded index 24,
# which silently breaks if the number of free parameters changes.
converged <- !(coefs.fits.multivariate.FIMASEM$openMxStatus > 0)
del.id = which(!converged)
# NOTE: the previous x[-del.id] form drops *every* element when del.id is
# integer(0) (empty negative index); logical subsetting is safe in all cases.
m <- sapply(coefs.fits.multivariate.FIMASEM,function(x){mean(x[converged])})
sdv <- sapply(coefs.fits.multivariate.FIMASEM,function(x){sd(x[converged])})
UL <- m + qnorm(.90)*sdv # upper limits of the 80% credibility intervals
LL <- m - qnorm(.90)*sdv # lower limits
# SEM parameter means, sds, and lower & upper limits of credibility intervals
round(cbind(m,sdv,LL,UL),3)
print('% SRMR < .10')
print(sum(coefs.fits.multivariate.FIMASEM$SRMR[converged] < .1)/(reps-length(del.id)))
print('% CFI > .90')
print(sum(coefs.fits.multivariate.FIMASEM$CFI[converged] > .90)/(reps-length(del.id)))
# Proportion of replications discarded for non-convergence
length(del.id)/reps
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/future_modify.R
\name{future_modify}
\alias{future_modify}
\alias{future_modify_at}
\alias{future_modify_if}
\title{Modify elements selectively via futures}
\usage{
future_modify(.x, .f, ..., .progress = FALSE, future.globals = TRUE,
future.packages = NULL, future.seed = FALSE, future.lazy = FALSE,
future.scheduling = 1)
future_modify_at(.x, .at, .f, ..., .progress = FALSE, future.globals = TRUE,
future.packages = NULL, future.seed = FALSE, future.lazy = FALSE,
future.scheduling = 1)
future_modify_if(.x, .p, .f, ..., .progress = FALSE, future.globals = TRUE,
future.packages = NULL, future.seed = FALSE, future.lazy = FALSE,
future.scheduling = 1)
}
\arguments{
\item{.x}{A list or atomic vector.}
\item{.f}{A function, formula, or atomic vector.
If a \strong{function}, it is used as is.
If a \strong{formula}, e.g. \code{~ .x + 2}, it is converted to a function. There
are three ways to refer to the arguments:
\itemize{
\item For a single argument function, use \code{.}
\item For a two argument function, use \code{.x} and \code{.y}
\item For more arguments, use \code{..1}, \code{..2}, \code{..3} etc
}
This syntax allows you to create very compact anonymous functions.
If \strong{character vector}, \strong{numeric vector}, or \strong{list}, it
is converted to an extractor function. Character vectors index by name
and numeric vectors index by position; use a list to index by position
and name at different levels. Within a list, wrap strings in \code{\link[=get-attr]{get-attr()}}
to extract named attributes. If a component is not present, the value of
\code{.default} will be returned.}
\item{...}{Additional arguments passed on to \code{.f}.}
\item{.progress}{A logical, for whether or not to print a progress bar for
multiprocess, multisession, and multicore plans.}
\item{future.globals}{A logical, a character vector, or a named list for
controlling how globals are handled. For details, see below section.}
\item{future.packages}{(optional) a character vector specifying packages
to be attached in the R environment evaluating the future.}
\item{future.seed}{A logical or an integer (of length one or seven),
or a list of \code{length(.x)} with pre-generated random seeds.
For details, see below section.}
\item{future.lazy}{Specifies whether the futures should be resolved
lazily or eagerly (default).}
\item{future.scheduling}{Average number of futures ("chunks") per worker.
If \code{0.0}, then a single future is used to process all elements
of \code{.x}.
If \code{1.0} or \code{TRUE}, then one future per worker is used.
If \code{2.0}, then each worker will process two futures
(if there are enough elements in \code{.x}).
If \code{Inf} or \code{FALSE}, then one future per element of
\code{.x} is used.}
\item{.at}{A character vector of names or a numeric vector of
positions. Only those elements corresponding to \code{.at} will be
modified.}
\item{.p}{A single predicate function, a formula describing such a
predicate function, or a logical vector of the same length as \code{.x}.
Alternatively, if the elements of \code{.x} are themselves lists of
objects, a string indicating the name of a logical element in the
inner lists. Only those elements where \code{.p} evaluates to
\code{TRUE} will be modified.}
}
\value{
An object the same class as .x
}
\description{
These functions work exactly the same as \code{\link[purrr:modify]{purrr::modify()}} functions, but allow
you to modify in parallel. There are a number of \code{future.*} arguments
to allow you to fine tune the parallel processing.
}
\details{
(From purrr) Since the transformation can alter the structure of the input,
it's your responsibility to ensure that the transformation produces a valid
output. For example, if you're modifying a data frame, \code{.f} must preserve the
length of the input.
}
\section{Global variables}{
Argument \code{future.globals} may be used to control how globals
should be handled similarly how the \code{globals} argument is used with
\code{future()}.
Since all function calls use the same set of globals, this function can do
any gathering of globals upfront (once), which is more efficient than if
it would be done for each future independently.
If \code{TRUE}, \code{NULL}, or not specified (default), then globals
are automatically identified and gathered.
If a character vector of names is specified, then those globals are gathered.
If a named list, then those globals are used as is.
In all cases, \code{.f} and any \code{...} arguments are automatically
passed as globals to each future created as they are always needed.
}
\section{Reproducible random number generation (RNG)}{
Unless \code{future.seed = FALSE}, this function guarantees to generate
the exact same sequence of random numbers \emph{given the same initial
seed / RNG state} - this regardless of type of futures and scheduling
("chunking") strategy.
RNG reproducibility is achieved by pregenerating the random seeds for all
iterations (over \code{.x}) by using L'Ecuyer-CMRG RNG streams. In each
iteration, these seeds are set before calling \code{.f(.x[[ii]], ...)}.
\emph{Note, for large \code{length(.x)} this may introduce a large overhead.}
As input (\code{future.seed}), a fixed seed (integer) may be given, either
as a full L'Ecuyer-CMRG RNG seed (vector of 1+6 integers) or as a seed
generating such a full L'Ecuyer-CMRG seed.
If \code{future.seed = TRUE}, then \code{\link[base:Random]{.Random.seed}}
is returned if it holds a L'Ecuyer-CMRG RNG seed, otherwise one is created
randomly.
If \code{future.seed = NA}, a L'Ecuyer-CMRG RNG seed is randomly created.
If none of the function calls \code{.f(.x[[ii]], ...)} uses random number
generation, then \code{future.seed = FALSE} may be used.
In addition to the above, it is possible to specify a pre-generated
sequence of RNG seeds as a list such that
\code{length(future.seed) == length(.x)} and where each element is an
integer seed that can be assigned to \code{\link[base:Random]{.Random.seed}}.
Use this alternative with caution.
\strong{Note that \code{as.list(seq_along(.x))} is \emph{not} a valid set of such
\code{.Random.seed} values.}
In all cases but \code{future.seed = FALSE}, the RNG state of the calling
R processes after this function returns is guaranteed to be
"forwarded one step" from the RNG state that was before the call and
in the same way regardless of \code{future.seed}, \code{future.scheduling}
and future strategy used. This is done in order to guarantee that an \R
script calling \code{future_modify()} multiple times should be numerically
reproducible given the same initial seed.
}
\examples{
library(furrr)
library(dplyr) # for the pipe
plan(multiprocess)
# Convert each col to character, in parallel
future_modify(mtcars, as.character)
iris \%>\%
future_modify_if(is.factor, as.character) \%>\%
str()
mtcars \%>\% future_modify_at(c(1, 4, 5), as.character) \%>\% str()
}
| /man/future_modify.Rd | no_license | NanaAkwasiAbayieBoateng/furrr | R | false | true | 6,991 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/future_modify.R
\name{future_modify}
\alias{future_modify}
\alias{future_modify_at}
\alias{future_modify_if}
\title{Modify elements selectively via futures}
\usage{
future_modify(.x, .f, ..., .progress = FALSE, future.globals = TRUE,
future.packages = NULL, future.seed = FALSE, future.lazy = FALSE,
future.scheduling = 1)
future_modify_at(.x, .at, .f, ..., .progress = FALSE, future.globals = TRUE,
future.packages = NULL, future.seed = FALSE, future.lazy = FALSE,
future.scheduling = 1)
future_modify_if(.x, .p, .f, ..., .progress = FALSE, future.globals = TRUE,
future.packages = NULL, future.seed = FALSE, future.lazy = FALSE,
future.scheduling = 1)
}
\arguments{
\item{.x}{A list or atomic vector.}
\item{.f}{A function, formula, or atomic vector.
If a \strong{function}, it is used as is.
If a \strong{formula}, e.g. \code{~ .x + 2}, it is converted to a function. There
are three ways to refer to the arguments:
\itemize{
\item For a single argument function, use \code{.}
\item For a two argument function, use \code{.x} and \code{.y}
\item For more arguments, use \code{..1}, \code{..2}, \code{..3} etc
}
This syntax allows you to create very compact anonymous functions.
If \strong{character vector}, \strong{numeric vector}, or \strong{list}, it
is converted to an extractor function. Character vectors index by name
and numeric vectors index by position; use a list to index by position
and name at different levels. Within a list, wrap strings in \code{\link[=get-attr]{get-attr()}}
to extract named attributes. If a component is not present, the value of
\code{.default} will be returned.}
\item{...}{Additional arguments passed on to \code{.f}.}
\item{.progress}{A logical, for whether or not to print a progress bar for
multiprocess, multisession, and multicore plans.}
\item{future.globals}{A logical, a character vector, or a named list for
controlling how globals are handled. For details, see below section.}
\item{future.packages}{(optional) a character vector specifying packages
to be attached in the R environment evaluating the future.}
\item{future.seed}{A logical or an integer (of length one or seven),
or a list of \code{length(.x)} with pre-generated random seeds.
For details, see below section.}
\item{future.lazy}{Specifies whether the futures should be resolved
lazily or eagerly (default).}
\item{future.scheduling}{Average number of futures ("chunks") per worker.
If \code{0.0}, then a single future is used to process all elements
of \code{.x}.
If \code{1.0} or \code{TRUE}, then one future per worker is used.
If \code{2.0}, then each worker will process two futures
(if there are enough elements in \code{.x}).
If \code{Inf} or \code{FALSE}, then one future per element of
\code{.x} is used.}
\item{.at}{A character vector of names or a numeric vector of
positions. Only those elements corresponding to \code{.at} will be
modified.}
\item{.p}{A single predicate function, a formula describing such a
predicate function, or a logical vector of the same length as \code{.x}.
Alternatively, if the elements of \code{.x} are themselves lists of
objects, a string indicating the name of a logical element in the
inner lists. Only those elements where \code{.p} evaluates to
\code{TRUE} will be modified.}
}
\value{
An object the same class as .x
}
\description{
These functions work exactly the same as \code{\link[purrr:modify]{purrr::modify()}} functions, but allow
you to modify in parallel. There are a number of \code{future.*} arguments
to allow you to fine tune the parallel processing.
}
\details{
(From purrr) Since the transformation can alter the structure of the input,
it's your responsibility to ensure that the transformation produces a valid
output. For example, if you're modifying a data frame, \code{.f} must preserve the
length of the input.
}
\section{Global variables}{
Argument \code{future.globals} may be used to control how globals
should be handled similarly how the \code{globals} argument is used with
\code{future()}.
Since all function calls use the same set of globals, this function can do
any gathering of globals upfront (once), which is more efficient than if
it would be done for each future independently.
If \code{TRUE}, \code{NULL}, or not specified (default), then globals
are automatically identified and gathered.
If a character vector of names is specified, then those globals are gathered.
If a named list, then those globals are used as is.
In all cases, \code{.f} and any \code{...} arguments are automatically
passed as globals to each future created as they are always needed.
}
\section{Reproducible random number generation (RNG)}{
Unless \code{future.seed = FALSE}, this function guarantees to generate
the exact same sequence of random numbers \emph{given the same initial
seed / RNG state} - this regardless of type of futures and scheduling
("chunking") strategy.
RNG reproducibility is achieved by pregenerating the random seeds for all
iterations (over \code{.x}) by using L'Ecuyer-CMRG RNG streams. In each
iteration, these seeds are set before calling \code{.f(.x[[ii]], ...)}.
\emph{Note, for large \code{length(.x)} this may introduce a large overhead.}
As input (\code{future.seed}), a fixed seed (integer) may be given, either
as a full L'Ecuyer-CMRG RNG seed (vector of 1+6 integers) or as a seed
generating such a full L'Ecuyer-CMRG seed.
If \code{future.seed = TRUE}, then \code{\link[base:Random]{.Random.seed}}
is returned if it holds a L'Ecuyer-CMRG RNG seed, otherwise one is created
randomly.
If \code{future.seed = NA}, a L'Ecuyer-CMRG RNG seed is randomly created.
If none of the function calls \code{.f(.x[[ii]], ...)} uses random number
generation, then \code{future.seed = FALSE} may be used.
In addition to the above, it is possible to specify a pre-generated
sequence of RNG seeds as a list such that
\code{length(future.seed) == length(.x)} and where each element is an
integer seed that can be assigned to \code{\link[base:Random]{.Random.seed}}.
Use this alternative with caution.
\strong{Note that \code{as.list(seq_along(.x))} is \emph{not} a valid set of such
\code{.Random.seed} values.}
In all cases but \code{future.seed = FALSE}, the RNG state of the calling
R processes after this function returns is guaranteed to be
"forwarded one step" from the RNG state that was before the call and
in the same way regardless of \code{future.seed}, \code{future.scheduling}
and future strategy used. This is done in order to guarantee that an \R
script calling \code{future_modify()} multiple times should be numerically
reproducible given the same initial seed.
}
\examples{
library(furrr)
library(dplyr) # for the pipe
plan(multiprocess)
# Convert each col to character, in parallel
future_modify(mtcars, as.character)
iris \%>\%
future_modify_if(is.factor, as.character) \%>\%
str()
mtcars \%>\% future_modify_at(c(1, 4, 5), as.character) \%>\% str()
}
|
context("test-HurdlePoisson")
test_that("print.HurdlePoisson works", {
  # Printing must identify the distribution class in its header line.
  expect_output(print(HurdlePoisson(1, 0.7)), regexp = "HurdlePoisson distribution")
})
test_that("random.HurdlePoisson work correctly", {
  # NOTE(review): parameter semantics of HurdlePoisson(1, 0.7) (rate vs.
  # hurdle probability) are defined by the constructor -- confirm there.
  p <- HurdlePoisson(1, 0.7)
  # Default is a single draw; requesting n draws yields a length-n result.
  expect_length(random(p), 1)
  expect_length(random(p, 100), 100)
  # A zero-length distribution vector produces zero draws.
  expect_length(random(p[-1], 1), 0)
  expect_length(random(p, 0), 0)
  # Negative draw counts are an error.
  expect_error(random(p, -2))
  # consistent with base R, using the `length` as number of samples to draw
  expect_length(random(p, c(1, 2, 3)), 3)
  expect_length(random(p, cbind(1, 2, 3)), 3)
  expect_length(random(p, rbind(1, 2, 3)), 3)
})
test_that("pdf.HurdlePoisson work correctly", {
  p <- HurdlePoisson(1, 0.7)
  # Density must agree with the reference implementation dhpois().
  expect_equal(pdf(p, 0), dhpois(0, 1, 0.7))
  expect_equal(pdf(p, 1), dhpois(1, 1, 0.7))
  # Points outside the support (negative counts) have density 0.
  expect_equal(pdf(p, -12), 0)
  # Non-integer points should raise a warning for a discrete distribution.
  expect_warning(pdf(p, 0.5))
  # Output length tracks input length, including the empty case.
  expect_length(pdf(p, seq_len(0)), 0)
  expect_length(pdf(p, seq_len(1)), 1)
  expect_length(pdf(p, seq_len(10)), 10)
})
test_that("log_pdf.HurdlePoisson work correctly", {
  p <- HurdlePoisson(1, 0.7)
  # Log-density must agree with dhpois(..., log = TRUE).
  expect_equal(log_pdf(p, 0), dhpois(0, 1, 0.7, log = TRUE))
  expect_equal(log_pdf(p, 1), dhpois(1, 1, 0.7, log = TRUE))
  # log(0) = -Inf outside the support.
  expect_equal(log_pdf(p, -12), -Inf)
  expect_warning(log_pdf(p, 0.5))
  expect_length(log_pdf(p, seq_len(0)), 0)
  expect_length(log_pdf(p, seq_len(1)), 1)
  expect_length(log_pdf(p, seq_len(10)), 10)
})
test_that("cdf.HurdlePoisson work correctly", {
  p <- HurdlePoisson(1, 0.7)
  # CDF must agree with the reference implementation phpois().
  expect_equal(cdf(p, 0), phpois(0, 1, 0.7))
  expect_equal(cdf(p, 1), phpois(1, 1, 0.7))
  expect_length(cdf(p, seq_len(0)), 0)
  expect_length(cdf(p, seq_len(1)), 1)
  expect_length(cdf(p, seq_len(10)), 10)
})
test_that("quantile.HurdlePoisson work correctly", {
  p <- HurdlePoisson(1, 0.7)
  # Quantile at probability 0 is the lower end of the support; 0.5 the median.
  expect_equal(quantile(p, 0), 0)
  expect_equal(quantile(p, 0.5), 1)
  expect_length(quantile(p, seq_len(0)), 0)
  expect_length(quantile(p, c(0, 1)), 2)
})
test_that("vectorization of a HurdlePoisson distribution work correctly", {
  # A length-2 distribution vector must behave like its two scalar parts d1, d2.
  d <- HurdlePoisson(c(1, 2), 0.7)
  d1 <- d[1]
  d2 <- d[2]
  ## moments: vectorized results equal the concatenation of scalar results
  expect_equal(mean(d), c(mean(d1), mean(d2)))
  expect_equal(variance(d), c(variance(d1), variance(d2)))
  expect_equal(skewness(d), c(skewness(d1), skewness(d2)))
  expect_equal(kurtosis(d), c(kurtosis(d1), kurtosis(d2)))
  ## pdf, log_pdf, cdf
  expect_equal(pdf(d, 0), c(pdf(d1, 0), pdf(d2, 0)))
  expect_equal(log_pdf(d, 0), c(log_pdf(d1, 0), log_pdf(d2, 0)))
  expect_equal(cdf(d, 0.5), c(cdf(d1, 0.5), cdf(d2, 0.5)))
  ## quantile
  expect_equal(quantile(d, 0.5), c(quantile(d1, 0.5), quantile(d2, 0.5)))
  expect_equal(quantile(d, c(0.5, 0.5)), c(quantile(d1, 0.5), quantile(d2, 0.5)))
  # Multiple probabilities per distribution: rows = distributions, columns = q_*.
  expect_equal(
    quantile(d, c(0.1, 0.5, 0.9)),
    matrix(
      rbind(quantile(d1, c(0.1, 0.5, 0.9))), quantile(d2, c(0.1, 0.5, 0.9))),
      ncol = 3, dimnames = list(NULL, c("q_0.1", "q_0.5", "q_0.9"))
    )
  )
  ## elementwise: elementwise = TRUE pairs d[i] with x[i], i.e. the diagonal
  ## of the full cross matrix produced by elementwise = FALSE.
  expect_equal(
    pdf(d, c(0, 1), elementwise = TRUE),
    diag(pdf(d, c(0, 1), elementwise = FALSE))
  )
  expect_equal(
    cdf(d, c(0, 1), elementwise = TRUE),
    diag(cdf(d, c(0, 1), elementwise = FALSE))
  )
  expect_equal(
    quantile(d, c(0.25, 0.75), elementwise = TRUE),
    diag(quantile(d, c(0.25, 0.75), elementwise = FALSE))
  )
  ## support: one row per distribution, columns "min"/"max"
  expect_equal(
    support(d),
    matrix(
      c(support(d1)[1], support(d2)[1], support(d1)[2], support(d2)[2]),
      ncol = 2, dimnames = list(names(d), c("min", "max"))
    )
  )
  expect_true(all(is_discrete(d)))
  expect_true(!any(is_continuous(d)))
  # Scalar support drops to a plain numeric vector unless drop = FALSE.
  expect_true(is.numeric(support(d1)))
  expect_true(is.numeric(support(d1, drop = FALSE)))
  expect_null(dim(support(d1)))
  expect_equal(dim(support(d1, drop = FALSE)), c(1L, 2L))
})
test_that("named return values for HurdlePoisson distribution work correctly", {
  d <- HurdlePoisson(c(5, 10), 0.75)
  # Compute the expected names once instead of re-evaluating
  # LETTERS[1:length(d)] in every expectation; seq_along() is also the
  # idiomatic (zero-length-safe) replacement for 1:length().
  nms <- LETTERS[seq_along(d)]
  names(d) <- nms
  # Moments carry the element names of d.
  expect_equal(names(mean(d)), nms)
  expect_equal(names(variance(d)), nms)
  expect_equal(names(skewness(d)), nms)
  expect_equal(names(kurtosis(d)), nms)
  # Random draws: names for a single draw, rownames for multiple draws.
  expect_equal(names(random(d, 1)), nms)
  expect_equal(rownames(random(d, 3)), nms)
  # pdf/log_pdf/cdf/quantile: names for 1-2 evaluation points,
  # rownames for the matrix returned with 3+ points.
  expect_equal(names(pdf(d, 5)), nms)
  expect_equal(names(pdf(d, c(5, 7))), nms)
  expect_equal(rownames(pdf(d, c(5, 7, 9))), nms)
  expect_equal(names(log_pdf(d, 5)), nms)
  expect_equal(names(log_pdf(d, c(5, 7))), nms)
  expect_equal(rownames(log_pdf(d, c(5, 7, 9))), nms)
  expect_equal(names(cdf(d, 5)), nms)
  expect_equal(names(cdf(d, c(5, 7))), nms)
  expect_equal(rownames(cdf(d, c(5, 7, 9))), nms)
  expect_equal(names(quantile(d, 0.5)), nms)
  expect_equal(names(quantile(d, c(0.5, 0.7))), nms)
  expect_equal(rownames(quantile(d, c(0.5, 0.7, 0.9))), nms)
  # support(): "min"/"max" columns, distribution names as rownames.
  expect_equal(names(support(d[1])), c("min", "max"))
  expect_equal(colnames(support(d)), c("min", "max"))
  expect_equal(rownames(support(d)), nms)
})
| /tests/testthat/test-HurdlePoisson.R | permissive | alexpghayes/distributions3 | R | false | false | 5,155 | r | context("test-HurdlePoisson")
test_that("print.HurdlePoisson works", {
expect_output(print(HurdlePoisson(1, 0.7)), regexp = "HurdlePoisson distribution")
})
test_that("random.HurdlePoisson work correctly", {
p <- HurdlePoisson(1, 0.7)
expect_length(random(p), 1)
expect_length(random(p, 100), 100)
expect_length(random(p[-1], 1), 0)
expect_length(random(p, 0), 0)
expect_error(random(p, -2))
# consistent with base R, using the `length` as number of samples to draw
expect_length(random(p, c(1, 2, 3)), 3)
expect_length(random(p, cbind(1, 2, 3)), 3)
expect_length(random(p, rbind(1, 2, 3)), 3)
})
test_that("pdf.HurdlePoisson work correctly", {
p <- HurdlePoisson(1, 0.7)
expect_equal(pdf(p, 0), dhpois(0, 1, 0.7))
expect_equal(pdf(p, 1), dhpois(1, 1, 0.7))
expect_equal(pdf(p, -12), 0)
expect_warning(pdf(p, 0.5))
expect_length(pdf(p, seq_len(0)), 0)
expect_length(pdf(p, seq_len(1)), 1)
expect_length(pdf(p, seq_len(10)), 10)
})
test_that("log_pdf.HurdlePoisson work correctly", {
p <- HurdlePoisson(1, 0.7)
expect_equal(log_pdf(p, 0), dhpois(0, 1, 0.7, log = TRUE))
expect_equal(log_pdf(p, 1), dhpois(1, 1, 0.7, log = TRUE))
expect_equal(log_pdf(p, -12), -Inf)
expect_warning(log_pdf(p, 0.5))
expect_length(log_pdf(p, seq_len(0)), 0)
expect_length(log_pdf(p, seq_len(1)), 1)
expect_length(log_pdf(p, seq_len(10)), 10)
})
test_that("cdf.HurdlePoisson work correctly", {
p <- HurdlePoisson(1, 0.7)
expect_equal(cdf(p, 0), phpois(0, 1, 0.7))
expect_equal(cdf(p, 1), phpois(1, 1, 0.7))
expect_length(cdf(p, seq_len(0)), 0)
expect_length(cdf(p, seq_len(1)), 1)
expect_length(cdf(p, seq_len(10)), 10)
})
test_that("quantile.HurdlePoisson work correctly", {
p <- HurdlePoisson(1, 0.7)
expect_equal(quantile(p, 0), 0)
expect_equal(quantile(p, 0.5), 1)
expect_length(quantile(p, seq_len(0)), 0)
expect_length(quantile(p, c(0, 1)), 2)
})
test_that("vectorization of a HurdlePoisson distribution work correctly", {
d <- HurdlePoisson(c(1, 2), 0.7)
d1 <- d[1]
d2 <- d[2]
## moments
expect_equal(mean(d), c(mean(d1), mean(d2)))
expect_equal(variance(d), c(variance(d1), variance(d2)))
expect_equal(skewness(d), c(skewness(d1), skewness(d2)))
expect_equal(kurtosis(d), c(kurtosis(d1), kurtosis(d2)))
## pdf, log_pdf, cdf
expect_equal(pdf(d, 0), c(pdf(d1, 0), pdf(d2, 0)))
expect_equal(log_pdf(d, 0), c(log_pdf(d1, 0), log_pdf(d2, 0)))
expect_equal(cdf(d, 0.5), c(cdf(d1, 0.5), cdf(d2, 0.5)))
## quantile
expect_equal(quantile(d, 0.5), c(quantile(d1, 0.5), quantile(d2, 0.5)))
expect_equal(quantile(d, c(0.5, 0.5)), c(quantile(d1, 0.5), quantile(d2, 0.5)))
expect_equal(
quantile(d, c(0.1, 0.5, 0.9)),
matrix(
rbind(quantile(d1, c(0.1, 0.5, 0.9)), quantile(d2, c(0.1, 0.5, 0.9))),
ncol = 3, dimnames = list(NULL, c("q_0.1", "q_0.5", "q_0.9"))
)
)
## elementwise
expect_equal(
pdf(d, c(0, 1), elementwise = TRUE),
diag(pdf(d, c(0, 1), elementwise = FALSE))
)
expect_equal(
cdf(d, c(0, 1), elementwise = TRUE),
diag(cdf(d, c(0, 1), elementwise = FALSE))
)
expect_equal(
quantile(d, c(0.25, 0.75), elementwise = TRUE),
diag(quantile(d, c(0.25, 0.75), elementwise = FALSE))
)
## support
expect_equal(
support(d),
matrix(
c(support(d1)[1], support(d2)[1], support(d1)[2], support(d2)[2]),
ncol = 2, dimnames = list(names(d), c("min", "max"))
)
)
expect_true(all(is_discrete(d)))
expect_true(!any(is_continuous(d)))
expect_true(is.numeric(support(d1)))
expect_true(is.numeric(support(d1, drop = FALSE)))
expect_null(dim(support(d1)))
expect_equal(dim(support(d1, drop = FALSE)), c(1L, 2L))
})
test_that("named return values for HurdlePoisson distribution work correctly", {
  d <- HurdlePoisson(c(5, 10), 0.75)
  # Compute the expected names once instead of re-evaluating
  # LETTERS[1:length(d)] in every expectation; seq_along() is also the
  # idiomatic (zero-length-safe) replacement for 1:length().
  nms <- LETTERS[seq_along(d)]
  names(d) <- nms
  # Moments carry the element names of d.
  expect_equal(names(mean(d)), nms)
  expect_equal(names(variance(d)), nms)
  expect_equal(names(skewness(d)), nms)
  expect_equal(names(kurtosis(d)), nms)
  # Random draws: names for a single draw, rownames for multiple draws.
  expect_equal(names(random(d, 1)), nms)
  expect_equal(rownames(random(d, 3)), nms)
  # pdf/log_pdf/cdf/quantile: names for 1-2 evaluation points,
  # rownames for the matrix returned with 3+ points.
  expect_equal(names(pdf(d, 5)), nms)
  expect_equal(names(pdf(d, c(5, 7))), nms)
  expect_equal(rownames(pdf(d, c(5, 7, 9))), nms)
  expect_equal(names(log_pdf(d, 5)), nms)
  expect_equal(names(log_pdf(d, c(5, 7))), nms)
  expect_equal(rownames(log_pdf(d, c(5, 7, 9))), nms)
  expect_equal(names(cdf(d, 5)), nms)
  expect_equal(names(cdf(d, c(5, 7))), nms)
  expect_equal(rownames(cdf(d, c(5, 7, 9))), nms)
  expect_equal(names(quantile(d, 0.5)), nms)
  expect_equal(names(quantile(d, c(0.5, 0.7))), nms)
  expect_equal(rownames(quantile(d, c(0.5, 0.7, 0.9))), nms)
  # support(): "min"/"max" columns, distribution names as rownames.
  expect_equal(names(support(d[1])), c("min", "max"))
  expect_equal(colnames(support(d)), c("min", "max"))
  expect_equal(rownames(support(d)), nms)
})
|
#'
#' Ports Class. Create an object that characterizes world ports, defined as polygons enveloping AIS-based anchorages
#'
#' @field polygons Object of class polygonsClass
#' @field clusters Object of class clustersClass
#' @field anchorages Object of class points2MatrixClass
#' @field earthGeo Object of class earthGeoClass
#' @field wpi World Ports Index object, of class points2MatrixClass
#' @field cities1000 Geonames cities1000 object, of class points2MatrixClass
#' @field uniqueTypes Array of vessel types, found in anchorageTypes object
#' @field types Decomposition of anchorageTypes object into a list; each list element contains only records of a given type of vessel
#' @field country ISO-3166 2-letter country code
#' @field counts N*M matrix of counts, where N = number of ports, M = number of types of vessels
#' @field nearestWPI data.frame with N rows and 2 columns; 1st - index of nearest WPI port; 2nd - distance between polygon centroid and nearest WPI port
#' @field nearestCity data.frame with N rows and 2 columns; 1st - index of nearest city; 2nd - distance between polygon centroid and nearest city
#' @field verbose Output progress to console
#'
portsClass <- setRefClass(
Class = "portsClass",
fields = list(
polygons = 'ANY',
clusters = 'ANY',
anchorages = 'ANY',
earthGeo = 'ANY',
wpi = 'data.frame',
cities1000 = 'data.frame',
uniqueTypes = 'character',
types = 'list',
country = 'character',
counts = 'matrix',
nearestWPI = 'data.frame',
nearestCity = 'data.frame',
verbose = 'logical'
),
methods = list(
initialize = function(polygons, wpi, wpiDB, cities1000, citiesDB, anchorageTypes, autobuild = TRUE, verbose = TRUE) {
.self$polygons = polygons
.self$clusters = polygons$clusters
.self$earthGeo = .self$clusters$earthGeo
.self$anchorages = .self$clusters$anchorages
.self$wpi = wpiDB # to read original WPI shapefile, use readOGR(dsn = "/.../WPI_Shapefile/WPI.shp")
.self$cities1000 = citiesDB
.self$verbose = verbose
if (autobuild) {
.self$nearestWPI = .self$getAllNearest(wpi, 1)
.self$nearestCity = .self$getAllNearest(cities1000, 2)
.self$country = .self$getCountry()
.self$uniqueTypes = unique(anchorageTypes$gear_type)
.self$types = lapply(.self$uniqueTypes, FUN = function(myType) anchorageTypes[anchorageTypes$gear_type == myType, ])
.self$setTables()
}
},
#
# Retrieve port data
#
getData = function() {
lapply(1:length(polygons$allPolygons), FUN = function(i) {
myPoly = .self$polygons$allPolygons[[i]]
myCentroid = .self$polygons$centroids[i,]
myCountry = .self$country[i]
myCounts = .self$counts[i,]
myWPI = as.numeric(.self$nearestWPI[i, ])
myCity = as.numeric(.self$nearestCity[i, ])
list(lon = myPoly$x, lat = myPoly$y, centroidLon = myCentroid$lon, centroidLat = myCentroid$lat, country = myCountry, counts = myCounts,
wpi = .self$wpi[myWPI[1],], wpiDistance = myWPI[2], city = .self$cities1000[myCity[1],], cityDistance = myCity[2])
})
},
#
# Populate the country field
#
getCountry = function() {
mapply(.self$nearestCity[,1], FUN = function(id) .self$cities1000$country[id])
},
#
# Given an object sparseObj of class points2MatrixClass, obtain, for each port, the index of the nearest point in sparseObj and the distance
#
getAllNearest = function(sparseObj, part) {
n = nrow(.self$polygons$centroids)
if (.self$verbose) print(paste0('Mapping, part ', part))
raw = t(mapply(1:n, FUN = function(i) {
if (i %% 1000 == 0 && .self$verbose) print(paste0(i, " out of ", n, " done."))
.self$getOneNearest(sparseObj, i)
}))
data.frame(index = raw[,1], distance = raw[,2])
},
#
# Inner loop of function getAllNearest (computations for a single port)
#
getOneNearest = function(sparseObj, i) {
crds = as.numeric(.self$polygons$centroids[i, ])
rc = sparseObj$getLatLonIdx(mylat = crds[2], mylon = crds[1])
searching = TRUE
range = 10
while (searching) {
rb = c(max(1, rc[1] - range), min(.self$earthGeo$nlat, rc[1] + range))
cb = c(max(1, rc[2] - range), min(.self$earthGeo$nlon, rc[2] + range))
u = sparseObj$mat[rb[1]:rb[2], cb[1]:cb[2]]
if (length(u@x) > 0) {
rIdx = rb[1] + u@i
cIdx = cb[1] + .self$columnIndexes(u@p) - 1
d = .self$getDistanceKm(lat1 = crds[2], lon1 = crds[1], lat2 = .self$earthGeo$lat[rIdx], lon2 = .self$earthGeo$lon[cIdx])
j = which.min(d)
nearest = c(sparseObj$mat[rIdx[j], cIdx[j]], d[j])
searching = FALSE
} else {
range = range * 10
}
}
nearest
},
#
# Given the p array of a sparse matrix, get the column indexes
#
columnIndexes = function(v) {
unlist(lapply(1:(length(v) - 1), FUN = function(i) rep(i, v[i + 1] - v[i])))
},
#
# Haversine (great-circle) distance in km between (lat1, lon1) and
# (lat2, lon2), given in degrees; vectorized over its arguments.
# earthCircumfKm / pi == 2 * R (the usual haversine prefactor) and
# sin(x * pi / 360) == sin(deg2rad(x) / 2).
#
getDistanceKm = function(lat1, lon1, lat2, lon2) {
  # .self$ added for consistency with field access in the other methods
  .self$earthGeo$earthCircumfKm / pi * asin(sqrt(sin((lat1 - lat2) * pi / 360) ^ 2 + cos(lat1 * pi / 180) * cos(lat2 * pi / 180) * sin((lon1 - lon2) * pi / 360) ^ 2))
},
#
# Populate the counts field: one row per port (cluster), one column per
# vessel type; each cell is the summed count from the anchorage type table
# (see setOneTable).
#
setTables = function() {
  if (.self$verbose) print('Constructing vessel type table')
  # setOneTable returns one count per vessel type; mapply stacks these as
  # columns, so transpose to get ports in rows and types in columns.
  # seq_len() instead of 1:n guards against nClust == 0.
  .self$counts = t(mapply(seq_len(.self$clusters$nClust), FUN = function(i) {
    if (i %% 1000 == 0 && .self$verbose) print(paste0(i, " out of ", .self$clusters$nClust, " done."))
    .self$setOneTable(i)  # .self$ added for consistency with the other methods
  }))
  colnames(.self$counts) = .self$uniqueTypes
},
#
# Inner loop of function setTables (computations for a single port)
#
# Returns one summed count per vessel type for port (cluster) i.
#
setOneTable = function(i) {
# Anchorages assigned to cluster i (stored values of the sparse groupID)
idx = which(.self$clusters$groupID@x == i)
myS2id = .self$anchorages$id[idx]
# For each vessel type table, sum the counts recorded for this port's
# anchorage s2 cells
cnt = mapply(.self$types, FUN = function(mygear) {
sum(unlist(mapply(myS2id, FUN = function(s) mygear$counts[mygear$s2id == s])))
})
cnt
}
)
)
| /R/05ports.R | permissive | rtlemos/portsModel | R | false | false | 6,209 | r | #'
#' Ports Class. Create an object that characterizes world ports, defined as polygons enveloping AIS-based anchorages
#'
#' @field polygons Object of class polygonsClass
#' @field clusters Object of class clustersClass
#' @field anchorages Object of class points2MatrixClass
#' @field earthGeo Object of class earthGeoClass
#' @field wpi World Ports Index object, of class points2MatrixClass
#' @field cities1000 Geonames cities1000 object, of class points2MatrixClass
#' @field uniqueTypes Array of vessel types, found in anchorageTypes object
#' @field types Decomposition of anchorageTypes object into a list; each list element contains only records of a given type of vessel
#' @field country ISO-3166 2-letter country code
#' @field counts N*M matrix of counts, where N = number of ports, M = number of types of vessels
#' @field nearestWPI data.frame with N rows and 2 columns; 1st - index of nearest WPI port; 2nd - distance between polygon centroid and nearest WPI port
#' @field nearestCity data.frame with N rows and 2 columns; 1st - index of nearest city; 2nd - distance between polygon centroid and nearest city
#' @field verbose Output progress to console
#'
portsClass <- setRefClass(
  Class = "portsClass",
  fields = list(
    polygons = 'ANY',
    clusters = 'ANY',
    anchorages = 'ANY',
    earthGeo = 'ANY',
    wpi = 'data.frame',
    cities1000 = 'data.frame',
    uniqueTypes = 'character',
    types = 'list',
    country = 'character',
    counts = 'matrix',
    nearestWPI = 'data.frame',
    nearestCity = 'data.frame',
    verbose = 'logical'
  ),
  methods = list(
    #
    # Build the object. Note the split inputs: `wpi`/`cities1000` are the
    # points2MatrixClass objects used for nearest-point searches, while
    # `wpiDB`/`citiesDB` are the corresponding data.frames stored as fields.
    # When autobuild = TRUE, also compute nearest WPI port / nearest city,
    # country codes and the vessel-type count table.
    #
    initialize = function(polygons, wpi, wpiDB, cities1000, citiesDB, anchorageTypes, autobuild = TRUE, verbose = TRUE) {
      .self$polygons = polygons
      .self$clusters = polygons$clusters
      .self$earthGeo = .self$clusters$earthGeo
      .self$anchorages = .self$clusters$anchorages
      .self$wpi = wpiDB # to read original WPI shapefile, use readOGR(dsn = "/.../WPI_Shapefile/WPI.shp")
      .self$cities1000 = citiesDB
      .self$verbose = verbose
      if (autobuild) {
        .self$nearestWPI = .self$getAllNearest(wpi, 1)
        .self$nearestCity = .self$getAllNearest(cities1000, 2)
        .self$country = .self$getCountry()
        .self$uniqueTypes = unique(anchorageTypes$gear_type)
        .self$types = lapply(.self$uniqueTypes, FUN = function(myType) anchorageTypes[anchorageTypes$gear_type == myType, ])
        .self$setTables()
      }
    },
    #
    # Retrieve port data: one list element per polygon bundling geometry,
    # centroid, country, vessel-type counts and nearest WPI port / city.
    #
    getData = function() {
      # seq_along() instead of 1:length() so an empty polygon list yields
      # an empty result rather than indexes c(1, 0)
      lapply(seq_along(.self$polygons$allPolygons), FUN = function(i) {
        myPoly = .self$polygons$allPolygons[[i]]
        myCentroid = .self$polygons$centroids[i,]
        myCountry = .self$country[i]
        myCounts = .self$counts[i,]
        myWPI = as.numeric(.self$nearestWPI[i, ])
        myCity = as.numeric(.self$nearestCity[i, ])
        list(lon = myPoly$x, lat = myPoly$y, centroidLon = myCentroid$lon, centroidLat = myCentroid$lat, country = myCountry, counts = myCounts,
             wpi = .self$wpi[myWPI[1],], wpiDistance = myWPI[2], city = .self$cities1000[myCity[1],], cityDistance = myCity[2])
      })
    },
    #
    # Populate the country field: vectorized lookup of the country of each
    # port's nearest city (replaces a per-element mapply, same result)
    #
    getCountry = function() {
      .self$cities1000$country[.self$nearestCity[, 1]]
    },
    #
    # Given an object sparseObj of class points2MatrixClass, obtain, for each port, the index of the nearest point in sparseObj and the distance
    #
    getAllNearest = function(sparseObj, part) {
      n = nrow(.self$polygons$centroids)
      if (.self$verbose) print(paste0('Mapping, part ', part))
      # getOneNearest returns c(index, distance); mapply stacks them as
      # columns (2 x n), so transpose to get one row per port
      raw = t(mapply(seq_len(n), FUN = function(i) {
        if (i %% 1000 == 0 && .self$verbose) print(paste0(i, " out of ", n, " done."))
        .self$getOneNearest(sparseObj, i)
      }))
      data.frame(index = raw[,1], distance = raw[,2])
    },
    #
    # Inner loop of function getAllNearest (computations for a single port):
    # grow a search window around the centroid's grid cell until it contains
    # a stored entry, then return c(id, distance) for the closest one.
    #
    getOneNearest = function(sparseObj, i) {
      # crds[1] = lon, crds[2] = lat (see the mylat/mylon arguments below)
      crds = as.numeric(.self$polygons$centroids[i, ])
      rc = sparseObj$getLatLonIdx(mylat = crds[2], mylon = crds[1])
      searching = TRUE
      range = 10
      while (searching) {
        rb = c(max(1, rc[1] - range), min(.self$earthGeo$nlat, rc[1] + range))
        cb = c(max(1, rc[2] - range), min(.self$earthGeo$nlon, rc[2] + range))
        u = sparseObj$mat[rb[1]:rb[2], cb[1]:cb[2]]
        if (length(u@x) > 0) {
          # u@i holds 0-based row offsets (sparse-matrix convention), hence
          # no "- 1"; columnIndexes() derives 1-based column indexes from
          # the column-pointer slot u@p, hence the "- 1"
          rIdx = rb[1] + u@i
          cIdx = cb[1] + .self$columnIndexes(u@p) - 1
          d = .self$getDistanceKm(lat1 = crds[2], lon1 = crds[1], lat2 = .self$earthGeo$lat[rIdx], lon2 = .self$earthGeo$lon[cIdx])
          j = which.min(d)
          nearest = c(sparseObj$mat[rIdx[j], cIdx[j]], d[j])
          searching = FALSE
        } else {
          range = range * 10
        }
      }
      nearest
    },
    #
    # Given the column-pointer array p of a sparse matrix, expand it into
    # one 1-based column index per stored entry: diff(p)[j] is the number
    # of stored entries in column j, so rep() over diff() replaces the
    # original per-column lapply/unlist round trip.
    #
    columnIndexes = function(v) {
      rep(seq_len(length(v) - 1), diff(v))
    },
    #
    # Haversine (great-circle) distance in km between points given in
    # degrees; vectorized. earthCircumfKm / pi == 2 * R, and
    # sin(x * pi / 360) == sin(deg2rad(x) / 2).
    #
    getDistanceKm = function(lat1, lon1, lat2, lon2) {
      # .self$ added for consistency with field access in the other methods
      .self$earthGeo$earthCircumfKm / pi * asin(sqrt(sin((lat1 - lat2) * pi / 360) ^ 2 + cos(lat1 * pi / 180) * cos(lat2 * pi / 180) * sin((lon1 - lon2) * pi / 360) ^ 2))
    },
    #
    # Populate the counts field: one row per port (cluster), one column per
    # vessel type; each cell is the summed count from the anchorage type table.
    #
    setTables = function() {
      if (.self$verbose) print('Constructing vessel type table')
      # setOneTable returns one count per type; mapply stacks them as
      # columns, so transpose to get ports in rows and types in columns
      .self$counts = t(mapply(seq_len(.self$clusters$nClust), FUN = function(i) {
        if (i %% 1000 == 0 && .self$verbose) print(paste0(i, " out of ", .self$clusters$nClust, " done."))
        .self$setOneTable(i)
      }))
      colnames(.self$counts) = .self$uniqueTypes
    },
    #
    # Inner loop of function setTables (computations for a single port):
    # one summed count per vessel type for cluster i.
    #
    setOneTable = function(i) {
      # Anchorages assigned to cluster i (stored values of the sparse groupID)
      idx = which(.self$clusters$groupID@x == i)
      myS2id = .self$anchorages$id[idx]
      # For each vessel type table, sum the counts recorded for this port's
      # anchorage s2 cells
      cnt = mapply(.self$types, FUN = function(mygear) {
        sum(unlist(mapply(myS2id, FUN = function(s) mygear$counts[mygear$s2id == s])))
      })
      cnt
    }
  )
)
|
#' @importFrom dplyr select distinct left_join arrange %>% mutate
#' @importFrom readr write_tsv
#
# GSEA mode of WebGestaltR: load the functional categories and the user's
# ranked gene list, run the GSEA enrichment, annotate and de-duplicate the
# significant sets (affinity propagation + weighted set cover), and, when
# isOutput = TRUE, write all result files plus the HTML report into
# <outputDirectory>/Project_<projectName>.
# Returns the data frame of enriched gene sets (user IDs mapped back where
# applicable), or NULL when the GSEA step fails or nothing is significant.
WebGestaltRGsea <- function(organism="hsapiens", enrichDatabase=NULL, enrichDatabaseFile=NULL, enrichDatabaseType=NULL, enrichDatabaseDescriptionFile=NULL, interestGeneFile=NULL, interestGene=NULL, interestGeneType=NULL, collapseMethod="mean", minNum=10, maxNum=500, fdrMethod="BH", sigMethod="fdr", fdrThr=0.05, topThr=10, reportNum=20, setCoverNum=10, perNum=1000, isOutput=TRUE, outputDirectory=getwd(), projectName=NULL, dagColor="binary", nThreads=1, cache=NULL, hostName="http://www.webgestalt.org/") {
  enrichMethod <- "GSEA"
  projectDir <- file.path(outputDirectory, paste0("Project_", projectName))

  ######### Web server will input "NULL" to the R package, thus, we need to change "NULL" to NULL ########
  enrichDatabase <- testNull(enrichDatabase)
  enrichDatabaseFile <- testNull(enrichDatabaseFile)
  enrichDatabaseType <- testNull(enrichDatabaseType)
  enrichDatabaseDescriptionFile <- testNull(enrichDatabaseDescriptionFile)
  interestGeneFile <- testNull(interestGeneFile)
  interestGene <- testNull(interestGene)
  interestGeneType <- testNull(interestGeneType)

  ################ Check parameter ################
  errorTest <- parameterErrorMessage(enrichMethod=enrichMethod, organism=organism, collapseMethod=collapseMethod, minNum=minNum, maxNum=maxNum, fdrMethod=fdrMethod, sigMethod=sigMethod, fdrThr=fdrThr, topThr=topThr, reportNum=reportNum, perNum=perNum, isOutput=isOutput, outputDirectory=outputDirectory, dagColor=dagColor, hostName=hostName, cache=cache)
  if (!is.null(errorTest)) {
    stop(errorTest)
  }

  ############# Check enriched database #############
  cat("Loading the functional categories...\n")
  enrichD <- loadGeneSet(organism=organism, enrichDatabase=enrichDatabase, enrichDatabaseFile=enrichDatabaseFile, enrichDatabaseType=enrichDatabaseType, enrichDatabaseDescriptionFile=enrichDatabaseDescriptionFile, cache=cache, hostName=hostName)
  geneSet <- enrichD$geneSet
  geneSetDes <- enrichD$geneSetDes
  geneSetDag <- enrichD$geneSetDag
  geneSetNet <- enrichD$geneSetNet
  databaseStandardId <- enrichD$standardId
  rm(enrichD)

  ########### Check input interesting gene list ###############
  cat("Loading the ID list...\n")
  interestingGeneMap <- loadInterestGene(organism=organism, dataType="rnk", inputGeneFile=interestGeneFile, inputGene=interestGene, geneType=interestGeneType, collapseMethod=collapseMethod, cache=cache, hostName=hostName, geneSet=geneSet)
  if (organism == "others") {
    interestGeneList <- unique(interestingGeneMap)
  } else {
    interestStandardId <- interestingGeneMap$standardId
    interestGeneList <- interestingGeneMap$mapped %>% select(interestStandardId, .data$score) %>% distinct()
  }

  ########## Create project folder ##############
  if (isOutput) {
    dir.create(projectDir)
    ###### Summarize gene annotation based on the GOSlim ###########
    if (organism != "others") {
      if (databaseStandardId == "entrezgene") {
        cat("Summarizing the uploaded ID list by GO Slim data...\n")
        goSlimOutput <- file.path(projectDir, paste0("goslim_summary_", projectName))
        re <- goSlimSummary(organism=organism, geneList=interestGeneList[[interestStandardId]], outputFile=goSlimOutput, outputType="png", isOutput=isOutput, cache=cache, hostName=hostName)
      }
      write_tsv(interestingGeneMap$mapped, file.path(projectDir, paste0("interestingID_mappingTable_", projectName, ".txt")))
      write(interestingGeneMap$unmapped, file.path(projectDir, paste0("interestingID_unmappedList_", projectName, ".txt")))
    } else {
      write_tsv(interestGeneList, file.path(projectDir, paste0("interestList_", projectName, ".txt")), col_names=FALSE)
    }
  }

  ############# Run enrichment analysis ###################
  cat("Performing the enrichment analysis...\n")
  gseaRes <- gseaEnrichment(hostName, outputDirectory, projectName, interestGeneList,
    geneSet, geneSetDes=geneSetDes, minNum=minNum, maxNum=maxNum, sigMethod=sigMethod, fdrThr=fdrThr,
    topThr=topThr, perNum=perNum, nThreads=nThreads, isOutput=isOutput
  )
  if (is.null(gseaRes)) {
    return(NULL)
  }
  enrichedSig <- gseaRes$enriched
  insig <- gseaRes$background

  # BUG FIX: outputEnrichedSig used to be assigned only inside the
  # !is.null(enrichedSig) branch, so the final return() failed with
  # "object 'outputEnrichedSig' not found" whenever no gene set was
  # enriched. Initialize it here so the function returns NULL in that case.
  outputEnrichedSig <- NULL
  clusters <- list()
  geneTables <- list()
  if (!is.null(enrichedSig)) {
    if (!is.null(geneSetDes)) { ####### Add extra description information ###########
      enrichedSig <- enrichedSig %>%
        left_join(geneSetDes, by="geneSet") %>%
        select(.data$geneSet, .data$description, .data$link, .data$enrichmentScore, .data$normalizedEnrichmentScore, .data$pValue, .data$FDR, .data$size, .data$plotPath, .data$leadingEdgeNum, .data$leadingEdgeId) %>%
        arrange(.data$FDR, .data$pValue, desc(.data$normalizedEnrichmentScore)) %>%
        mutate(description=ifelse(is.na(.data$description), "", .data$description))
    } else {
      enrichedSig <- enrichedSig %>%
        select(.data$geneSet, .data$link, .data$enrichmentScore, .data$normalizedEnrichmentScore, .data$pValue, .data$FDR, .data$size, .data$plotPath, .data$leadingEdgeNum, .data$leadingEdgeId) %>%
        arrange(.data$FDR, .data$pValue, desc(.data$normalizedEnrichmentScore))
    }
    geneTables <- getGeneTables(organism, enrichedSig, "leadingEdgeId", interestingGeneMap)
    if (organism != "others") {
      enrichedSig$link <- mapply(function(link, geneList) linkModification("GSEA", link, geneList, interestingGeneMap),
        enrichedSig$link,
        enrichedSig$leadingEdgeId
      )
    }
    if ("database" %in% colnames(geneSet)) {
      # add source database for multiple databases
      enrichedSig <- enrichedSig %>% left_join(unique(geneSet[, c("geneSet", "database")]), by="geneSet")
    }
    if (organism != "others" && interestGeneType != interestStandardId) {
      outputEnrichedSig <- mapUserId(enrichedSig, "leadingEdgeId", interestingGeneMap)
    } else {
      outputEnrichedSig <- enrichedSig
    }
    if (isOutput) {
      write_tsv(outputEnrichedSig, file.path(projectDir, paste0("enrichment_results_", projectName, ".txt")))
      idsInSet <- sapply(enrichedSig$leadingEdgeId, strsplit, split=";")
      names(idsInSet) <- enrichedSig$geneSet
      # Guard against p == 0 before taking logs; the sign of the enrichment
      # score gives the direction of the signed log-p used for clustering
      pValue <- enrichedSig$pValue
      pValue[pValue == 0] <- .Machine$double.eps
      signedLogP <- -log(pValue) * sign(enrichedSig$enrichmentScore)
      apRes <- affinityPropagation(idsInSet, signedLogP)
      wscRes <- weightedSetCover(idsInSet, 1 / signedLogP, setCoverNum, nThreads)
      # (dead `apRes <- NULL` else-branch removed: apRes is already NULL there)
      if (!is.null(apRes)) {
        writeLines(sapply(apRes$clusters, paste, collapse="\t"), file.path(projectDir, paste0("enriched_geneset_ap_clusters_", projectName, ".txt")))
      }
      clusters$ap <- apRes
      if (!is.null(wscRes$topSets)) {
        writeLines(c(paste0("# Coverage: ", wscRes$coverage), wscRes$topSets), file.path(projectDir, paste0("enriched_geneset_wsc_topsets_", projectName, ".txt")))
        clusters$wsc <- list(representatives=wscRes$topSets, coverage=wscRes$coverage)
      } else {
        clusters$wsc <- NULL
      }
    }
  }

  if (isOutput) {
    ############## Create report ##################
    cat("Generate the final report...\n")
    createReport(hostName=hostName, outputDirectory=outputDirectory, organism=organism, projectName=projectName, enrichMethod=enrichMethod, geneSet=geneSet, geneSetDes=geneSetDes, geneSetDag=geneSetDag, geneSetNet=geneSetNet, interestingGeneMap=interestingGeneMap, enrichedSig=enrichedSig, background=insig, geneTables=geneTables, clusters=clusters, enrichDatabase=enrichDatabase, enrichDatabaseFile=enrichDatabaseFile, enrichDatabaseType=enrichDatabaseType, enrichDatabaseDescriptionFile=enrichDatabaseDescriptionFile, interestGeneFile=interestGeneFile, interestGene=interestGene, interestGeneType=interestGeneType, collapseMethod=collapseMethod, minNum=minNum, maxNum=maxNum, fdrMethod=fdrMethod, sigMethod=sigMethod, fdrThr=fdrThr, topThr=topThr, reportNum=reportNum, perNum=perNum, dagColor=dagColor)
    # Zip the project folder, restoring the working directory afterwards
    cwd <- getwd()
    setwd(projectDir)
    zip(paste0("Project_", projectName, ".zip"), ".", flags="-rq")
    setwd(cwd)
    cat("Results can be found in the ", projectDir, "!\n", sep="")
  }
  return(outputEnrichedSig)
}
| /R/WebGestaltRGsea.R | no_license | sailepradh/WebGestaltR | R | false | false | 8,001 | r | #' @importFrom dplyr select distinct left_join arrange %>% mutate
#' @importFrom dplyr select distinct left_join arrange %>% mutate
#' @importFrom readr write_tsv
#
# GSEA mode of WebGestaltR: load the functional categories and the user's
# ranked gene list, run the GSEA enrichment, annotate and de-duplicate the
# significant sets (affinity propagation + weighted set cover), and, when
# isOutput = TRUE, write all result files plus the HTML report into
# <outputDirectory>/Project_<projectName>.
# Returns the data frame of enriched gene sets (user IDs mapped back where
# applicable), or NULL when the GSEA step fails or nothing is significant.
WebGestaltRGsea <- function(organism="hsapiens", enrichDatabase=NULL, enrichDatabaseFile=NULL, enrichDatabaseType=NULL, enrichDatabaseDescriptionFile=NULL, interestGeneFile=NULL, interestGene=NULL, interestGeneType=NULL, collapseMethod="mean", minNum=10, maxNum=500, fdrMethod="BH", sigMethod="fdr", fdrThr=0.05, topThr=10, reportNum=20, setCoverNum=10, perNum=1000, isOutput=TRUE, outputDirectory=getwd(), projectName=NULL, dagColor="binary", nThreads=1, cache=NULL, hostName="http://www.webgestalt.org/") {
  enrichMethod <- "GSEA"
  projectDir <- file.path(outputDirectory, paste0("Project_", projectName))

  ######### Web server will input "NULL" to the R package, thus, we need to change "NULL" to NULL ########
  enrichDatabase <- testNull(enrichDatabase)
  enrichDatabaseFile <- testNull(enrichDatabaseFile)
  enrichDatabaseType <- testNull(enrichDatabaseType)
  enrichDatabaseDescriptionFile <- testNull(enrichDatabaseDescriptionFile)
  interestGeneFile <- testNull(interestGeneFile)
  interestGene <- testNull(interestGene)
  interestGeneType <- testNull(interestGeneType)

  ################ Check parameter ################
  errorTest <- parameterErrorMessage(enrichMethod=enrichMethod, organism=organism, collapseMethod=collapseMethod, minNum=minNum, maxNum=maxNum, fdrMethod=fdrMethod, sigMethod=sigMethod, fdrThr=fdrThr, topThr=topThr, reportNum=reportNum, perNum=perNum, isOutput=isOutput, outputDirectory=outputDirectory, dagColor=dagColor, hostName=hostName, cache=cache)
  if (!is.null(errorTest)) {
    stop(errorTest)
  }

  ############# Check enriched database #############
  cat("Loading the functional categories...\n")
  enrichD <- loadGeneSet(organism=organism, enrichDatabase=enrichDatabase, enrichDatabaseFile=enrichDatabaseFile, enrichDatabaseType=enrichDatabaseType, enrichDatabaseDescriptionFile=enrichDatabaseDescriptionFile, cache=cache, hostName=hostName)
  geneSet <- enrichD$geneSet
  geneSetDes <- enrichD$geneSetDes
  geneSetDag <- enrichD$geneSetDag
  geneSetNet <- enrichD$geneSetNet
  databaseStandardId <- enrichD$standardId
  rm(enrichD)

  ########### Check input interesting gene list ###############
  cat("Loading the ID list...\n")
  interestingGeneMap <- loadInterestGene(organism=organism, dataType="rnk", inputGeneFile=interestGeneFile, inputGene=interestGene, geneType=interestGeneType, collapseMethod=collapseMethod, cache=cache, hostName=hostName, geneSet=geneSet)
  if (organism == "others") {
    interestGeneList <- unique(interestingGeneMap)
  } else {
    interestStandardId <- interestingGeneMap$standardId
    interestGeneList <- interestingGeneMap$mapped %>% select(interestStandardId, .data$score) %>% distinct()
  }

  ########## Create project folder ##############
  if (isOutput) {
    dir.create(projectDir)
    ###### Summarize gene annotation based on the GOSlim ###########
    if (organism != "others") {
      if (databaseStandardId == "entrezgene") {
        cat("Summarizing the uploaded ID list by GO Slim data...\n")
        goSlimOutput <- file.path(projectDir, paste0("goslim_summary_", projectName))
        re <- goSlimSummary(organism=organism, geneList=interestGeneList[[interestStandardId]], outputFile=goSlimOutput, outputType="png", isOutput=isOutput, cache=cache, hostName=hostName)
      }
      write_tsv(interestingGeneMap$mapped, file.path(projectDir, paste0("interestingID_mappingTable_", projectName, ".txt")))
      write(interestingGeneMap$unmapped, file.path(projectDir, paste0("interestingID_unmappedList_", projectName, ".txt")))
    } else {
      write_tsv(interestGeneList, file.path(projectDir, paste0("interestList_", projectName, ".txt")), col_names=FALSE)
    }
  }

  ############# Run enrichment analysis ###################
  cat("Performing the enrichment analysis...\n")
  gseaRes <- gseaEnrichment(hostName, outputDirectory, projectName, interestGeneList,
    geneSet, geneSetDes=geneSetDes, minNum=minNum, maxNum=maxNum, sigMethod=sigMethod, fdrThr=fdrThr,
    topThr=topThr, perNum=perNum, nThreads=nThreads, isOutput=isOutput
  )
  if (is.null(gseaRes)) {
    return(NULL)
  }
  enrichedSig <- gseaRes$enriched
  insig <- gseaRes$background

  # BUG FIX: outputEnrichedSig used to be assigned only inside the
  # !is.null(enrichedSig) branch, so the final return() failed with
  # "object 'outputEnrichedSig' not found" whenever no gene set was
  # enriched. Initialize it here so the function returns NULL in that case.
  outputEnrichedSig <- NULL
  clusters <- list()
  geneTables <- list()
  if (!is.null(enrichedSig)) {
    if (!is.null(geneSetDes)) { ####### Add extra description information ###########
      enrichedSig <- enrichedSig %>%
        left_join(geneSetDes, by="geneSet") %>%
        select(.data$geneSet, .data$description, .data$link, .data$enrichmentScore, .data$normalizedEnrichmentScore, .data$pValue, .data$FDR, .data$size, .data$plotPath, .data$leadingEdgeNum, .data$leadingEdgeId) %>%
        arrange(.data$FDR, .data$pValue, desc(.data$normalizedEnrichmentScore)) %>%
        mutate(description=ifelse(is.na(.data$description), "", .data$description))
    } else {
      enrichedSig <- enrichedSig %>%
        select(.data$geneSet, .data$link, .data$enrichmentScore, .data$normalizedEnrichmentScore, .data$pValue, .data$FDR, .data$size, .data$plotPath, .data$leadingEdgeNum, .data$leadingEdgeId) %>%
        arrange(.data$FDR, .data$pValue, desc(.data$normalizedEnrichmentScore))
    }
    geneTables <- getGeneTables(organism, enrichedSig, "leadingEdgeId", interestingGeneMap)
    if (organism != "others") {
      enrichedSig$link <- mapply(function(link, geneList) linkModification("GSEA", link, geneList, interestingGeneMap),
        enrichedSig$link,
        enrichedSig$leadingEdgeId
      )
    }
    if ("database" %in% colnames(geneSet)) {
      # add source database for multiple databases
      enrichedSig <- enrichedSig %>% left_join(unique(geneSet[, c("geneSet", "database")]), by="geneSet")
    }
    if (organism != "others" && interestGeneType != interestStandardId) {
      outputEnrichedSig <- mapUserId(enrichedSig, "leadingEdgeId", interestingGeneMap)
    } else {
      outputEnrichedSig <- enrichedSig
    }
    if (isOutput) {
      write_tsv(outputEnrichedSig, file.path(projectDir, paste0("enrichment_results_", projectName, ".txt")))
      idsInSet <- sapply(enrichedSig$leadingEdgeId, strsplit, split=";")
      names(idsInSet) <- enrichedSig$geneSet
      # Guard against p == 0 before taking logs; the sign of the enrichment
      # score gives the direction of the signed log-p used for clustering
      pValue <- enrichedSig$pValue
      pValue[pValue == 0] <- .Machine$double.eps
      signedLogP <- -log(pValue) * sign(enrichedSig$enrichmentScore)
      apRes <- affinityPropagation(idsInSet, signedLogP)
      wscRes <- weightedSetCover(idsInSet, 1 / signedLogP, setCoverNum, nThreads)
      # (dead `apRes <- NULL` else-branch removed: apRes is already NULL there)
      if (!is.null(apRes)) {
        writeLines(sapply(apRes$clusters, paste, collapse="\t"), file.path(projectDir, paste0("enriched_geneset_ap_clusters_", projectName, ".txt")))
      }
      clusters$ap <- apRes
      if (!is.null(wscRes$topSets)) {
        writeLines(c(paste0("# Coverage: ", wscRes$coverage), wscRes$topSets), file.path(projectDir, paste0("enriched_geneset_wsc_topsets_", projectName, ".txt")))
        clusters$wsc <- list(representatives=wscRes$topSets, coverage=wscRes$coverage)
      } else {
        clusters$wsc <- NULL
      }
    }
  }

  if (isOutput) {
    ############## Create report ##################
    cat("Generate the final report...\n")
    createReport(hostName=hostName, outputDirectory=outputDirectory, organism=organism, projectName=projectName, enrichMethod=enrichMethod, geneSet=geneSet, geneSetDes=geneSetDes, geneSetDag=geneSetDag, geneSetNet=geneSetNet, interestingGeneMap=interestingGeneMap, enrichedSig=enrichedSig, background=insig, geneTables=geneTables, clusters=clusters, enrichDatabase=enrichDatabase, enrichDatabaseFile=enrichDatabaseFile, enrichDatabaseType=enrichDatabaseType, enrichDatabaseDescriptionFile=enrichDatabaseDescriptionFile, interestGeneFile=interestGeneFile, interestGene=interestGene, interestGeneType=interestGeneType, collapseMethod=collapseMethod, minNum=minNum, maxNum=maxNum, fdrMethod=fdrMethod, sigMethod=sigMethod, fdrThr=fdrThr, topThr=topThr, reportNum=reportNum, perNum=perNum, dagColor=dagColor)
    # Zip the project folder, restoring the working directory afterwards
    cwd <- getwd()
    setwd(projectDir)
    zip(paste0("Project_", projectName, ".zip"), ".", flags="-rq")
    setwd(cwd)
    cat("Results can be found in the ", projectDir, "!\n", sep="")
  }
  return(outputEnrichedSig)
}
|
# Subset the UCI household power consumption data to 2007-02-01/02 and
# coerce the measurement columns to numeric.
# NOTE: setwd() in a script is fragile; kept to preserve the original workflow.
setwd("C:\\Users\\a552344\\Desktop\\scripts")
# Fetch and extract the raw data only when the txt file is missing.
# Fixes: `zptmp` was used without ever being defined, and `tdf` (the
# read.table target) was only defined inside this branch, so the script
# failed whenever the txt file already existed.
zptmp <- "household_power_consumption.zip"
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zptmp)
  unzip(zptmp)
}
elecpwr <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
elecpwr$Date <- as.Date(elecpwr$Date, "%d/%m/%Y")
finaldata <- elecpwr[(elecpwr$Date == "2007-02-01") | (elecpwr$Date == "2007-02-02"), ]
# convert date and time variables to Date/Time class
# (the stray "%d/%m/%Y %H:%M:%S" argument of the original transform() call
# only created an unnamed junk column; the pasted Date/Time string is
# already in the default as.POSIXct format)
finaldata <- transform(finaldata, timestamp = as.POSIXct(paste(Date, Time)))
# The measurement columns arrive as text; coerce them all to numeric
# (unparseable entries such as "?" become NA)
numcols <- c("Global_active_power", "Global_reactive_power", "Voltage",
             "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
finaldata[numcols] <- lapply(finaldata[numcols], function(col) as.numeric(as.character(col)))
finaldata$Sub_metering_3 <- as.numeric(as.character(finaldata$Sub_metering_3)) | /dataload.R | no_license | Deepthisri/ExData_Plotting1 | R | false | false | 1,068 | r | setwd("C:\\Users\\a552344\\Desktop\\scripts")
# Subset the UCI household power consumption data to 2007-02-01/02 and
# coerce the measurement columns to numeric.
# Fetch and extract the raw data only when the txt file is missing.
# Fixes: `zptmp` was used without ever being defined, and `tdf` (the
# read.table target) was only defined inside this branch, so the script
# failed whenever the txt file already existed.
zptmp <- "household_power_consumption.zip"
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zptmp)
  unzip(zptmp)
}
elecpwr <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
elecpwr$Date <- as.Date(elecpwr$Date, "%d/%m/%Y")
finaldata <- elecpwr[(elecpwr$Date == "2007-02-01") | (elecpwr$Date == "2007-02-02"), ]
# convert date and time variables to Date/Time class
# (the stray "%d/%m/%Y %H:%M:%S" argument of the original transform() call
# only created an unnamed junk column; the pasted Date/Time string is
# already in the default as.POSIXct format)
finaldata <- transform(finaldata, timestamp = as.POSIXct(paste(Date, Time)))
# The measurement columns arrive as text; coerce them all to numeric
# (unparseable entries such as "?" become NA)
numcols <- c("Global_active_power", "Global_reactive_power", "Voltage",
             "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
finaldata[numcols] <- lapply(finaldata[numcols], function(col) as.numeric(as.character(col)))
finaldata$Sub_metering_3 <- as.numeric(as.character(finaldata$Sub_metering_3)) |
# Tests for the MTG parser, driven by the example file shipped with the
# package. All chunks share the pre-processed `MTG_file` lines.
# (Top-level `=` assignments replaced with `<-`, per R style conventions.)
filepath <- system.file("extdata", "simple_plant.mtg", package = "XploRer")
MTG_file <- readLines(filepath)
MTG_file <- strip_comments(MTG_file)
MTG_file <- strip_empty_lines(MTG_file)

test_that("Check the sections", {
  expect_null(check_sections(MTG_file))
})

test_that("Parse code", {
  expect_equal(parse_MTG_code(MTG_file), "FORM-A")
})

classes <- parse_MTG_classes(MTG_file)
test_that("Parse classes", {
  expect_true(is.data.frame(classes))
  expect_equal(nrow(classes), 5)
  expect_equal(classes$SYMBOL, c("$", "Individual", "Axis", "Internode", "Leaf"))
  expect_equal(classes$SCALE, c(0, 1, 2, 3, 3))
  expect_equal(classes$DECOMPOSITION, rep("FREE", 5))
  expect_equal(classes$INDEXATION, rep("FREE", 5))
  expect_equal(classes$DEFINITION, rep("IMPLICIT", 5))
})

description <- parse_MTG_description(MTG_file)
test_that("Parse description", {
  expect_true(is.data.frame(description))
  expect_equal(nrow(description), 2)
  expect_equal(description$LEFT, rep("Internode", 2))
  expect_equal(description$RELTYPE, c("+", "<"))
  expect_equal(description$MAX, c("?", "?"))
})

features <- parse_MTG_section(MTG_file, "FEATURES:",
                              c("NAME", "TYPE"),
                              "MTG:", TRUE)
test_that("Parse features", {
  expect_true(is.data.frame(features))
  expect_equal(nrow(features), 7)
  expect_equal(features$NAME, c('XX', 'YY', 'ZZ', 'FileName', 'Length', 'Width', 'XEuler'))
  expect_equal(features$TYPE, c('REAL', 'REAL', 'REAL', 'ALPHA', 'ALPHA', 'ALPHA', 'REAL'))
})

test_that("Parse MTG", {
  MTG <- parse_MTG_MTG(MTG_file, classes, description, features)
  expect_equal(MTG$totalCount, 7) # number of nodes
  expect_equal(MTG$leafCount, 2)
  expect_equal(MTG$height, 6)
  expect_equal(MTG$averageBranchingFactor, 1.2)
})

test_that("Read MTG file", {
  MTG <- read_mtg(filepath)
  expect_equal(names(attributes(MTG)), c("class", "classes", "description", "features"))
  expect_equal(MTG, parse_MTG_MTG(MTG_file, classes, description, features))
})
| /tests/testthat/test-read_MTG.R | permissive | VEZY/XploRer | R | false | false | 1,944 | r | filepath= system.file("extdata", "simple_plant.mtg", package = "XploRer")
# Tests for the MTG parser (filepath to the example file is defined above).
# (Top-level `=` assignments replaced with `<-`, per R style conventions.)
MTG_file <- readLines(filepath)
MTG_file <- strip_comments(MTG_file)
MTG_file <- strip_empty_lines(MTG_file)

test_that("Check the sections", {
  expect_null(check_sections(MTG_file))
})

test_that("Parse code", {
  expect_equal(parse_MTG_code(MTG_file), "FORM-A")
})

classes <- parse_MTG_classes(MTG_file)
test_that("Parse classes", {
  expect_true(is.data.frame(classes))
  expect_equal(nrow(classes), 5)
  expect_equal(classes$SYMBOL, c("$", "Individual", "Axis", "Internode", "Leaf"))
  expect_equal(classes$SCALE, c(0, 1, 2, 3, 3))
  expect_equal(classes$DECOMPOSITION, rep("FREE", 5))
  expect_equal(classes$INDEXATION, rep("FREE", 5))
  expect_equal(classes$DEFINITION, rep("IMPLICIT", 5))
})

description <- parse_MTG_description(MTG_file)
test_that("Parse description", {
  expect_true(is.data.frame(description))
  expect_equal(nrow(description), 2)
  expect_equal(description$LEFT, rep("Internode", 2))
  expect_equal(description$RELTYPE, c("+", "<"))
  expect_equal(description$MAX, c("?", "?"))
})

features <- parse_MTG_section(MTG_file, "FEATURES:",
                              c("NAME", "TYPE"),
                              "MTG:", TRUE)
test_that("Parse features", {
  expect_true(is.data.frame(features))
  expect_equal(nrow(features), 7)
  expect_equal(features$NAME, c('XX', 'YY', 'ZZ', 'FileName', 'Length', 'Width', 'XEuler'))
  expect_equal(features$TYPE, c('REAL', 'REAL', 'REAL', 'ALPHA', 'ALPHA', 'ALPHA', 'REAL'))
})

test_that("Parse MTG", {
  MTG <- parse_MTG_MTG(MTG_file, classes, description, features)
  expect_equal(MTG$totalCount, 7) # number of nodes
  expect_equal(MTG$leafCount, 2)
  expect_equal(MTG$height, 6)
  expect_equal(MTG$averageBranchingFactor, 1.2)
})

test_that("Read MTG file", {
  MTG <- read_mtg(filepath)
  expect_equal(names(attributes(MTG)), c("class", "classes", "description", "features"))
  expect_equal(MTG, parse_MTG_MTG(MTG_file, classes, description, features))
})
|
## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. Returns a list of four accessors:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(m) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse (NULL until one is set)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  get <- function() {
    x
  }
  set <- function(y) {
    # `<<-` writes into the enclosing environment, so the cached inverse
    # is invalidated whenever the matrix is replaced
    x <<- y
    inv <<- NULL
  }
  getInverse <- function() {
    inv
  }
  setInverse <- function(value) {
    inv <<- value
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cachemean: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() on the first call and
## serving the cached copy on subsequent calls.
##
## BUG FIX: the original called x$getmean()/x$setmean(), which
## makeCacheMatrix does not define (it exposes getInverse/setInverse),
## so every call failed with "attempt to apply non-function". The body
## now uses the accessors makeCacheMatrix actually provides.
##
## x   : object returned by makeCacheMatrix (matrix assumed invertible)
## ... : further arguments forwarded to solve()
cachemean <- function(x, ...) {
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
| /cachematrix.R | no_license | codejay411/ProgrammingAssignment2 | R | false | false | 710 | r | ## Put comments here that give an overall description of what your
## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. Returns a list of four accessors:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(m) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse (NULL until one is set)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  get <- function() {
    x
  }
  set <- function(y) {
    # `<<-` writes into the enclosing environment, so the cached inverse
    # is invalidated whenever the matrix is replaced
    x <<- y
    inv <<- NULL
  }
  getInverse <- function() {
    inv
  }
  setInverse <- function(value) {
    inv <<- value
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cachemean: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() on the first call and
## serving the cached copy on subsequent calls.
##
## BUG FIX: the original called x$getmean()/x$setmean(), which
## makeCacheMatrix does not define (it exposes getInverse/setInverse),
## so every call failed with "attempt to apply non-function". The body
## now uses the accessors makeCacheMatrix actually provides.
##
## x   : object returned by makeCacheMatrix (matrix assumed invertible)
## ... : further arguments forwarded to solve()
cachemean <- function(x, ...) {
  m <- x$getInverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
|
## Script to extract the UP (unidades de programación) tables
library(data.table)
library(tidyverse)
carpetaSujetos <- "~/Dropbox/MOOCs/R/P48/SUJETOS/"
# Locate the exported files for physical units, market subjects and
# programming units inside the SUJETOS folder
uFisicas <- list.files(path = carpetaSujetos, pattern = "export_unidades-fisicas")
sujMercado <- list.files(path = carpetaSujetos, pattern = "export_sujetos-del-mercado")
uProg <- list.files(path = carpetaSujetos, pattern = "export_unidades-de-programacion")
# Read the three exports in one pass with data.table::fread
sujetos <- lapply(list(paste0(carpetaSujetos, uFisicas),
                       paste0(carpetaSujetos, sujMercado),
                       paste0(carpetaSujetos, uProg)
                       ),
                  fread,
                  encoding = "UTF-8"
                  )
names(sujetos) <- c("uFisicas", "sujMercado", "uProg")
# Rename the subject-code column so it matches the join key used below
colnames(sujetos$sujMercado)[grep(colnames(sujetos$sujMercado), pattern = "Código de sujeto")] <- "Sujeto del Mercado"
# Attach the subject information (first two columns) to each programming unit
tablaUP <- left_join(x = sujetos$uProg,
                     y = sujetos$sujMercado[, 1:2],
                     by = "Sujeto del Mercado")
colnames(tablaUP)[grep(pattern = "Código de UP", x = colnames(tablaUP))] <- "CodUP"
# Key the result by UP code for fast data.table lookups
setDT(tablaUP)
setkey(x = tablaUP, CodUP)
# Drop the intermediate objects; `sujetos` and `tablaUP` remain in scope
rm(list = c("carpetaSujetos", "uFisicas", "sujMercado", "uProg"))
| /R/tablasUP.R | no_license | rucoma/P48 | R | false | false | 1,157 | r | ## Script para extraer las tablas de UP
## Script to extract the UP (unidades de programación) tables
library(data.table)
library(tidyverse)
carpetaSujetos <- "~/Dropbox/MOOCs/R/P48/SUJETOS/"
# Locate the exported files for physical units, market subjects and
# programming units inside the SUJETOS folder
uFisicas <- list.files(path = carpetaSujetos, pattern = "export_unidades-fisicas")
sujMercado <- list.files(path = carpetaSujetos, pattern = "export_sujetos-del-mercado")
uProg <- list.files(path = carpetaSujetos, pattern = "export_unidades-de-programacion")
# Read the three exports in one pass with data.table::fread
sujetos <- lapply(list(paste0(carpetaSujetos, uFisicas),
                       paste0(carpetaSujetos, sujMercado),
                       paste0(carpetaSujetos, uProg)
                       ),
                  fread,
                  encoding = "UTF-8"
                  )
names(sujetos) <- c("uFisicas", "sujMercado", "uProg")
# Rename the subject-code column so it matches the join key used below
colnames(sujetos$sujMercado)[grep(colnames(sujetos$sujMercado), pattern = "Código de sujeto")] <- "Sujeto del Mercado"
# Attach the subject information (first two columns) to each programming unit
tablaUP <- left_join(x = sujetos$uProg,
                     y = sujetos$sujMercado[, 1:2],
                     by = "Sujeto del Mercado")
colnames(tablaUP)[grep(pattern = "Código de UP", x = colnames(tablaUP))] <- "CodUP"
# Key the result by UP code for fast data.table lookups
setDT(tablaUP)
setkey(x = tablaUP, CodUP)
# Drop the intermediate objects; `sujetos` and `tablaUP` remain in scope
rm(list = c("carpetaSujetos", "uFisicas", "sujMercado", "uProg"))
#MNIST
# Fit a censored-Nakagami Gaussian-process model on MNIST-sized data with the
# R tensorflow bindings (TF1-style graph API: placeholders + explicit
# optimizer ops).  All helper constructors (make_gp_model, censored_nakagami,
# compute_kl, constrain_pos) are sourced from the functions/ directory below.
#
# NOTE(review): this script is a template -- A (distance matrix), z (initial
# latents) and cut_off are placeholder strings that must be replaced with real
# data before anything past the parameter block can run, and `subtrain` is
# assumed to already exist in the workspace.
sapply(paste("functions/",list.files("functions/"), sep = ""), source)
#' Parameters and more
N <- 5000 # Number of observations
m <- 100 # Number of inducing points
D <- 28*28 # Ambient dimension / data dimension
d <- 2 # Latent dimension
float_type = tf$float64
swiss <- subtrain$x
####################
A <- "Has to be loaded in"
#####################
z <- "This has to be loaded in"#Z$points
#######################
cut_off <- "Find out cutoff"
#######################
#' R is the distance matrix with the censored values replaced with the cutoff
R <- matrix(rep(1,N^2), ncol = N)
R[which(A < cut_off, arr.ind = TRUE)] <- A[which(A < cut_off, arr.ind = TRUE)]
R[which(A >= cut_off, arr.ind = TRUE)] <- cut_off * R[which(A >= cut_off, arr.ind = TRUE)]
#prior_mean <- function(s){ # This makes prior mean "diagonal"
#  N <- s$get_shape()$as_list()[1]
#  a <- tf$constant(W, dtype = float_type)
#  a <- tf$tile(a[NULL,,], as.integer(c(N,1,1)))
#  return(a)
#}
# GP (Wishart process: is.WP = TRUE) mapping the d-dimensional latent space
# to the D observed dimensions.
model <- make_gp_model(kern.type = "ARD",
                       input = z,
                       num_inducing = m,
                       in_dim = d, out_dim = D,
                       is.WP = TRUE, deg_free = d,
                       #mf = prior_mean,
                       float_type = float_type) # Should be unconstrained Wishart to generate Dxd matrices
model$kern$ARD$ls <- tf$Variable(rep(log(exp(2)-1),d), dtype = float_type)
model$kern$ARD$var <- tf$Variable(2, constraint = constrain_pos, dtype = float_type)
#model$v_par$mu <- tf$Variable(aperm(array(rep(W,m), c(D,d,m)), perm = c(3,1,2)), dtype = float_type)
rm(A) # Remove A from memory
# Variational model over the latent coordinates (white kernel, diagonal
# variational covariance; one inducing point per observation).
latents <- make_gp_model(kern.type = "white",
                         input = z,
                         num_inducing = N,
                         in_dim = d, out_dim = d,
                         variational_is_diag = TRUE,
                         float_type = float_type)
latents$kern$white$noise <- tf$constant(1, dtype = float_type) # GP hyperparameter is not variable here
latents$v_par$mu <- tf$Variable(z, dtype = float_type)
latents$v_par$chol <- tf$Variable(matrix( rep(1e-3, d*N), ncol = N ), dtype = float_type, constraint = constrain_pos)
# Minibatch of index pairs; z_batch draws reparameterized latent samples
# (mean + scale * standard normal) for the gathered indices.
I_batch <- tf$placeholder(tf$int32, shape(NULL,2L))
z_batch <- tf$transpose(tf$gather(latents$v_par$mu, I_batch), as.integer(c(0,2,1))) +
  tf$transpose(tf$gather(tf$transpose(latents$v_par$chol), I_batch), as.integer(c(0,2,1))) *
  tf$random_normal(tf$shape(tf$transpose(tf$gather(latents$v_par$mu, I_batch), as.integer(c(0,2,1)))), dtype = float_type)
dist_batch <- tf$cast(tf$gather_nd(R, I_batch), dtype = float_type) # N,
trainer <- tf$train$AdamOptimizer(learning_rate = 0.005)
reset_trainer <- tf$variables_initializer(trainer$variables())
# Censored-Nakagami likelihood of the observed distances; the objective below
# is the negative ELBO (llh - KL).
driver <- censored_nakagami(model, z_batch, dist_batch, cut_off, number_of_interpolants = 15, samples = 30)
llh <- tf$reduce_mean(driver)
KL <- compute_kl(model) / tf$constant(N, dtype = float_type) #+ compute_kl(latents) / tf$constant(N, dtype = float_type)
optimizer_model <- trainer$minimize( - (llh - KL), var_list = list(model$kern$ARD, model$v_par$v_x, model$v_par$mu, model$v_par$chol))
optimizer_latents <- trainer$minimize( - (llh - KL), var_list = list(latents$v_par$mu, latents$v_par$chol)) | /model_mnist.R | no_license | JorgensenMart/ISOGP | R | false | false | 3,247 | r | #MNIST
sapply(paste("functions/",list.files("functions/"), sep = ""), source)
#' Parameters and more
N <- 5000 # Number of observations
m <- 100 # Number of inducing points
D <- 28*28 # Ambient dimension / data dimension
d <- 2 # Latent dimension
float_type = tf$float64
swiss <- subtrain$x
####################
A <- "Has to be loaded in"
#####################
z <- "This has to be loaded in"#Z$points
#######################
cut_off <- "Find out cutoff"
#######################
#' R is the distance matrix with the censored values replaced with the cutoff
R <- matrix(rep(1,N^2), ncol = N)
R[which(A < cut_off, arr.ind = TRUE)] <- A[which(A < cut_off, arr.ind = TRUE)]
R[which(A >= cut_off, arr.ind = TRUE)] <- cut_off * R[which(A >= cut_off, arr.ind = TRUE)]
#prior_mean <- function(s){ # This makes prior mean "diagonal"
# N <- s$get_shape()$as_list()[1]
# a <- tf$constant(W, dtype = float_type)
# a <- tf$tile(a[NULL,,], as.integer(c(N,1,1)))
# return(a)
#}
model <- make_gp_model(kern.type = "ARD",
input = z,
num_inducing = m,
in_dim = d, out_dim = D,
is.WP = TRUE, deg_free = d,
#mf = prior_mean,
float_type = float_type) # Should be unconstrained Wishart to generate Dxd matrices
model$kern$ARD$ls <- tf$Variable(rep(log(exp(2)-1),d), dtype = float_type)
model$kern$ARD$var <- tf$Variable(2, constraint = constrain_pos, dtype = float_type)
#model$v_par$mu <- tf$Variable(aperm(array(rep(W,m), c(D,d,m)), perm = c(3,1,2)), dtype = float_type)
rm(A) # Remove A from memory
latents <- make_gp_model(kern.type = "white",
input = z,
num_inducing = N,
in_dim = d, out_dim = d,
variational_is_diag = TRUE,
float_type = float_type)
latents$kern$white$noise <- tf$constant(1, dtype = float_type) # GP hyperparameter is not variable here
latents$v_par$mu <- tf$Variable(z, dtype = float_type)
latents$v_par$chol <- tf$Variable(matrix( rep(1e-3, d*N), ncol = N ), dtype = float_type, constraint = constrain_pos)
I_batch <- tf$placeholder(tf$int32, shape(NULL,2L))
z_batch <- tf$transpose(tf$gather(latents$v_par$mu, I_batch), as.integer(c(0,2,1))) +
tf$transpose(tf$gather(tf$transpose(latents$v_par$chol), I_batch), as.integer(c(0,2,1))) *
tf$random_normal(tf$shape(tf$transpose(tf$gather(latents$v_par$mu, I_batch), as.integer(c(0,2,1)))), dtype = float_type)
dist_batch <- tf$cast(tf$gather_nd(R, I_batch), dtype = float_type) # N,
trainer <- tf$train$AdamOptimizer(learning_rate = 0.005)
reset_trainer <- tf$variables_initializer(trainer$variables())
driver <- censored_nakagami(model, z_batch, dist_batch, cut_off, number_of_interpolants = 15, samples = 30)
llh <- tf$reduce_mean(driver)
KL <- compute_kl(model) / tf$constant(N, dtype = float_type) #+ compute_kl(latents) / tf$constant(N, dtype = float_type)
optimizer_model <- trainer$minimize( - (llh - KL), var_list = list(model$kern$ARD, model$v_par$v_x, model$v_par$mu, model$v_par$chol))
optimizer_latents <- trainer$minimize( - (llh - KL), var_list = list(latents$v_par$mu, latents$v_par$chol)) |
new.charts.TimeSeries <- function(R, space = 0, main = "Returns", ...) {
    # Draw one stacked time-series panel per column of R with a shared
    # y-range.  The y axis alternates sides (right on even panels), only the
    # bottom panel gets an x axis, and the y axis is suppressed after the
    # first panel.
    #
    # Args:
    #   R:     returns data; validated/coerced via checkData().
    #   space: unused; retained for backward compatibility with callers.
    #   main:  overall title drawn in the outer top margin.
    #   ...:   forwarded to new.chart.TimeSeries().
    R = checkData(R)
    columns = NCOL(R)
    ymax = max(R, na.rm = TRUE)
    ymin = min(R, na.rm = TRUE)
    op <- par(oma = c(2, 0, 4, 0), mar = c(0, 4, 0, 4))
    layout(matrix(seq_len(columns), ncol = 1, byrow = TRUE), widths = 1)
    xaxis = FALSE
    yaxis = TRUE
    for (i in seq_len(columns)) {
        yaxis.right <- (i %% 2 == 0)  # even panels carry the y axis on the right
        if (i == columns)
            xaxis = TRUE
        print(new.chart.TimeSeries(R[, i, drop = FALSE], xaxis = xaxis,
            main = "", ylab = colnames(R)[i], ylim = c(ymin, ymax),
            yaxis = yaxis, yaxis.right = yaxis.right, ...))
        if (i == 1)
            yaxis = FALSE
    }
    mtext(main, side = 3, outer = TRUE, font = 2, cex = 1.2, line = 1)
    par(op)
}
| /new.charts.TimeSeries.R | no_license | Shubham-Khanve/xtsModPerf | R | false | false | 1,145 | r | new.charts.TimeSeries <-
function (R, space = 0, main = "Returns", ...)
{
R = checkData(R)
columns = NCOL(R)
columnnames = colnames(R)
ymax = max(R, na.rm=TRUE)
ymin = min(R, na.rm=TRUE)
op <- par(oma = c(2,0,4,0), mar=c(0,4,0,4))
layout(matrix(c(1:columns), ncol = 1, byrow = TRUE), widths=1)
xaxis=FALSE
yaxis=TRUE
#even function introduced
even <- function (x) {
x%%2 == 0
}
##############################################################################
for(i in 1:columns){
if(even(i))
yaxis.right=TRUE
else
yaxis.right=FALSE
if(i==columns)
xaxis = TRUE
#chart.TimeSeries replaced by new.chart.TimeSeries
print(new.chart.TimeSeries(R[,i,drop=FALSE], xaxis=xaxis, main="", ylab=colnames(R)[i], ylim = c(ymin,ymax), yaxis=yaxis, yaxis.right=yaxis.right, ...))
########################################################################
if(i==1)
yaxis=FALSE
}
mtext(main,
side = 3, outer = TRUE,
font = 2, cex = 1.2, line=1)
par(op)
}
|
parse_args <- function(args) {
  # Parse MBA command-line arguments into a validated option list.  The
  # parsed 'prefix' option is renamed to 'outFN' (the output file stem)
  # before validation by process.MBA.opts.
  #
  # Args:
  #   args: character vector of command-line arguments.
  # Returns: the processed option list.
  lop <- read.MBA.opts.batch(args, verb = 0)
  lop["outFN"] <- lop["prefix"]
  lop["prefix"] <- NULL
  lop <- process.MBA.opts(lop, verb = lop$verb)
  lop
}
get_eoiq <- function(qVars, EOI) {
  # Quantitative effects of interest: the variables listed in both qVars and
  # EOI.  "Intercept" is always treated as a quantitative effect of interest.
  #
  # Args:
  #   qVars: comma-separated quantitative variable names.
  #   EOI:   comma-separated effects of interest.
  # Returns: character vector, ordered as in EOI.  (The value is now
  # returned explicitly; previously it was only the invisible value of an
  # assignment.)
  EOIq <- strsplit(qVars, ",")[[1]]
  if (!("Intercept" %in% EOIq)) EOIq <- c("Intercept", EOIq)
  intersect(strsplit(EOI, ",")[[1]], EOIq)
}
get_eioc <- function(cVars, EOI) {
  # Categorical effects of interest: the variables listed in both cVars and
  # EOI.  Returns NA when no categorical variables were declared (cVars is
  # NULL).  The value is now returned explicitly; previously it was only the
  # invisible value of an assignment.
  #
  # Args:
  #   cVars: comma-separated categorical variable names, or NULL.
  #   EOI:   comma-separated effects of interest.
  if (is.null(cVars)) {
    return(NA)
  }
  intersect(strsplit(EOI, ",")[[1]], strsplit(cVars, ",")[[1]])
}
# Summarize and report a fitted MBA model.  Appends timing information, the
# brms model summary, per-region ("hub") effects, and region-pair effects to
# "<outFN>.txt"; saves the workspace to "<outFN>.RData" (before and after
# summarization); and draws one matrix plot PDF per reported effect.
#
# Args:
#   fm:          fitted brms model object.
#   outFN:       output file stem.
#   iterations:  MCMC iterations per chain; chains: number of chains
#                (ns = iterations * chains / 2 posterior draws assumed).
#   EOIq, EOIc:  quantitative / categorical effects of interest (may be NA).
#   qContr:      quantitative contrast specification (may be NA).
#   ptm:         proc.time() snapshot taken at the start of the run.
#   ROI1, ROI2:  names of the two region columns.
#
# NOTE(review): `dataTable` and `qContrL` are read from the calling/global
# environment rather than passed as arguments -- confirm they are defined
# before post_process() runs.
# NOTE(review): in the factor branch below, ww() is called without its `ns`
# argument -- verify ww()'s signature tolerates that at those call sites.
post_process <- function(fm,outFN,iterations,chains,EOIq,EOIc,qContr,ptm,ROI1,ROI2){
  nR <- get_nr(dataTable,c(ROI1,ROI2))
  print(format(Sys.time(), "%D %H:%M:%OS3"))
  # Stop the clock
  proc.time() - ptm
  save.image(file = paste0(outFN, ".RData"))
  cat(format(Sys.time(), "%D %H:%M:%OS3"), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  cat(utils::capture.output(proc.time() - ptm), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  # Echo the brms model summary to the console and to the report file.
  rs <- summary(fm)
  rs_text <- utils::capture.output(rs)
  cat("
++++++++++++++++++++++++++++++++++++++++++++++++++++
")
  cat("***** Summary information of model information *****
")
  cat(rs_text, fill = 2)
  cat("
***** End of model information *****
")
  cat("++++++++++++++++++++++++++++++++++++++++++++++++++++
")
  cat("
***** Summary information of model results *****
", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  cat(rs_text, file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  # union(levels(dataTable[ROI1][[1]]), levels(dataTable[ROI2][[1]]))
  # <- list(outFN="Tara", EOI=c("Intercept", "e4", "site"), EOIc=c("e4", "site"), EOIq="Intercept")
  # ["EOIq"]] <- "Intercept"
  ns <- iterations * chains / 2
  # nR <- nlevels(dataTable[ROI1][[1]])
  aa <- brms::fixef(fm, summary = FALSE) # Population-Level Estimates
  bb <- brms::ranef(fm, summary = FALSE) # Extract Group-Level (or random-effect) Estimates
  if (nR != length(dimnames(bb$mmROI1ROI2)[[2]])) {
    cat("
***** Warning: something strange about the ROIs! *****
", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  }
  ########## region effects #############
  # posterior samples at ROIs for a term
  # gg <- psROI(aa, bb, 'Intercept', nR)
  # summary for ROIs: nd - number of digits to output
  # gg <- sumROI(gg, ns, 3)
  # for Intercept and quantitative variables
  if (any(!is.na(EOIq) == TRUE)) {
    for (ii in 1:length(EOIq)) {
      cat(sprintf("===== Summary of region effects for %s =====", EOIq[ii]),
        file = paste0(outFN, ".txt"), sep = "\n", append = TRUE
      )
      gg <- sumROI(psROI(aa, bb, EOIq[ii], nR), ns, 3)
      cat(utils::capture.output(gg), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
    }
  }
  # for contrasts among quantitative variables
  if (any(!is.na(qContr) == TRUE)) {
    for (ii in 1:(length(qContrL) / 2)) {
      cat(sprintf("===== Summary of region effects for %s vs %s =====", qContrL[2 * ii - 1], qContrL[2 * ii]),
        file = paste0(outFN, ".txt"), sep = "\n", append = TRUE
      )
      gg <- sumROI(psROI(aa, bb, qContrL[2 * ii - 1], nR) - psROI(aa, bb, qContrL[2 * ii], nR), ns, 3)
      cat(utils::capture.output(gg), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
    }
  }
  # for factor
  if (any(!is.na(EOIc) == TRUE)) {
    for (ii in 1:length(EOIc)) {
      lvl <- levels(dataTable[[EOIc[ii]]]) # levels
      nl <- nlevels(dataTable[[EOIc[ii]]]) # number of levels: last level is the reference in deviation coding
      ps <- array(0, dim = c(nl, ns, nR)) # posterior samples
      for (jj in 1:(nl - 1)) ps[jj, , ] <- psROI(aa, bb, paste0(EOIc[ii], jj), nR)
      ps[nl, , ] <- psROI(aa, bb, "Intercept", nR) # Intercept: averge effect
      # Convert deviation-coded draws into per-level effects; the last level
      # (reference) is reconstructed as Intercept minus the sum of the others.
      psa <- array(0, dim = c(nl, ns, nR)) # posterior samples adjusted
      for (jj in 1:(nl - 1)) {
        psa[jj, , ] <- ps[nl, , ] + ps[jj, , ]
        psa[nl, , ] <- psa[nl, , ] + ps[jj, , ]
      }
      psa[nl, , ] <- ps[nl, , ] - psa[nl, , ] # reference level
      dimnames(psa)[[3]] <- dimnames(bb$mmROI1ROI2)[[2]]
      oo <- apply(psa, 1, sumROI, ns, 3)
      cat(sprintf("===== Summary of region effects for %s =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:nl) {
        cat(sprintf("----- %s level: %s", EOIc[ii], lvl[jj]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        cat(utils::capture.output(oo[[jj]]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      }
      cat(sprintf("===== Summary of region effects for %s comparisons =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:(nl - 1)) {
        for (kk in (jj + 1):nl) {
          cat(sprintf("----- level comparison: %s vs %s", lvl[jj], lvl[kk]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          oo <- sumROI(psa[jj, , ] - psa[kk, , ], ns, 3)
          cat(utils::capture.output(oo), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        }
      }
    }
  }
  ########## region pair effects #############
  # for intercept or quantitative variable
  if (any(!is.na(EOIq) == TRUE)) {
    for (ii in 1:length(EOIq)) {
      xx <- vv(ww(aa, bb, EOIq[ii], nR, ns), ns, nR)
      cat(sprintf("===== Summary of region pair effects for %s =====", EOIq[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      prnt(90, 1, res(bb, xx, 0.1, 3), outFN, "region pairs")
      prnt(95, 1, res(bb, xx, 0.05, 3), outFN, "region pairs")
      prnt(95, 2, res(bb, xx, 0.025, 3), outFN, "region pairs")
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      mPlot(xx, EOIq[ii])
    }
  }
  # for contrasts among quantitative variables
  if (any(!is.na(qContr) == TRUE)) {
    for (ii in 1:(length(qContrL) / 2)) {
      xx <- vv(ww(aa, bb, qContrL[2 * ii - 1], nR, ns) - ww(aa, bb, qContrL[2 * ii], nR, ns), ns, nR)
      cat(sprintf("===== Summary of region pair effects for %s vs %s =====", qContrL[2 * ii - 1], qContrL[2 * ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      prnt(90, 1, res(bb, xx, 0.1, 3), outFN, "region pairs")
      prnt(95, 1, res(bb, xx, 0.05, 3), outFN, "region pairs")
      prnt(95, 2, res(bb, xx, 0.025, 3), outFN, "region pairs")
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      mPlot(xx, paste0(qContrL[2 * ii - 1], "vs", qContrL[2 * ii]))
    }
  }
  # for factor
  if (any(!is.na(EOIc) == TRUE)) {
    for (ii in 1:length(EOIc)) {
      lvl <- levels(dataTable[[EOIc[ii]]]) # levels
      nl <- nlevels(dataTable[[EOIc[ii]]]) # number of levels: last level is the reference in deviation coding
      ps <- array(0, dim = c(nl, ns, nR, nR)) # posterior samples
      for (jj in 1:(nl - 1)) ps[jj, , , ] <- ww(aa, bb, paste0(EOIc[ii], jj), nR)
      ps[nl, , , ] <- ww(aa, bb, "Intercept", nR)
      psa <- array(0, dim = c(nl, ns, nR, nR)) # posterior samples adjusted
      for (jj in 1:(nl - 1)) {
        psa[jj, , , ] <- ps[nl, , , ] + ps[jj, , , ]
        psa[nl, , , ] <- psa[nl, , , ] + ps[jj, , , ]
      }
      psa[nl, , , ] <- ps[nl, , , ] - psa[nl, , , ] # reference level
      dimnames(psa)[[3]] <- dimnames(bb$mmROI1ROI2)[[2]]
      dimnames(psa)[[4]] <- dimnames(bb$mmROI1ROI2)[[2]]
      # oo <- array(apply(psa, 1, vv, ns, nR), dim=c(nR, nR, 8, nl))
      # dimnames(oo)[[3]] <- c('mean', 'sd', 'P+', '2.5%', '5%', '50%', '95%', '97.5%')
      cat(sprintf("===== Summary of region pair effects for %s =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:nl) {
        cat(sprintf("----- %s level: %s", EOIc[ii], lvl[jj]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        oo <- vv(psa[jj, , , ], ns, nR)
        prnt(90, 1, res(bb, oo, 0.1, 3), outFN, "region pairs")
        prnt(95, 1, res(bb, oo, 0.05, 3), outFN, "region pairs")
        prnt(95, 2, res(bb, oo, 0.025, 3), outFN, "region pairs")
        cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        mPlot(oo, paste0(EOIc[ii], "_", lvl[jj]))
      }
      cat(sprintf("===== Summary of region pair effects for %s comparisons =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:(nl - 1)) {
        for (kk in (jj + 1):nl) {
          cat(sprintf("----- level comparison: %s vs %s", lvl[jj], lvl[kk]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          oo <- vv(psa[jj, , , ] - psa[kk, , , ], ns, nR)
          prnt(90, 1, res(bb, oo, 0.1), outFN, "region pairs")
          prnt(95, 1, res(bb, oo, 0.05), outFN, "region pairs")
          prnt(95, 2, res(bb, oo, 0.025), outFN, "region pairs")
          cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          mPlot(oo, paste0(EOIc[ii], "_", lvl[jj], "vs", lvl[kk]))
        }
      }
    }
  }
  # save it again
  save.image(file = paste0(outFN, ".RData"))
  cat("\nCongratulations! The above results are saved in file ", outFN, "\n\n", sep = "")
}
# Read the input data table and normalize it for MBA model fitting: rename
# the response/subject/region columns to Y/Subj/ROI1(/ROI2), coerce region
# and subject columns to factors, validate the declared variable types, add
# a unit weight column w, optionally standardize numeric covariates, and
# sanity-check the row count and any quantitative contrast specification.
#
# Args mirror the MBA command-line options; ROI2 is optional (NULL when only
# one region column is present).  Returns the cleaned data.frame.
#
# NOTE(review): r2z and MD are accepted but r2z is never used here, and the
# qContr check references `QV`, which is not defined in this function --
# presumably a global listing the quantitative variables; confirm.  Also,
# `qContrL` is created locally and not returned, while post_process() reads
# a variable of that name -- verify they are wired up by the caller.
setup_dataTable <- function(data_path,model,MD,r2z,cVars,qVars,stdz,
                            qContr,Y,Subj,ROI1, ROI2=NULL){
  dataTable <- utils::read.table(data_path,header=T)
  # standardize the names for Y, ROI and subject
  names(dataTable)[names(dataTable)==Subj] <- "Subj"
  names(dataTable)[names(dataTable)==Y] <- "Y"
  names(dataTable)[names(dataTable)==ROI1] <- "ROI1"
  # make sure ROI1, ROI2 and Subj are treated as factors
  if(!is.factor(dataTable[ROI1][[1]])) dataTable[ROI1][[1]] <- as.factor(dataTable[ROI1][[1]])
  if(!is.factor(dataTable$Subj)) dataTable$Subj <- as.factor(dataTable$Subj)
  if (!is.null(ROI2)){
    if(!is.factor(dataTable[ROI2][[1]])) dataTable[ROI2][[1]] <- as.factor(dataTable[ROI2][[1]])
    names(dataTable)[names(dataTable)==ROI2] <- "ROI2"
  }
  # verify variable types
  if(model==1) terms <- 1 else terms <- strsplit(model, "\\+")[[1]]
  if(length(terms) > 1) {
    #terms <- terms[terms!="1"]
    for(ii in 1:length(terms)) {
        if(!is.null(cVars[1])) if(terms[ii] %in% strsplit(cVars, ",")[[1]] & !is.factor(dataTable[[terms[ii]]])) # declared factor with quantitative levels
           dataTable[[terms[ii]]] <- as.factor(dataTable[[terms[ii]]])
        if(terms[ii] %in% strsplit(qVars, ",")[[1]] & is.factor(dataTable[[terms[ii]]])) # declared numerical variable contains characters
           stop(sprintf("Column %s in the data table is declared as numerical, but contains characters!", terms[ii]))
    }
  }
  # unit weights for the multi-membership terms in the model formula
  dataTable$w <- 1
  # standardization
  if(!is.null(stdz)) {
    sl <- strsplit(stdz, ",")[[1]]
    for(ii in 1:length(sl)) if(is.numeric(dataTable[[sl[ii]]]))
      dataTable[[sl[ii]]] <- scale(dataTable[[sl[ii]]], center = TRUE, scale = TRUE) else
        stop(sprintf("The column %s is categorical, not numerical! Why are you asking me to standardize it?", sl[ii]))
  }
  # number of ROIs
  nR <- get_nr(dataTable,c(ROI1,ROI2))
  # Without the missing-data flag, expect all nR*(nR-1)/2 pairs per subject.
  if(!MD) if(nlevels(dataTable$Subj)*nR*(nR-1)/2 > nrow(dataTable))
    stop(sprintf("Error: with %d regions and %d subjects, it is expected to have %d rows per subject, leading to toally %d rows in the input data table. However, there are only %d rows. If you have missing data, use option -MD", nR, nlevels(dataTable$Subj), nR*(nR-1)/2, nlevels(dataTable$Subj)*nR*(nR-1)/2, nrow(dataTable)))
  if(any(!is.null(qContr))) {
    qContrL <- unlist(strsplit(qContr, ","))
    # verify "vs" in alternating location
    ll <- which(qContrL %in% "vs")
    if(!all(ll == seq(2,300,3)[1:length(ll)]))
      stop(sprintf("Quantitative contrast specification -qContr is incorrect!"))
    qContrL <- qContrL[!qContrL %in% "vs"]
    # verify that variable names are correct
    if(!all(qContrL %in% c(QV, "Intercept")))
      stop(sprintf("At least one of the variable labels in quantitative contrast specification -qContr is incorrect!"))
  }
  dataTable
}
#' Get number of rows based on the count of variable levels
#'
#' Given a dataframe with columns that represent categorical
#' variables this function will return the total number of unique
#' elements that are found across all columns.
#'
#' @param df A dataframe in which some categorical variables are stored
#' @param roi_names The column labels that refer to categorical variables
#'   used to fit the model
#'
#' @return count
#' @export
#'
#' @examples
#' roi_names <- c("cat_var_1", "cat_var_2")
#'
#' df <- data.frame(
#'   col_1 = c("text", "text", "text", "text"),
#'   cat_var_1 = c("unique_val_1", "unique_val_2", "unique_val_1", "unique_val_1"),
#'   cat_var_2 = c("unique_val_1", "unique_val_1", "unique_val_3", "unique_val_4")
#' )
#'
#' get_nr(df, roi_names)
get_nr <- function(df, roi_names) {
  # Pool all named columns as character (so factor and character columns are
  # treated alike) and count the distinct labels in the pooled vector.
  # Base-R equivalent of the previous purrr/dplyr pipeline.
  pooled <- unlist(lapply(roi_names, function(nm) as.character(df[nm][[1]])),
                   use.names = FALSE)
  length(unique(pooled))
}
run_mba <- function(dataTable, model, chains, iterations) {
  # Fit the MBA Bayesian multilevel model with brms::brm.
  #
  # Args:
  #   dataTable:  long-format data with Y, Subj, ROI1, ROI2 and weight w.
  #   model:      1 for the intercept-only model; otherwise a string of
  #               population-level terms (e.g. "age+site").
  #   chains, iterations: MCMC settings forwarded to brm().
  # Returns: the fitted brmsfit object.
  set.seed(1234)  # fixed seed for reproducible sampling
  if (model == 1) {
    modelForm <- stats::as.formula(paste("Y ~ 1 + (1|Subj) + (1|ROI1:ROI2) +
     (1|mm(ROI1, ROI2, weights = cbind(w, w), scale=FALSE)) +
     (1|mm(ROI1:Subj, ROI2:Subj, weights = cbind(w, w), scale=FALSE))"))
  } else {
    modelForm <- stats::as.formula(paste("Y~", model, "+(1|Subj)+(", model, "|ROI1:ROI2)+(",
     model, "|mm(ROI1, ROI2, weights = cbind(w, w), scale=FALSE))"))
  }
  if (model == 1) {
    fm <- brm(modelForm, data = dataTable, chains = chains,
              iter = iterations, control = list(adapt_delta = 0.99, max_treedepth = 15))
  } else {
    # Weakly informative priors are applied only to the covariate model.
    fm <- brm(modelForm, data = dataTable,
              prior = c(prior(normal(0, 1), class = "Intercept"), prior(normal(0, 0.5), class = "sd")),
              chains = chains, iter = iterations, control = list(adapt_delta = 0.99, max_treedepth = 15))
  }
  # Return the fit from both branches.  Previously the trailing `fm` sat
  # inside the else branch only, and the model==1 path relied on the
  # (invisible) value of the assignment.
  fm
}
# Append a summary of the input data to "<outFN>.txt": ROI count, response
# variable statistics, the data structure, and subject/ROI tabulations.
#
# Args:
#   dataTable: cleaned data (see setup_dataTable).
#   outFN:     output file stem.
#   ROI1, ROI2: names of the region columns (ROI2 optional).
#
# Note: outDF(utils::str(dataTable), outFN) works because str() prints while
# being evaluated lazily inside capture.output() within outDF().
log_setup_info <- function(dataTable,outFN,ROI1,ROI2=NULL){
  nR <- get_nr(dataTable,c(ROI1,ROI2))
  cat("===== Summary of variable information =====", file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  cat(sprintf("Total number of ROIs: %i", nR),
      file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  cat(sprintf("Response variable Y - mean: %f; SD: %f", mean(dataTable$Y), stats::sd(dataTable$Y)),
      file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  outDF(summary(dataTable$Y), outFN)
  cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  cat("Data structure:", file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  outDF(utils::str(dataTable), outFN)
  cat("Subjects:", file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  outDF(summary(dataTable$Subj), outFN)
  cat("ROIs:", file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
  outDF(summary(dataTable[ROI1][[1]]), outFN)
  if (!is.null(ROI2)) outDF(summary(dataTable[ROI2][[1]]), outFN)
  cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append=TRUE)
}
# Append the printed representation of an object to "<fl>.txt",
# one captured output line per file line.
outDF <- function(DF, fl) {
  out_path <- paste0(fl, ".txt")
  printed <- utils::capture.output(DF)
  cat(printed, file = out_path, sep = "\n", append = TRUE)
}
# Fisher z-transformation: 0.5 * (log(1+r) - log(1-r)), i.e. atanh(r).
# r may be a vector of correlation values.  Values with |r| >= .995 abort;
# note the stop() fires only when ifelse() has to evaluate its 'no' branch,
# i.e. when at least one element fails the magnitude check.
fisher <- function(r) ifelse(abs(r) < .995, 0.5*(log(1+r)-log(1-r)), stop('Are you sure that you have correlation values so close to 1 or -1?'))
# Posterior probability of a positive effect (P+): the fraction of the ns
# posterior draws in x that are strictly greater than zero.
cnt <- function(x, ns) {
  n_positive <- sum(x > 0)
  n_positive / ns
}
# extract region-pair posterior samples for an effect 'tm'
ww <- function(aa, bb, tm, nR, ns = nrow(aa)) {
  # Build the ns x nR x nR array of region-pair posterior samples for effect
  # tm: the two regions' random effects added together, plus the
  # population-level effect, plus the pair-specific (ROI1:ROI2) random
  # effect added symmetrically at both (i,j) and (j,i).
  #
  # Args:
  #   aa: ns x terms matrix of population-level draws (brms::fixef, summary=FALSE).
  #   bb: list of group-level draws (brms::ranef, summary=FALSE).
  #   tm: effect (column) name to extract.
  #   nR: number of regions.
  #   ns: number of posterior draws; defaults to nrow(aa) so call sites that
  #       omit it (e.g. the factor branch of post_process) no longer error.
  ps0 <- array(apply(bb[['mmROI1ROI2']][,,tm], 2, "+", bb[['mmROI1ROI2']][,,tm]), c(ns, nR, nR))
  ps <- apply(ps0, c(2,3), '+', aa[,tm])
  dimnames(ps) <- list(1:ns, dimnames(bb$mmROI1ROI2)[[2]], dimnames(bb$mmROI1ROI2)[[2]])
  tmp <- ps
  # Linear positions of each named pair "i_j" in an nR x nR slice, in both
  # orientations, so the pair draw can be added symmetrically.
  sel1 <- match(dimnames(bb$`ROI1:ROI2`)[[2]], outer(dimnames(ps)[[2]],dimnames(ps)[[3]], function(x,y) paste(x,y,sep="_")))
  sel2 <- match(dimnames(bb$`ROI1:ROI2`)[[2]], outer(dimnames(ps)[[2]],dimnames(ps)[[3]], function(x,y) paste(y,x,sep="_")))
  ad <- function(tt,bb,s1,s2) {tt[s1] <- tt[s1] + bb; tt[s2] <- tt[s2] + bb; return(tt)}
  for(ii in 1:ns) tmp[ii,,] <- ad(tmp[ii,,], bb$`ROI1:ROI2`[ii,,tm], sel1, sel2)
  ps <- tmp
  return(ps)
}
# ps <- ww(aa, bb, 'Intercept', nR)
# obtain summary information of posterior samples for region pairs (RPs)
vv <- function(ps, ns, nR) {
  # Summarize region-pair posterior samples into an nR x nR x 8 array with
  # mean, SD, P+ and the 2.5/5/50/95/97.5% quantiles.
  #
  # Args:
  #   ps: ns x nR x nR array of posterior samples (dimnames on dims 2 and 3).
  #   ns: number of posterior draws; nR: number of regions.
  #
  # Re-center each pair at its posterior median and inflate the spread by
  # sqrt(2), mirroring psROI().  (The original obtained the median via
  # apply(ps, c(2,3), mean, ns): ns landed in mean()'s trim argument, and any
  # trim >= 0.5 makes mean() return the median -- made explicit here.)
  mm <- apply(ps, c(2,3), stats::median)
  for(ii in 1:nR) for(jj in 1:nR) ps[,ii,jj] <- sqrt(2)*(ps[,ii,jj] - mm[ii,jj]) + mm[ii,jj]
  RP <- array(NA, dim=c(nR, nR, 8))
  RP[,,1] <- apply(ps, c(2,3), mean)
  RP[,,2] <- apply(ps, c(2,3), stats::sd)
  RP[,,3] <- apply(ps, c(2,3), cnt, ns)
  # apply() yields a 5 x nR x nR array; permute it to nR x nR x 5.  (The
  # original wrote aperm(..., dim=c(2,3,1)); 'dim' is not an aperm argument,
  # so the default full reversal was used -- equivalent here only because ps
  # is symmetric in its region dimensions.  The explicit perm is correct
  # regardless of symmetry.)
  RP[,,4:8] <- aperm(apply(ps, c(2,3), stats::quantile, probs=c(0.025, 0.05, 0.5, 0.95, 0.975)), c(2,3,1))
  dimnames(RP)[[1]] <- dimnames(ps)[[2]]
  dimnames(RP)[[2]] <- dimnames(ps)[[3]]
  dimnames(RP)[[3]] <- c('mean', 'SD', 'P+', '2.5%', '5%', '50%', '95%', '97.5%')
  return(RP)
}
# full region pair result without thresholding
#xx <- vv(ww(aa, bb, 'Intercept', nR), ns, nR)
#subset(xx[,,c(1,8)], xx[,,'P+'] >= 0.975 | xx[,,'P+'] <= 0.025)
# graded thresholding
res <- function(bb, xx, pp, nd = 3) {
  # Threshold the region-pair summary array xx at two-sided tail level pp
  # (keep pairs with P+ >= 1-pp or P+ <= pp) and return a data.frame of
  # ROI1, ROI2, mean, SD and P+ rounded to nd digits (empty when nothing
  # survives).  Only the upper triangle (ROI1 < ROI2) is reported.
  #
  # nd now has a default of 3: some call sites in post_process() omit it,
  # which previously raised a missing-argument error, and the single-pair
  # branch below previously hard-coded 3 instead of using nd.
  RP <- which(xx[,,'P+'] >= 1-pp | xx[,,'P+'] <= pp, arr.ind = TRUE)
  RP <- RP[RP[,1] < RP[,2],]
  tmp <- data.frame(ROI1=factor(), ROI2=factor(), mean=factor(), SD=factor(), `P+`=factor(), check.names = FALSE)
  if(length(RP) > 2) {
    # Two or more surviving pairs: RP stays a two-column index matrix.
    tmp <- cbind(dimnames(bb$mmROI1ROI2)[[2]][RP[,1]], dimnames(bb$mmROI1ROI2)[[2]][RP[,2]],
                 round(t(mapply(function(i, j) xx[i, j, 1:3], RP[,1], RP[,2])), nd))
    colnames(tmp)[1:2] <- c('ROI1', 'ROI2')
    tmp <- data.frame(tmp, row.names = NULL, check.names = FALSE)
  } else if(length(RP)==2) {
    # Exactly one surviving pair: RP has dropped to a length-2 index vector.
    tmp <- c(dimnames(bb$mmROI1ROI2)[[2]][RP[1]], dimnames(bb$mmROI1ROI2)[[2]][RP[2]], round(xx[RP[1], RP[2], 1:3], nd))
    tmp <- data.frame(t(tmp), row.names = NULL, check.names = FALSE)
  }
  return(tmp)
}
# Append a standardized result section to "<fl>.txt": a banner line naming
# the interval, followed by the thresholded table (or the literal "NULL"
# when the table is empty).
prnt <- function(pct, side, dat, fl, entity) {
  out_file <- paste0(fl, '.txt')
  banner <- sprintf('***** %i %s based on %i-sided %i uncertainty interval *****',
                    nrow(dat), entity, side, pct)
  cat(banner, file = out_file, sep = '\n', append = TRUE)
  if (nrow(dat) > 0) {
    cat(utils::capture.output(dat), file = out_file, sep = '\n', append = TRUE)
  } else {
    cat('NULL', file = out_file, sep = '\n', append = TRUE)
  }
}
# Matrix-plot helpers for region pairs: addTrans() below supplies translucent
# colors for mPlot(), which draws the pair matrix with blanked diagonals.
addTrans <- function(color, trans)
{
  # Add transparency to a color, returning "#RRGGBBAA" hex strings.
  # trans is an alpha value between 0 and 255: 0 is fully transparent and
  # 255 fully opaque.  color and trans may be vectors of equal length, or
  # either may have length 1 (it is recycled to match the other).
  #
  # Changes from the original: scalar conditions use &&/|| instead of &,
  # the hand-rolled num2hex() is replaced by sprintf("%02X", ...), and the
  # local no longer shadows grDevices::rgb.
  if (length(color) != length(trans) && !any(c(length(color), length(trans)) == 1))
    stop("Vector lengths not correct")
  if (length(color) == 1 && length(trans) > 1) color <- rep(color, length(trans))
  if (length(trans) == 1 && length(color) > 1) trans <- rep(trans, length(color))
  # One column per color: R, G, B channels plus the alpha row.
  channels <- rbind(grDevices::col2rgb(color), trans)
  paste("#",
        apply(channels, 2,
              function(ch) paste(sprintf("%02X", as.integer(ch)), collapse = "")),
        sep = "")
}
# Draw the region-pair matrix plot for one effect and write it to
# "<fn>.pdf".  Circle color encodes the posterior median; the cell
# background encodes the strength of evidence from P+ (green: beyond
# 95% two-sided; yellow: beyond 90% two-sided; gray: beyond 80% two-sided;
# white otherwise).  Diagonal cells are blanked.
#
# Args:
#   xx: nR x nR x 8 summary array from vv() (slice 6 = median, 3 = P+).
#   fn: output file stem for the PDF.
mPlot <- function(xx, fn) {
  mm <- xx[,,6] # median
  pp <- xx[,,3] # P+
  BC1 <- ((pp >= 0.975 ) | (pp <= 0.025)) # background color
  BC <- ((pp >= 0.95 ) | (pp <= 0.05)) # background color
  BC2 <- (((pp > 0.9) & (pp < 0.95)) | ((pp < 0.1) & (pp > 0.05)))
  # The first assignment coerces the logical matrix BC to character, so the
  # later comparisons against T/F match the strings "TRUE"/"FALSE".
  BC[BC == T] <- addTrans('yellow',150)
  BC[BC1 == T] <- addTrans('green',175)
  BC[BC == F] <- "white"
  BC[BC2 == T] <- addTrans('gray',125)
  #BC[BC == T] <- "blue"
  #BC[BC1 == T] <- "green"
  #BC[BC == F] <- "white"
  #BC[BC2 == T] <- 'yellow'
  rng <- range(mm)
  diag(mm) <- NA # diagonals are meaningful in the case of correlation matrix
  diag(BC) <- "white" # if the diagonal values shall be white
  # Drop the diagonal entries of BC (corrplot's bg expects off-diagonals).
  ii <- !kronecker(diag(1, nrow(BC)), matrix(1, ncol=1, nrow=1))
  BC <- matrix(BC[ii], ncol = ncol(BC)-1)
  # Diverging red-to-blue palette (reversed below so red means positive).
  col2 <- grDevices::colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582",
                                        "#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
                                        "#4393C3", "#2166AC", "#053061"))
  grDevices::pdf(paste0(fn, ".pdf"), width=8, height=8)
  corrplot::corrplot(mm, method="circle", type = "full", is.corr = FALSE, bg=BC, tl.pos='lt', tl.col='black', col=rev(col2(200)), cl.pos='r', na.label = "square", na.label.col='white')
  grDevices::dev.off()
}
# Summarize per-ROI posterior samples (columns of R0) into a data frame of
# mean, SD, P+ and selected quantiles, rounded to nd digits.
sumROI <- function(R0, ns, nd) {
  col_mean <- apply(R0, 2, mean)
  col_sd <- apply(R0, 2, stats::sd)
  col_pplus <- apply(R0, 2, cnt, ns)
  col_quant <- t(apply(R0, 2, stats::quantile,
                       probs = c(0.025, 0.05, 0.5, 0.95, 0.975)))
  hubs <- data.frame(cbind(col_mean, col_sd, col_pplus, col_quant))
  names(hubs) <- c('mean', 'SD', 'P+', '2.5%', '5%', '50%', '95%', '97.5%')
  round(hubs, nd)
}
# Posterior samples at each ROI for effect 'tm': the ROI-level deviations in
# bb$mmROI1ROI2 plus half of the population-level effect in aa.  Each ROI's
# samples are then re-centered at their median with the spread inflated by
# sqrt(2).
psROI <- function(aa, bb, tm, nR) {
  half_pop <- 0.5 * aa[, tm]
  R0 <- apply(bb$mmROI1ROI2[, , tm], 2, '+', half_pop)
  for (region in seq_len(nR)) {
    med <- stats::quantile(R0[, region], probs = 0.5)
    R0[, region] <- sqrt(2) * (R0[, region] - med) + med
  }
  R0
}
first.in.path <- function(file) {
  # Return the first existing "<dir>/<file>" along the PATH environment
  # variable (with any '//' collapsed to '/'), or NA_character_ when the
  # file is found in none of the PATH directories.
  #
  # Args:
  #   file: base name of the file/executable to locate.
  candidates <- paste(strsplit(Sys.getenv('PATH'), ':')[[1]], '/', file, sep = '')
  # file.exists() is vectorized; this replaces the original
  # ff[lapply(ff, file.exists) == TRUE] list comparison.
  candidates <- candidates[file.exists(candidates)]
  gsub('//', '/', candidates[1], fixed = TRUE)
}
# Accessors over a parsed AFNI name list (see parse.AFNI.name).  Each one
# also accepts a plain filename string, which is parsed first.

# Prefix including its leading path.
pprefix.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  an$pprefix
}

# Prefix without the path component.
prefix.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  an$prefix
}

# View suffix ('+orig', '+acpc', '+tlrc', or NA when absent).
view.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  an$view
}

# Path-qualified prefix concatenated with the view.
pv.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  paste(an$pprefix, an$view, sep = '')
}
# Name of the .HEAD file for a BRIK dataset with a known view; any other
# format is referred to by its original name.
head.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  if (an$type == 'BRIK' && !is.na(an$view)) {
    paste(an$pprefix, an$view, ".HEAD", sep = '')
  } else {
    an$orig_name
  }
}

# Name of the .BRIK file for a BRIK dataset with a known view; any other
# format is referred to by its original name.
brik.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  if (an$type == 'BRIK' && !is.na(an$view)) {
    paste(an$pprefix, an$view, ".BRIK", sep = '')
  } else {
    an$orig_name
  }
}

# Compression suffix of the dataset extension: 'gz', 'bz2', 'Z', or '' when
# the extension carries no recognized compression suffix.
compressed.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  for (suffix in c('gz', 'bz2', 'Z')) {
    if (length(grep(paste('\\.', suffix, '$', sep = ''), an$ext))) {
      return(suffix)
    }
  }
  ''
}
# Modify a dataset name via AFNI's compiled helper in R_io.so.
#
# Args:
#   name: dataset name to modify.
#   what: operation selector passed to the native routine (default "append").
#   val:  value to apply (default "_new").
#   cwd:  optional working directory for path resolution.
# Returns: the native routine's result, or NULL when R_io.so is not loaded.
modify.AFNI.name <- function (name, what="append", val="_new", cwd=NULL) {
   if (!is.loaded('R_SUMA_ParseModifyName')) {
      err.AFNI("Missing R_io.so");
      return(NULL);
   }
   # Delegate all parsing/modification to the C implementation.
   an <- .Call("R_SUMA_ParseModifyName",
               name = name,
               what = what,
               val = val,
               cwd = cwd)
   return(an)
}
# Parse an AFNI dataset name into its components.
#
# Args:
#   filename: dataset name, possibly carrying selector suffixes
#             ([..] sub-brick, {..} row, <..> range, #.. index), a view
#             (+orig/+acpc/+tlrc) and/or an extension.  The special value
#             '-self_test' runs a battery of example parses and returns NULL.
#   verb:     verbosity level.
# Returns: a list with view, pprefix (path-qualified prefix), prefix, path,
#   type ('BRIK', 'NIFTI', '1D', 'NIML', '1Ds', 'Rs'), ext, orig_name, file,
#   and the selector strings brsel/rosel/rasel/insel; NULL on bad input.
parse.AFNI.name <- function(filename, verb = 0) {
   if (filename == '-self_test') { #Secret testing flag
      note.AFNI('Function running in test mode');
      show.AFNI.name(parse.AFNI.name('DePath/hello.DePrefix', verb))
      show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc', verb))
      show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.', verb))
      show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.HEAD', verb))
      show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.BRIK.gz', verb))
      show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.HEAD[23]', verb))
      show.AFNI.name(
         parse.AFNI.name('DePath/DePrefix+acpc.HEAD[DeLabel]{DeRow}', verb))
      show.AFNI.name(
         parse.AFNI.name('DePath/DePrefix+acpc[DeLabel]{DeRow}', verb))
      show.AFNI.name(
         parse.AFNI.name('DePath/DePrefix+acpc.[DeLabel]{DeRow}', verb))
      return(NULL)
   }
   # Initialize the result skeleton; orig_name keeps the untouched input.
   an <- list()
   an$view <- NULL
   an$pprefix <- NULL
   an$brsel <- NULL;
   an$rosel <- NULL;
   an$rasel <- NULL;
   an$insel <- NULL;
   an$type <- NULL;
   an$path <- NULL;
   an$orig_name <- filename;
   an$file <- NULL;
   if (verb) { cat ('Parsing >>',filename,'<<\n', sep=''); }
   if (!is.character(filename)) {
      warning(paste('filename >>',
                    filename, '<< not a character string\n', sep=''),
              immediate. = TRUE);
      traceback();
      return(NULL);
   }
   #Deal with special names:
   if (length(grep("^1D:.*$",filename))) {
      an$type = '1Ds'
      return(an)
   } else if (length(grep("^R:.*$",filename))) {
      an$type = 'Rs'
      return(an)
   }
   #Deal with selectors
   n <- parse.AFNI.name.selectors(filename, verb)
   filename <- n$name
   an$file <- n$name
   an$brsel <- n$brsel;
   an$rosel <- n$rosel;
   an$rasel <- n$rasel;
   an$insel <- n$insel;
   #Remove last dot if there
   filename <- sub('\\.$','',filename)
   #NIFTI?
   n <- strip.extension(filename, c('.nii', '.nii.gz'), verb)
   if (n$ext != '') {
      an$ext <- n$ext
      an$type <- 'NIFTI'
      an$pprefix <- n$name_noext
   } else {
      #remove other extensions
      n <- strip.extension(filename, c('.HEAD','.BRIK','.BRIK.gz',
                                       '.BRIK.bz2','.BRIK.Z',
                                       '.1D', '.1D.dset',
                                       '.niml.dset',
                                       '.' ),
                           verb)
      if (n$ext == '.1D' || n$ext == '.1D.dset') {
         an$type <- '1D'
      } else if (n$ext == '.niml.dset') {
         an$type <- 'NIML'
      } else {
         an$type <- 'BRIK'
      }
      if (n$ext == '.') {
         n$ext <- ''
      }
      an$ext <- n$ext
      filename <- n$name_noext
      # The view suffix, if any, is what remains after the extension.
      n <- strip.extension(filename, c('+orig','+tlrc','+acpc'), verb)
      if (n$ext != '') {
         an$view <- n$ext
      } else {
         an$view <- NA
      }
      an$pprefix <- n$name_noext
   }
   #a prefix with no path
   an$prefix <- basename(an$pprefix)
   #and the path
   an$path <- dirname(an$orig_name)
   if (verb > 2) {
      note.AFNI("Browser not active");
      # browser()
   }
   # When selectors were present, strip any trailing quote that belonged to
   # a quoted command-line argument.  (1D names keep their text as-is.)
   if ( an$type != '1D' && (
        !is.null(an$brsel) || !is.null(an$rosel) ||
        !is.null(an$rasel) || !is.null(an$insel))) {
      #Remove trailing quote if any
      an$prefix <- gsub("'$", '', an$prefix);
      an$prefix <- gsub('"$', '', an$prefix);
      an$pprefix <- gsub("'$",'', an$pprefix);
      an$pprefix <- gsub('"$','', an$pprefix);
   }
   if ( an$type != 'BRIK' ) {
      #Put the extension back on
      an$pprefix <- paste(an$pprefix,an$ext, sep='');
      an$prefix <- paste(an$prefix,an$ext, sep='');
   }
   return(an)
}
# Bitmask of on-disk existence for a dataset: +1 when the HEAD file exists,
# +2 when the BRIK file (possibly .gz/.Z compressed) exists; 0 when neither
# is found.
exists.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  score <- 0
  if (file.exists(head.AFNI.name(an))) {
    score <- score + 1
  }
  brik_file <- brik.AFNI.name(an)
  if (file.exists(brik_file) ||
      file.exists(paste(brik_file, '.gz', sep = '')) ||
      file.exists(paste(brik_file, '.Z', sep = ''))) {
    score <- score + 2
  }
  score
}
AFNI.new.options.list <- function(history = '', parsed_args = NULL) {
  # Build a fresh option list: record the command history and scan
  # parsed_args for flags with built-in defaults (currently only -overwrite).
  #
  # Args:
  #   history:     command-history string stored as com_history.
  #   parsed_args: named list of parsed options; names may carry leading '-'.
  # Returns: list with com_history and overwrite fields.
  lop <- list(com_history = history)
  #Look for defaults
  lop$overwrite <- FALSE
  # seq_along() correctly handles parsed_args = NULL / empty: the original
  # 1:length(parsed_args) loop ran with i in c(1, 0) and crashed.
  for (i in seq_along(parsed_args)) {
    opname <- strsplit(names(parsed_args)[i], '^-')[[1]]
    opname <- opname[length(opname)]  # drop the empty piece before the dash
    switch(opname,
           overwrite = lop$overwrite <- TRUE)
  }
  return(lop)
}
# Split an AFNI filename into the bare name plus any trailing selectors:
#   [..] brick selector, {..} row selector, <..> range selector, #.. index.
#
# @param filename Dataset name possibly carrying selector suffixes.
# @param verb Verbosity level (currently unused here).
# @return list(name=..., brsel=..., rosel=..., rasel=..., insel=...);
#         unset selectors are absent (NULL) from the list.
parse.AFNI.name.selectors <- function(filename,verb=0) {
   n <- list()
   n$brsel <- NULL;
   n$rosel <- NULL;
   n$rasel <- NULL;
   n$insel <- NULL;
   # cut the name away from the selector strings
   selecs <- strsplit(filename,"\\[|\\{|<|#")[[1]];
   n$name <- selecs[1]
   # Only scan when selectors exist; the old selecs[2:length(selecs)]
   # indexing re-visited the file name itself (indices c(2, 1)) when there
   # were none, so a ']'/'}'/'>' inside a plain name could be misread.
   if (length(selecs) > 1) {
      for (ss in selecs[-1]) {
         if (length(grep("]",ss))) {
            n$brsel <- strsplit(ss,"\\]")[[1]][1];
         } else if (length(grep("}",ss))) {
            n$rosel <- strsplit(ss,"\\}")[[1]][1];
         } else if (length(grep(">",ss))) {
            n$rasel <- strsplit(ss,">")[[1]][1];
         }
      }
   }
   # '#' selector: everything after the first '#'
   selecs <- strsplit(filename,"#")[[1]];
   if (length(selecs) > 1) {
      n$insel <- selecs[2]
   }
   return(n)
}
# Split a filename into (extension, name-without-extension).
#
# @param filename Character filename.
# @param extvec Optional vector of candidate extensions (e.g. '.nii');
#        matched in order, first hit wins.  When NULL, everything after the
#        last '.' is treated as the extension.
# @param verb Verbosity level (currently unused here).
# @return list(ext=..., name_noext=...); ext is '' when nothing matched.
strip.extension <- function (filename, extvec=NULL, verb=0) {
   n <- list()
   if (is.null(extvec)) {
      ff <- strsplit(filename, '\\.')[[1]]
      if (length(ff) > 1) {
         n$ext <- paste0('.', ff[length(ff)])
         # drop the last component; clearer than the old ff[1:length(ff)-1],
         # which only worked because ':' binds tighter than '-'
         n$name_noext <- paste(ff[-length(ff)], collapse='.')
      } else {
         n$ext <- ''
         n$name_noext <- filename
      }
   } else {
      n$ext <- ''
      n$name_noext <- filename
      for (ex in extvec) {
         # anchor at end of name; as before, only the first character of ex
         # is regex-escaped (the leading '.' or '+')
         patt <- paste0('\\', ex, '$')
         if (grepl(patt, filename)) {
            n$ext <- ex
            n$name_noext <- sub(patt, '', filename)
            return(n)
         }
      }
   }
   return(n)
}
| /R/MBAfuncs.R | no_license | afni/afnistats | R | false | false | 28,720 | r | parse_args <- function(args){
# Body of parse_args(): turn raw command-line args into the MBA option list.
ExecName <- 'MBA'
# read.MBA.opts.batch() / process.MBA.opts() are defined elsewhere in the file
lop <- read.MBA.opts.batch(args, verb = 0)
# rename 'prefix' to 'outFN'; assigning NULL drops the old element
lop['outFN'] <- lop['prefix']
lop['prefix'] <- NULL
lop <- process.MBA.opts(lop, verb = lop$verb)
lop
}
# Quantitative effects of interest: intersect the requested EOI list with the
# declared quantitative variables, always considering 'Intercept'.
#
# @param qVars Comma-separated string of quantitative variable names.
# @param EOI Comma-separated string of effects of interest.
# @return Character vector of quantitative EOIs, in EOI order.
get_eoiq <- function(qVars, EOI) {
  EOIq <- strsplit(qVars, ",")[[1]]
  if (!("Intercept" %in% EOIq)) EOIq <- c("Intercept", EOIq)
  # return the value visibly instead of ending on an invisible assignment
  intersect(strsplit(EOI, ",")[[1]], EOIq)
}
# Categorical effects of interest: intersect EOI with the declared
# categorical variables.
#
# @param cVars Comma-separated string of categorical variable names, or NULL.
# @param EOI Comma-separated string of effects of interest.
# @return NA when no categorical variables were declared; otherwise the
#         character vector of categorical EOIs (possibly empty).
get_eioc <- function(cVars, EOI) {
  if (is.null(cVars)) {
    return(NA)
  }
  # return the value visibly instead of ending on an invisible assignment
  intersect(strsplit(EOI, ",")[[1]], strsplit(cVars, ",")[[1]])
}
# Summarize a fitted MBA model: write region and region-pair posterior
# summaries to <outFN>.txt, save the workspace, and generate matrix plots.
#
# @param fm brms fit object returned by run_mba().
# @param outFN Output file stem (<outFN>.txt log, <outFN>.RData image).
# @param iterations,chains MCMC settings used for the fit; the retained
#        posterior draw count is iterations*chains/2 (half discarded as warmup).
# @param EOIq Quantitative effects of interest (or NA).
# @param EOIc Categorical effects of interest (or NA).
# @param qContr Quantitative contrast specification (or NA/NULL).
# @param ptm proc.time() stamp taken before fitting, for the elapsed report.
# @param ROI1,ROI2 Names of the two ROI columns in the data table.
# NOTE(review): relies on the globals `dataTable` and (when qContr is set)
# `qContrL`, which are expected to be established by the calling script.
post_process <- function(fm, outFN, iterations, chains, EOIq, EOIc, qContr, ptm, ROI1, ROI2) {
  nR <- get_nr(dataTable, c(ROI1, ROI2))
  print(format(Sys.time(), "%D %H:%M:%OS3"))
  # Stop the clock
  proc.time() - ptm
  save.image(file = paste0(outFN, ".RData"))
  cat(format(Sys.time(), "%D %H:%M:%OS3"), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  cat(utils::capture.output(proc.time() - ptm), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  rs <- summary(fm)
  rs_text <- utils::capture.output(rs)
  # echo the model summary to the console ...
  cat("
++++++++++++++++++++++++++++++++++++++++++++++++++++
")
  cat("***** Summary information of model information *****
")
  cat(rs_text, fill = 2)
  cat("
***** End of model information *****
")
  cat("++++++++++++++++++++++++++++++++++++++++++++++++++++
")
  # ... and into the text log
  cat("
***** Summary information of model results *****
", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  cat(rs_text, file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  # number of retained posterior draws (brms keeps half of iterations*chains)
  ns <- iterations * chains / 2
  aa <- brms::fixef(fm, summary = FALSE) # Population-Level Estimates
  bb <- brms::ranef(fm, summary = FALSE) # Group-Level (random-effect) Estimates
  if (nR != length(dimnames(bb$mmROI1ROI2)[[2]])) {
    cat("
***** Warning: something strange about the ROIs! *****
", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  }
  ########## region effects #############
  # for Intercept and quantitative variables
  if (any(!is.na(EOIq) == TRUE)) {
    for (ii in 1:length(EOIq)) {
      cat(sprintf("===== Summary of region effects for %s =====", EOIq[ii]),
          file = paste0(outFN, ".txt"), sep = "\n", append = TRUE
      )
      gg <- sumROI(psROI(aa, bb, EOIq[ii], nR), ns, 3)
      cat(utils::capture.output(gg), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
    }
  }
  # for contrasts among quantitative variables (qContrL is a global)
  if (any(!is.na(qContr) == TRUE)) {
    for (ii in 1:(length(qContrL) / 2)) {
      cat(sprintf("===== Summary of region effects for %s vs %s =====", qContrL[2 * ii - 1], qContrL[2 * ii]),
          file = paste0(outFN, ".txt"), sep = "\n", append = TRUE
      )
      gg <- sumROI(psROI(aa, bb, qContrL[2 * ii - 1], nR) - psROI(aa, bb, qContrL[2 * ii], nR), ns, 3)
      cat(utils::capture.output(gg), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
    }
  }
  # for factors: deviation coding, last level is the reference
  if (any(!is.na(EOIc) == TRUE)) {
    for (ii in 1:length(EOIc)) {
      lvl <- levels(dataTable[[EOIc[ii]]]) # levels
      nl <- nlevels(dataTable[[EOIc[ii]]]) # number of levels: last level is the reference in deviation coding
      ps <- array(0, dim = c(nl, ns, nR)) # posterior samples
      for (jj in 1:(nl - 1)) ps[jj, , ] <- psROI(aa, bb, paste0(EOIc[ii], jj), nR)
      ps[nl, , ] <- psROI(aa, bb, "Intercept", nR) # Intercept: average effect
      psa <- array(0, dim = c(nl, ns, nR)) # posterior samples adjusted
      for (jj in 1:(nl - 1)) {
        psa[jj, , ] <- ps[nl, , ] + ps[jj, , ]
        psa[nl, , ] <- psa[nl, , ] + ps[jj, , ]
      }
      psa[nl, , ] <- ps[nl, , ] - psa[nl, , ] # reference level
      dimnames(psa)[[3]] <- dimnames(bb$mmROI1ROI2)[[2]]
      oo <- apply(psa, 1, sumROI, ns, 3)
      cat(sprintf("===== Summary of region effects for %s =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:nl) {
        cat(sprintf("----- %s level: %s", EOIc[ii], lvl[jj]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        cat(utils::capture.output(oo[[jj]]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      }
      cat(sprintf("===== Summary of region effects for %s comparisons =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:(nl - 1)) {
        for (kk in (jj + 1):nl) {
          cat(sprintf("----- level comparison: %s vs %s", lvl[jj], lvl[kk]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          oo <- sumROI(psa[jj, , ] - psa[kk, , ], ns, 3)
          cat(utils::capture.output(oo), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        }
      }
    }
  }
  ########## region pair effects #############
  # for intercept or quantitative variable
  if (any(!is.na(EOIq) == TRUE)) {
    for (ii in 1:length(EOIq)) {
      xx <- vv(ww(aa, bb, EOIq[ii], nR, ns), ns, nR)
      cat(sprintf("===== Summary of region pair effects for %s =====", EOIq[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      prnt(90, 1, res(bb, xx, 0.1, 3), outFN, "region pairs")
      prnt(95, 1, res(bb, xx, 0.05, 3), outFN, "region pairs")
      prnt(95, 2, res(bb, xx, 0.025, 3), outFN, "region pairs")
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      mPlot(xx, EOIq[ii])
    }
  }
  # for contrasts among quantitative variables
  if (any(!is.na(qContr) == TRUE)) {
    for (ii in 1:(length(qContrL) / 2)) {
      xx <- vv(ww(aa, bb, qContrL[2 * ii - 1], nR, ns) - ww(aa, bb, qContrL[2 * ii], nR, ns), ns, nR)
      cat(sprintf("===== Summary of region pair effects for %s vs %s =====", qContrL[2 * ii - 1], qContrL[2 * ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      prnt(90, 1, res(bb, xx, 0.1, 3), outFN, "region pairs")
      prnt(95, 1, res(bb, xx, 0.05, 3), outFN, "region pairs")
      prnt(95, 2, res(bb, xx, 0.025, 3), outFN, "region pairs")
      cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      mPlot(xx, paste0(qContrL[2 * ii - 1], "vs", qContrL[2 * ii]))
    }
  }
  # for factors
  if (any(!is.na(EOIc) == TRUE)) {
    for (ii in 1:length(EOIc)) {
      lvl <- levels(dataTable[[EOIc[ii]]]) # levels
      nl <- nlevels(dataTable[[EOIc[ii]]]) # number of levels
      ps <- array(0, dim = c(nl, ns, nR, nR)) # posterior samples
      # BUGFIX: ww() requires the draw count ns; it was previously omitted in
      # these two calls, which made this branch error at runtime
      for (jj in 1:(nl - 1)) ps[jj, , , ] <- ww(aa, bb, paste0(EOIc[ii], jj), nR, ns)
      ps[nl, , , ] <- ww(aa, bb, "Intercept", nR, ns)
      psa <- array(0, dim = c(nl, ns, nR, nR)) # posterior samples adjusted
      for (jj in 1:(nl - 1)) {
        psa[jj, , , ] <- ps[nl, , , ] + ps[jj, , , ]
        psa[nl, , , ] <- psa[nl, , , ] + ps[jj, , , ]
      }
      psa[nl, , , ] <- ps[nl, , , ] - psa[nl, , , ] # reference level
      dimnames(psa)[[3]] <- dimnames(bb$mmROI1ROI2)[[2]]
      dimnames(psa)[[4]] <- dimnames(bb$mmROI1ROI2)[[2]]
      cat(sprintf("===== Summary of region pair effects for %s =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:nl) {
        cat(sprintf("----- %s level: %s", EOIc[ii], lvl[jj]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        oo <- vv(psa[jj, , , ], ns, nR)
        prnt(90, 1, res(bb, oo, 0.1, 3), outFN, "region pairs")
        prnt(95, 1, res(bb, oo, 0.05, 3), outFN, "region pairs")
        prnt(95, 2, res(bb, oo, 0.025, 3), outFN, "region pairs")
        cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
        mPlot(oo, paste0(EOIc[ii], "_", lvl[jj]))
      }
      cat(sprintf("===== Summary of region pair effects for %s comparisons =====", EOIc[ii]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
      for (jj in 1:(nl - 1)) {
        for (kk in (jj + 1):nl) {
          cat(sprintf("----- level comparison: %s vs %s", lvl[jj], lvl[kk]), file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          oo <- vv(psa[jj, , , ] - psa[kk, , , ], ns, nR)
          # BUGFIX: res() requires the rounding digits nd (it is used inside
          # round()); pass 3 to match every other res() call in this function
          prnt(90, 1, res(bb, oo, 0.1, 3), outFN, "region pairs")
          prnt(95, 1, res(bb, oo, 0.05, 3), outFN, "region pairs")
          prnt(95, 2, res(bb, oo, 0.025, 3), outFN, "region pairs")
          cat("\n", file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
          mPlot(oo, paste0(EOIc[ii], "_", lvl[jj], "vs", lvl[kk]))
        }
      }
    }
  }
  # save it again
  save.image(file = paste0(outFN, ".RData"))
  cat("\nCongratulations! The above results are saved in file ", outFN, "\n\n", sep = "")
}
# Read the subject/ROI data table from disk and prepare it for MBA modeling:
# rename the response/subject/ROI columns, coerce categorical variables to
# factors, optionally standardize quantitative variables, and validate the
# quantitative-contrast specification.
#
# @param data_path Path to a whitespace-separated table with a header row.
# @param model Model RHS as a string, or 1 for intercept-only.
# @param MD TRUE when missing data are allowed (skips the row-count check).
# @param r2z Currently unused in this function.
# @param cVars,qVars Comma-separated categorical / quantitative variable names.
# @param stdz Comma-separated names of variables to standardize, or NULL.
# @param qContr Comma-separated quantitative contrasts ("a,vs,b,..."), or NULL.
# @param Y,Subj,ROI1,ROI2 Column names for response, subject and ROI columns.
# @return The prepared data.frame.
# NOTE(review): the code indexes dataTable by the ORIGINAL ROI1/ROI2 names
# after renaming those columns, so it effectively assumes ROI1=="ROI1" and
# ROI2=="ROI2" -- confirm against callers before changing.
# NOTE(review): qContrL is computed locally and then discarded; downstream
# code (post_process) appears to rely on a global qContrL.
setup_dataTable <- function(data_path, model, MD, r2z, cVars, qVars, stdz,
                            qContr, Y, Subj, ROI1, ROI2 = NULL) {
  dataTable <- utils::read.table(data_path, header = T)
  # standardize the names for Y, ROI and subject
  names(dataTable)[names(dataTable) == Subj] <- "Subj"
  names(dataTable)[names(dataTable) == Y] <- "Y"
  names(dataTable)[names(dataTable) == ROI1] <- "ROI1"
  # make sure ROI1, ROI2 and Subj are treated as factors
  if (!is.factor(dataTable[ROI1][[1]])) dataTable[ROI1][[1]] <- as.factor(dataTable[ROI1][[1]])
  if (!is.factor(dataTable$Subj)) dataTable$Subj <- as.factor(dataTable$Subj)
  if (!is.null(ROI2)) {
    if (!is.factor(dataTable[ROI2][[1]])) dataTable[ROI2][[1]] <- as.factor(dataTable[ROI2][[1]])
    names(dataTable)[names(dataTable) == ROI2] <- "ROI2"
  }
  # verify variable types
  if (model == 1) terms <- 1 else terms <- strsplit(model, "\\+")[[1]]
  if (length(terms) > 1) {
    for (ii in 1:length(terms)) {
      # declared factor with quantitative levels
      if (!is.null(cVars[1])) if (terms[ii] %in% strsplit(cVars, ",")[[1]] & !is.factor(dataTable[[terms[ii]]]))
        dataTable[[terms[ii]]] <- as.factor(dataTable[[terms[ii]]])
      # declared numerical variable contains characters
      if (terms[ii] %in% strsplit(qVars, ",")[[1]] & is.factor(dataTable[[terms[ii]]]))
        stop(sprintf("Column %s in the data table is declared as numerical, but contains characters!", terms[ii]))
    }
  }
  # unit weights for the multi-membership model terms
  dataTable$w <- 1
  # standardization
  if (!is.null(stdz)) {
    sl <- strsplit(stdz, ",")[[1]]
    for (ii in 1:length(sl)) if (is.numeric(dataTable[[sl[ii]]]))
      dataTable[[sl[ii]]] <- scale(dataTable[[sl[ii]]], center = TRUE, scale = TRUE) else
      stop(sprintf("The column %s is categorical, not numerical! Why are you asking me to standardize it?", sl[ii]))
  }
  # number of ROIs
  nR <- get_nr(dataTable, c(ROI1, ROI2))
  if (!MD) if (nlevels(dataTable$Subj) * nR * (nR - 1) / 2 > nrow(dataTable))
    stop(sprintf("Error: with %d regions and %d subjects, it is expected to have %d rows per subject, leading to toally %d rows in the input data table. However, there are only %d rows. If you have missing data, use option -MD", nR, nlevels(dataTable$Subj), nR * (nR - 1) / 2, nlevels(dataTable$Subj) * nR * (nR - 1) / 2, nrow(dataTable)))
  if (any(!is.null(qContr))) {
    # BUGFIX: QV (the quantitative variable labels) was referenced below but
    # never defined in this scope; derive it from qVars as done elsewhere.
    QV <- strsplit(qVars, ",")[[1]]
    qContrL <- unlist(strsplit(qContr, ","))
    # verify "vs" in alternating location
    ll <- which(qContrL %in% "vs")
    if (!all(ll == seq(2, 300, 3)[1:length(ll)]))
      stop(sprintf("Quantitative contrast specification -qContr is incorrect!"))
    qContrL <- qContrL[!qContrL %in% "vs"]
    # verify that variable names are correct
    if (!all(qContrL %in% c(QV, "Intercept")))
      stop(sprintf("At least one of the variable labels in quantitative contrast specification -qContr is incorrect!"))
  }
  dataTable
}
#' Get number of rows based on the count of variable levels
#'
#' Given a dataframe with columns that represent categorical
#' variables this function will return the total number of unique
#' elements that are found across all columns.
#'
#' @param df A dataframe in which some categorical variables are stored
#' @param roi_names The column labels that refer to categorical variables
#' used to fit the model
#'
#' @return count
#' @export
#'
#' @examples
#' col_names <- c("cat_var_1","cat_var_2")
#'
#' df <- tibble::tribble(
#' ~col_1, ~cat_var_1, ~cat_var_2,
#' "text", "unique_val_1", "unique_val_1",
#' "text", "unique_val_2", "unique_val_1",
#' "text", "unique_val_1", "unique_val_3",
#' "text", "unique_val_1", "unique_val_4",
#' )
#'
#' get_nr(df,col_names)
get_nr <- function(df,roi_names){
# gather each named column as character, flatten, and count distinct values
purrr::map(roi_names, ~ as.character(df[.x][[1]])) %>%
purrr::flatten_chr() %>%
dplyr::n_distinct()
}
# Fit the MBA multi-membership model with brms.
#
# @param dataTable Prepared data.frame from setup_dataTable() (needs columns
#        Y, Subj, ROI1, ROI2 and unit weights w).
# @param model Model RHS string, or 1 for the intercept-only model.
# @param chains,iterations MCMC settings passed through to brm().
# @return The fitted brms model object.
run_mba <- function(dataTable, model, chains, iterations) {
  # fixed seed for reproducible chains
  set.seed(1234)
  if (model == 1) {
    # intercept-only: subject, ROI-pair, and multi-membership ROI effects
    # (both per-region and per-region-within-subject)
    modelForm <- stats::as.formula(paste("Y ~ 1 + (1|Subj) + (1|ROI1:ROI2) +
       (1|mm(ROI1, ROI2, weights = cbind(w, w), scale=FALSE)) +
       (1|mm(ROI1:Subj, ROI2:Subj, weights = cbind(w, w), scale=FALSE))"))
    fm <- brm(modelForm, data = dataTable, chains = chains,
              iter = iterations,
              control = list(adapt_delta = 0.99, max_treedepth = 15))
  } else {
    modelForm <- stats::as.formula(paste("Y~", model, "+(1|Subj)+(", model, "|ROI1:ROI2)+(",
                                         model, "|mm(ROI1, ROI2, weights = cbind(w, w), scale=FALSE))"))
    fm <- brm(modelForm, data = dataTable,
              prior = c(prior(normal(0, 1), class = "Intercept"),
                        prior(normal(0, 0.5), class = "sd")),
              chains = chains, iter = iterations,
              control = list(adapt_delta = 0.99, max_treedepth = 15))
  }
  # return the fit visibly from both branches (previously only the model!=1
  # branch ended with an explicit `fm`)
  fm
}
# Write a summary of the input data (ROI count, response stats, structure,
# subjects and ROI levels) to the <outFN>.txt log.
log_setup_info <- function(dataTable, outFN, ROI1, ROI2 = NULL) {
  # append one line of text to the log file
  log_line <- function(txt) {
    cat(txt, file = paste0(outFN, ".txt"), sep = "\n", append = TRUE)
  }
  nR <- get_nr(dataTable, c(ROI1, ROI2))
  log_line("===== Summary of variable information =====")
  log_line(sprintf("Total number of ROIs: %i", nR))
  log_line(sprintf("Response variable Y - mean: %f; SD: %f",
                   mean(dataTable$Y), stats::sd(dataTable$Y)))
  outDF(summary(dataTable$Y), outFN)
  log_line("\n")
  log_line("Data structure:")
  # outDF() evaluates its first argument lazily inside capture.output(), so
  # str()'s console printing is what actually lands in the log
  outDF(utils::str(dataTable), outFN)
  log_line("Subjects:")
  outDF(summary(dataTable$Subj), outFN)
  log_line("ROIs:")
  outDF(summary(dataTable[ROI1][[1]]), outFN)
  if (!is.null(ROI2)) outDF(summary(dataTable[ROI2][[1]]), outFN)
  log_line("\n")
}
# Append the printed form of an object to <fl>.txt.  DF is evaluated lazily
# inside capture.output(), so printing side effects (e.g. utils::str) are
# captured as well.
outDF <- function(DF, fl) {
  out_file <- paste0(fl, '.txt')
  cat(utils::capture.output(DF), file = out_file, sep = '\n', append = TRUE)
}
# Fisher z-transformation of correlation values: z = 0.5*log((1+r)/(1-r)).
# Vectorized; NA values propagate.  Aborts when any |r| >= 0.995, as before,
# but with an explicit guard instead of ifelse(), which evaluated the stop()
# branch as a side effect of its elementwise machinery.
fisher <- function(r) {
  if (any(abs(r) >= .995, na.rm = TRUE))
    stop('Are you sure that you have correlation values so close to 1 or -1?')
  0.5 * (log(1 + r) - log(1 - r))
}
# P+ : fraction of the ns posterior draws in x that are positive.
cnt <- function(x, ns) {
  sum(x > 0) / ns
}
# extract region-pair posterior samples for an effect 'tm'
# aa: population-level draws (as produced by brms::fixef(fm, summary=FALSE))
# bb: group-level draws (as produced by brms::ranef(fm, summary=FALSE))
# tm: effect name; nR: number of regions; ns: number of posterior draws
#     (ns is REQUIRED -- it sizes the output array).
# Returns an ns x nR x nR array: for each (i, j) pair, population effect +
# region i effect + region j effect + the symmetrized pair-specific effect.
ww <- function(aa, bb, tm, nR,ns) {
# region i effect + region j effect for every pair -> ns x nR x nR
ps0 <- array(apply(bb[['mmROI1ROI2']][,,tm], 2, "+", bb[['mmROI1ROI2']][,,tm]), c(ns, nR, nR))
# add the population-level effect to every pair
ps <- apply(ps0, c(2,3), '+', aa[,tm])
dimnames(ps) <- list(1:ns, dimnames(bb$mmROI1ROI2)[[2]], dimnames(bb$mmROI1ROI2)[[2]])
tmp <- ps
# map each "r1_r2" interaction label onto its (i,j) and (j,i) cells
sel1 <- match(dimnames(bb$`ROI1:ROI2`)[[2]], outer(dimnames(ps)[[2]],dimnames(ps)[[3]], function(x,y) paste(x,y,sep="_")))
sel2 <- match(dimnames(bb$`ROI1:ROI2`)[[2]], outer(dimnames(ps)[[2]],dimnames(ps)[[3]], function(x,y) paste(y,x,sep="_")))
# add the pair-specific effect symmetrically into both (i,j) and (j,i)
ad <- function(tt,bb,s1,s2) {tt[s1] <- tt[s1] + bb; tt[s2] <- tt[s2] + bb; return(tt)}
for(ii in 1:ns) tmp[ii,,] <- ad(tmp[ii,,], bb$`ROI1:ROI2`[ii,,tm], sel1, sel2)
ps <- tmp
return(ps)
}
# ps <- ww(aa, bb, 'Intercept', nR)
# obtain summary information of posterior samples for RPs
# ps: ns x nR x nR array from ww(); returns an nR x nR x 8 summary array.
vv <- function(ps, ns, nR) {
# NOTE(review): the extra 'ns' here lands in mean()'s 'trim' argument; any
# trim >= 0.5 makes mean() return the median, so mm is the per-pair MEDIAN
# used as the center of the sqrt(2) rescaling below (psROI() likewise
# centers on the median) -- confirm this is intended before "fixing" it.
mm <- apply(ps, c(2,3), mean,ns)
# widen the spread by sqrt(2) around the center for each region pair
for(ii in 1:nR) for(jj in 1:nR) ps[,ii,jj] <- sqrt(2)*(ps[,ii,jj] - mm[ii,jj]) + mm[ii,jj]
RP <- array(NA, dim=c(nR, nR, 8))
RP[,,1] <- apply(ps, c(2,3), mean)
RP[,,2] <- apply(ps, c(2,3), stats::sd)
RP[,,3] <- apply(ps, c(2,3), cnt, ns)
# NOTE(review): 'dim=' is not an argument of aperm(); it is absorbed by
# '...' and ignored, so aperm() applies its default (reversed) permutation,
# which happens to yield the required nR x nR x 5 layout.
RP[,,4:8] <- aperm(apply(ps, c(2,3), stats::quantile, probs=c(0.025, 0.05, 0.5, 0.95, 0.975)), dim=c(2,3,1))
dimnames(RP)[[1]] <- dimnames(ps)[[2]]
dimnames(RP)[[2]] <- dimnames(ps)[[3]]
dimnames(RP)[[3]] <- c('mean', 'SD', 'P+', '2.5%', '5%', '50%', '95%', '97.5%')
return(RP)
}
# full region pair result without thresholding
#xx <- vv(ww(aa, bb, 'Intercept', nR), ns, nR)
#subset(xx[,,c(1,8)], xx[,,'P+'] >= 0.975 | xx[,,'P+'] <= 0.025)
# graded thresholding
# Select region pairs whose P+ lies outside [pp, 1-pp] and tabulate their
# mean/SD/P+ rounded to nd digits.  nd is REQUIRED when more than one pair
# survives (it feeds round()); bb supplies the region names.
res <- function(bb, xx, pp, nd) {
# indices of the upper triangle of surviving pairs
RP <- which(xx[,,'P+'] >= 1-pp | xx[,,'P+'] <= pp, arr.ind = T)
RP <- RP[RP[,1] < RP[,2],]
# empty template returned when nothing survives
tmp <- data.frame(ROI1=factor(), ROI2=factor(), mean=factor(), SD=factor(), `P+`=factor(), check.names = FALSE)
# NOTE(review): when exactly one pair survives, which() result drops to a
# length-2 vector -- hence the length(RP)==2 special case below, which also
# hard-codes 3 digits instead of nd (inconsistent with the main branch).
if(length(RP) > 2) {
tmp <- cbind(dimnames(bb$mmROI1ROI2)[[2]][RP[,1]], dimnames(bb$mmROI1ROI2)[[2]][RP[,2]],
round(t(mapply(function(i, j) xx[i, j, 1:3], RP[,1], RP[,2])), nd))
colnames(tmp)[1:2] <- c('ROI1', 'ROI2')
tmp <- data.frame(tmp, row.names = NULL, check.names = FALSE) } else
if(length(RP)==2) {
tmp <- c(dimnames(bb$mmROI1ROI2)[[2]][RP[1]], dimnames(bb$mmROI1ROI2)[[2]][RP[2]], round(xx[RP[1], RP[2], 1:3],3))
#tmp <- paste(RP[1], RP[2], round(xx[RP[1], RP[2], 1:3], nd))
#names(tmp)[1:2] <- c('ROI1', 'ROI2')
tmp <- data.frame(t(tmp), row.names = NULL, check.names = FALSE)
}
return(tmp)
}
# Append one thresholded-result section to the <fl>.txt log: a header with
# the pair count, interval sidedness and percentage, followed by the table
# (or 'NULL' when nothing survived the threshold).
prnt <- function(pct, side, dat, fl, entity) {
  out_file <- paste0(fl, '.txt')
  header <- sprintf('***** %i %s based on %i-sided %i uncertainty interval *****',
                    nrow(dat), entity, side, pct)
  cat(header, file = out_file, sep = '\n', append = TRUE)
  if (nrow(dat) > 0) {
    cat(utils::capture.output(dat), file = out_file, sep = '\n', append = TRUE)
  } else {
    cat('NULL', file = out_file, sep = '\n', append = TRUE)
  }
}
# Add transparency to colors.
#
# @param color Color names or hex strings understood by grDevices::col2rgb().
# @param trans Integer alpha in 0..255 (0 = fully transparent, 255 = opaque).
#        Either argument may have length 1 and is recycled to the other's
#        length; otherwise the lengths must match.
# @return "#RRGGBBAA" strings (uppercase), same as the old implementation.
addTrans <- function(color, trans) {
  if (length(color) != length(trans) && !any(c(length(color), length(trans)) == 1))
    stop("Vector lengths not correct")
  if (length(color) == 1 && length(trans) > 1) color <- rep(color, length(trans))
  if (length(trans) == 1 && length(color) > 1) trans <- rep(trans, length(color))
  # grDevices::rgb() builds the "#RRGGBBAA" string directly, replacing the
  # previous hand-rolled nibble-to-hex conversion
  m <- grDevices::col2rgb(color)
  grDevices::rgb(m[1, ], m[2, ], m[3, ], alpha = trans, maxColorValue = 255)
}
# matrix plot for RPs: assuming no diagonals for now
# xx: nR x nR x 8 summary array from vv(); fn: output stem (writes <fn>.pdf).
# Circle color encodes the posterior median; cell background encodes the
# strength of evidence from P+ (green > yellow > gray > white).
mPlot <- function(xx, fn) {
mm <- xx[,,6] # median
pp <- xx[,,3] # P+
BC1 <- ((pp >= 0.975 ) | (pp <= 0.025)) # background color
BC <- ((pp >= 0.95 ) | (pp <= 0.05)) # background color
BC2 <- (((pp > 0.9) & (pp < 0.95)) | ((pp < 0.1) & (pp > 0.05)))
# NOTE(review): the assignment order below matters -- the first assignment
# converts BC to a character matrix ("TRUE"/"FALSE" strings plus colors);
# the later BC == F comparison then relies on "FALSE" string coercion.
BC[BC == T] <- addTrans('yellow',150)
BC[BC1 == T] <- addTrans('green',175)
BC[BC == F] <- "white"
BC[BC2 == T] <- addTrans('gray',125)
#BC[BC == T] <- "blue"
#BC[BC1 == T] <- "green"
#BC[BC == F] <- "white"
#BC[BC2 == T] <- 'yellow'
# NOTE(review): rng is computed but never used below
rng <- range(mm)
diag(mm) <- NA # diagonals are meaningful in the case of correlation matrix
diag(BC) <- "white" # if the diagonal values shall be white
# drop diagonal cells from the background matrix, giving nR x (nR-1) --
# presumably what corrplot expects with na.label squares; confirm if changed
ii <- !kronecker(diag(1, nrow(BC)), matrix(1, ncol=1, nrow=1))
BC <- matrix(BC[ii], ncol = ncol(BC)-1)
# diverging red-to-blue palette (reversed when handed to corrplot)
col2 <- grDevices::colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582",
"#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
"#4393C3", "#2166AC", "#053061"))
grDevices::pdf(paste0(fn, ".pdf"), width=8, height=8)
corrplot::corrplot(mm, method="circle", type = "full", is.corr = FALSE, bg=BC, tl.pos='lt', tl.col='black', col=rev(col2(200)), cl.pos='r', na.label = "square", na.label.col='white')
grDevices::dev.off()
}
# Per-region posterior summary table.
# R0: ns x nR matrix of draws; ns: number of draws; nd: rounding digits.
# Returns a data.frame (one row per region) with mean, SD, P+ and quantiles.
sumROI <- function(R0, ns, nd) {
  qtl <- t(apply(R0, 2, stats::quantile,
                 probs = c(0.025, 0.05, 0.5, 0.95, 0.975)))
  m <- cbind(apply(R0, 2, mean),
             apply(R0, 2, stats::sd),
             apply(R0, 2, cnt, ns),
             qtl)
  hubs <- data.frame(m)
  names(hubs) <- c('mean', 'SD', 'P+', '2.5%', '5%', '50%', '95%', '97.5%')
  round(hubs, nd)
}
# Per-region posterior samples for effect 'tm': half the population-level
# effect plus the region-level effect, then spread widened by sqrt(2) around
# each region's median.
# aa: population-level draws; bb: group-level draws (list with mmROI1ROI2);
# tm: effect name; nR: number of regions.  Returns an ns x nR matrix.
psROI <- function(aa, bb, tm, nR) {
  shifted <- apply(bb$mmROI1ROI2[, , tm], 2, '+', 0.5 * aa[, tm])
  for (rr in seq_len(nR)) {
    med <- stats::quantile(shifted[, rr], probs = .5)
    shifted[, rr] <- sqrt(2) * (shifted[, rr] - med) + med
  }
  shifted
}
# Find the first occurrence of an executable/file on the PATH.
#
# @param file File name to look for in each PATH component.
# @return Full path of the first match ('//' collapsed to '/'), or NA when
#         the file is not found anywhere on the PATH.
first.in.path <- function(file) {
  ff <- paste0(strsplit(Sys.getenv('PATH'), ':')[[1]], '/', file)
  # file.exists() is vectorized; no need for lapply(...) == TRUE
  ff <- ff[file.exists(ff)]
  #cat('Using ', ff[1],'\n');
  return(gsub('//', '/', ff[1], fixed = TRUE))
}
# Return the path-qualified prefix of an AFNI name (parsing it first when
# given as a character filename).
pprefix.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  an$pprefix
}
# Return the path-free prefix of an AFNI name (parsing it first when given
# as a character filename).
prefix.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  an$prefix
}
# Return the view ('+orig'/'+tlrc'/'+acpc' or NA) of an AFNI name (parsing
# it first when given as a character filename).
view.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  an$view
}
# Return prefix+view (e.g. "path/dset+orig") of an AFNI name (parsing it
# first when given as a character filename).
pv.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  paste0(an$pprefix, an$view)
}
# Name of the header file for an AFNI name: "<pprefix><view>.HEAD" for BRIK
# datasets with a known view, otherwise the original name unchanged.
head.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  if (an$type == 'BRIK' && !is.na(an$view)) {
    paste0(an$pprefix, an$view, ".HEAD")
  } else {
    an$orig_name
  }
}
# Name of the brick file for an AFNI name: "<pprefix><view>.BRIK" for BRIK
# datasets with a known view, otherwise the original name unchanged.
brik.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  if (an$type == 'BRIK' && !is.na(an$view)) {
    paste0(an$pprefix, an$view, ".BRIK")
  } else {
    an$orig_name
  }
}
# Compression suffix of an AFNI name's extension: 'gz', 'bz2', 'Z', or ''
# when the extension carries no recognized compression marker.
compressed.AFNI.name <- function(an) {
  if (is.character(an)) {
    an <- parse.AFNI.name(an)
  }
  # isTRUE() keeps a NULL/empty ext falling through to '' (grepl would
  # otherwise yield logical(0) and break if())
  if (isTRUE(grepl('\\.gz$', an$ext))) {
    'gz'
  } else if (isTRUE(grepl('\\.bz2$', an$ext))) {
    'bz2'
  } else if (isTRUE(grepl('\\.Z$', an$ext))) {
    'Z'
  } else {
    ''
  }
}
# Modify an AFNI filename via the compiled AFNI I/O library (R_io.so).
# what/val: modification to apply (e.g. what="append", val="_new");
# cwd: working directory forwarded to the C routine.
# Returns the modified name from the C side, or NULL (after an error
# message) when R_io.so is not loaded.
modify.AFNI.name <- function (name, what="append", val="_new", cwd=NULL) {
if (!is.loaded('R_SUMA_ParseModifyName')) {
err.AFNI("Missing R_io.so");
return(NULL);
}
# the actual parsing/modification happens in compiled code
an <- .Call("R_SUMA_ParseModifyName",
name = name,
what = what,
val = val,
cwd = cwd)
return(an)
}
# Parse an AFNI dataset name into its components: path, prefix, view,
# extension, dataset type (BRIK/NIFTI/NIML/1D/1Ds/Rs) and any trailing
# selectors ([..], {..}, <..>, #..).
# Returns a list 'an'; passing '-self_test' runs a built-in demo instead.
parse.AFNI.name <- function(filename, verb = 0) {
if (filename == '-self_test') { #Secret testing flag
note.AFNI('Function running in test mode');
show.AFNI.name(parse.AFNI.name('DePath/hello.DePrefix', verb))
show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc', verb))
show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.', verb))
show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.HEAD', verb))
show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.BRIK.gz', verb))
show.AFNI.name(parse.AFNI.name('DePath/DePrefix+acpc.HEAD[23]', verb))
show.AFNI.name(
parse.AFNI.name('DePath/DePrefix+acpc.HEAD[DeLabel]{DeRow}', verb))
show.AFNI.name(
parse.AFNI.name('DePath/DePrefix+acpc[DeLabel]{DeRow}', verb))
show.AFNI.name(
parse.AFNI.name('DePath/DePrefix+acpc.[DeLabel]{DeRow}', verb))
return(NULL)
}
# initialize the result structure (NULL assignments leave fields unset)
an <- list()
an$view <- NULL
an$pprefix <- NULL
an$brsel <- NULL;
an$rosel <- NULL;
an$rasel <- NULL;
an$insel <- NULL;
an$type <- NULL;
an$path <- NULL;
an$orig_name <- filename;
an$file <- NULL;
if (verb) { cat ('Parsing >>',filename,'<<\n', sep=''); }
if (!is.character(filename)) {
warning(paste('filename >>',
filename, '<< not a character string\n', sep=''),
immediate. = TRUE);
traceback();
return(NULL);
}
#Deal with special names:
# '1D:...' inline-1D and 'R:...' R-expression pseudo-names need no parsing
if (length(grep("^1D:.*$",filename))) {
an$type = '1Ds'
return(an)
} else if (length(grep("^R:.*$",filename))) {
an$type = 'Rs'
return(an)
}
#Deal with selectors
n <- parse.AFNI.name.selectors(filename, verb)
filename <- n$name
an$file <- n$name
an$brsel <- n$brsel;
an$rosel <- n$rosel;
an$rasel <- n$rasel;
an$insel <- n$insel;
#Remove last dot if there
filename <- sub('\\.$','',filename)
#NIFTI?
n <- strip.extension(filename, c('.nii', '.nii.gz'), verb)
if (n$ext != '') {
an$ext <- n$ext
an$type <- 'NIFTI'
an$pprefix <- n$name_noext
} else {
#remove other extensions
n <- strip.extension(filename, c('.HEAD','.BRIK','.BRIK.gz',
'.BRIK.bz2','.BRIK.Z',
'.1D', '.1D.dset',
'.niml.dset',
'.' ),
verb)
if (n$ext == '.1D' || n$ext == '.1D.dset') {
an$type <- '1D'
} else if (n$ext == '.niml.dset') {
an$type <- 'NIML'
} else {
an$type <- 'BRIK'
}
if (n$ext == '.') {
n$ext <- ''
}
an$ext <- n$ext
filename <- n$name_noext
# the view suffix (+orig/+tlrc/+acpc) only applies to BRIK-style names
n <- strip.extension(filename, c('+orig','+tlrc','+acpc'), verb)
if (n$ext != '') {
an$view <- n$ext
} else {
an$view <- NA
}
an$pprefix <- n$name_noext
}
#a prefix with no path
an$prefix <- basename(an$pprefix)
#and the path
an$path <- dirname(an$orig_name)
if (verb > 2) {
note.AFNI("Browser not active");
# browser()
}
# strip stray trailing quotes left over from shell-quoted selector strings
if ( an$type != '1D' && (
!is.null(an$brsel) || !is.null(an$rosel) ||
!is.null(an$rasel) || !is.null(an$insel))) {
#Remove trailing quote if any
an$prefix <- gsub("'$", '', an$prefix);
an$prefix <- gsub('"$', '', an$prefix);
an$pprefix <- gsub("'$",'', an$pprefix);
an$pprefix <- gsub('"$','', an$pprefix);
}
if ( an$type != 'BRIK' ) {
#Put the extension back on
an$pprefix <- paste(an$pprefix,an$ext, sep='');
an$prefix <- paste(an$prefix,an$ext, sep='');
}
return(an)
}
# Check on-disk presence of an AFNI dataset.
#
# @param an An AFNI name list (from parse.AFNI.name) or a character filename.
# @return Integer code: 0 = nothing found; +1 when the header (.HEAD or the
#         single-file dataset) exists; +2 when the brick (.BRIK, possibly
#         .gz/.Z compressed) exists; 3 means both are present.
exists.AFNI.name <- function(an) {
   if (is.character(an)) an <- parse.AFNI.name(an);
   ans <- 0
   if (file.exists(head.AFNI.name(an))) ans <- ans + 1;
   # build the brick name once and test all compression variants in a single
   # vectorized file.exists() call
   bn <- brik.AFNI.name(an)
   if (any(file.exists(c(bn, paste0(bn, '.gz'), paste0(bn, '.Z'))))) ans <- ans + 2;
   return(ans);
}
# Build a fresh options list for an AFNI R program.
#
# @param history Character string recording the command history (stored as-is).
# @param parsed_args Named list of parsed command-line options; names carry
#        their leading '-' (e.g. "-overwrite").
# @return A list with the command history plus option defaults; currently the
#         only recognized option is -overwrite, which flips lop$overwrite.
AFNI.new.options.list <- function(history = '', parsed_args = NULL) {
   lop <- list (com_history = history);
   #Look for defaults
   lop$overwrite <- FALSE
   # seq_along() is safe for NULL/empty parsed_args, unlike 1:length(...)
   # which iterated over c(1, 0) and failed on names(NULL)[1]
   for (i in seq_along(parsed_args)) {
      # strip the leading '-' from the option name
      opname <- strsplit(names(parsed_args)[i],'^-')[[1]];
      opname <- opname[length(opname)];
      switch(opname,
             overwrite = lop$overwrite <- TRUE )
   }
   return(lop)
}
# Split an AFNI filename into the bare name plus any trailing selectors:
#   [..] brick selector, {..} row selector, <..> range selector, #.. index.
#
# @param filename Dataset name possibly carrying selector suffixes.
# @param verb Verbosity level (currently unused here).
# @return list(name=..., brsel=..., rosel=..., rasel=..., insel=...);
#         unset selectors are absent (NULL) from the list.
parse.AFNI.name.selectors <- function(filename,verb=0) {
   n <- list()
   n$brsel <- NULL;
   n$rosel <- NULL;
   n$rasel <- NULL;
   n$insel <- NULL;
   # cut the name away from the selector strings
   selecs <- strsplit(filename,"\\[|\\{|<|#")[[1]];
   n$name <- selecs[1]
   # Only scan when selectors exist; the old selecs[2:length(selecs)]
   # indexing re-visited the file name itself (indices c(2, 1)) when there
   # were none, so a ']'/'}'/'>' inside a plain name could be misread.
   if (length(selecs) > 1) {
      for (ss in selecs[-1]) {
         if (length(grep("]",ss))) {
            n$brsel <- strsplit(ss,"\\]")[[1]][1];
         } else if (length(grep("}",ss))) {
            n$rosel <- strsplit(ss,"\\}")[[1]][1];
         } else if (length(grep(">",ss))) {
            n$rasel <- strsplit(ss,">")[[1]][1];
         }
      }
   }
   # '#' selector: everything after the first '#'
   selecs <- strsplit(filename,"#")[[1]];
   if (length(selecs) > 1) {
      n$insel <- selecs[2]
   }
   return(n)
}
# Split a filename into (extension, name-without-extension).
#
# @param filename Character filename.
# @param extvec Optional vector of candidate extensions (e.g. '.nii');
#        matched in order, first hit wins.  When NULL, everything after the
#        last '.' is treated as the extension.
# @param verb Verbosity level (currently unused here).
# @return list(ext=..., name_noext=...); ext is '' when nothing matched.
strip.extension <- function (filename, extvec=NULL, verb=0) {
   n <- list()
   if (is.null(extvec)) {
      ff <- strsplit(filename, '\\.')[[1]]
      if (length(ff) > 1) {
         n$ext <- paste0('.', ff[length(ff)])
         # drop the last component; clearer than the old ff[1:length(ff)-1],
         # which only worked because ':' binds tighter than '-'
         n$name_noext <- paste(ff[-length(ff)], collapse='.')
      } else {
         n$ext <- ''
         n$name_noext <- filename
      }
   } else {
      n$ext <- ''
      n$name_noext <- filename
      for (ex in extvec) {
         # anchor at end of name; as before, only the first character of ex
         # is regex-escaped (the leading '.' or '+')
         patt <- paste0('\\', ex, '$')
         if (grepl(patt, filename)) {
            n$ext <- ex
            n$name_noext <- sub(patt, '', filename)
            return(n)
         }
      }
   }
   return(n)
}
|
# Integration test: recording an HTTP interaction into a YAML cassette.
# NOTE: performs a live request to eu.httpbin.org when no cassette exists.
context("use_cassette: works as expected")
test_that("use_cassette works as expected", {
skip_on_cran()
library(crul)
# record into a throwaway cassette directory under tempdir()
mydir <- file.path(tempdir(), "asdfasdfsd")
invisible(vcr_configure(dir = mydir))
unlink(file.path(vcr_c$dir, "testing1.yml"))
aa <- use_cassette(name = "testing1", {
res <- crul::HttpClient$new("https://eu.httpbin.org/get")$get()
})
# the returned object is the cassette itself
expect_is(aa, "Cassette")
expect_is(aa$name, "character")
expect_equal(aa$name, "testing1")
expect_false(aa$allow_playback_repeats)
# expect_true(aa$any_new_recorded_interactions()) # FIXME: uncomment w/ webmockr update
expect_is(aa$args, "list")
expect_is(aa$call_block, "function")
# the response made inside the block is still usable afterwards
expect_is(res, "HttpResponse")
expect_is(res$content, "raw")
# the cassette should have been written to disk as YAML
cas <- readLines(file.path(vcr_c$dir, "testing1.yml"))
expect_is(cas, "character")
# expect_gt(length(cas), 10) # FIXME: uncomment w/ webmockr update
# expect_true(any(grepl('http_interactions', cas))) # FIXME: uncomment w/ webmockr update
# expect_true(any(grepl('recorded_with', cas))) # FIXME: uncomment w/ webmockr update
})
# Validation errors: use_cassette() must reject bad arguments early with
# informative messages (no network access is needed for these).
context("use_cassette fails well")
test_that("use_cassette fails well", {
# requires a code block
unlink(file.path(vcr_c$dir, "foobar333.yml"))
expect_error(
suppressMessages(use_cassette("foobar333")),
"`vcr::use_cassette` requires a code block"
)
# must pass a cassette name
expect_error(use_cassette(), "argument \"name\" is missing")
# record valid values
expect_error(
suppressMessages(use_cassette("newbar", {}, record = "stuff")),
"'record' value of 'stuff' is not in the allowed set"
)
# match_requests_on valid values
expect_error(
suppressMessages(use_cassette("newbar", {}, match_requests_on = "stuff")),
"'match_requests_on' values \\(stuff\\) is not in the allowed set"
)
# update_content_length_header valid type
expect_error(
suppressMessages(use_cassette("newbar3", {}, update_content_length_header = 5)),
"update_content_length_header must be of class logical"
)
# preserve_exact_body_bytes valid type
expect_error(
suppressMessages(use_cassette("newbar4", {}, preserve_exact_body_bytes = 5)),
"preserve_exact_body_bytes must be of class logical"
)
# persist_with valid value
expect_error(
suppressMessages(use_cassette("newbar5", {}, persist_with = "jello")),
"The requested VCR cassette persister \\(jello\\) is not registered"
)
# serialize_with valid value
expect_error(
suppressMessages(use_cassette("newbar6", {}, serialize_with = "howdy")),
"The requested VCR cassette serializer \\(howdy\\) is not registered"
)
})
# cleanup: remove cassette files created by the tests above
unlink(list.files(pattern = "newbar", full.names = TRUE))
unlink("foobar333.yml")
unlink("testing1.yml")
# reset vcr configuration back to its defaults
vcr_configure_reset()
| /data/genthat_extracted_code/vcr/tests/test-ause_cassette.R | no_license | surayaaramli/typeRrh | R | false | false | 2,789 | r | context("use_cassette: works as expected")
# Integration test: recording an HTTP interaction into a YAML cassette.
# NOTE: performs a live request to eu.httpbin.org when no cassette exists.
test_that("use_cassette works as expected", {
skip_on_cran()
library(crul)
# record into a throwaway cassette directory under tempdir()
mydir <- file.path(tempdir(), "asdfasdfsd")
invisible(vcr_configure(dir = mydir))
unlink(file.path(vcr_c$dir, "testing1.yml"))
aa <- use_cassette(name = "testing1", {
res <- crul::HttpClient$new("https://eu.httpbin.org/get")$get()
})
# the returned object is the cassette itself
expect_is(aa, "Cassette")
expect_is(aa$name, "character")
expect_equal(aa$name, "testing1")
expect_false(aa$allow_playback_repeats)
# expect_true(aa$any_new_recorded_interactions()) # FIXME: uncomment w/ webmockr update
expect_is(aa$args, "list")
expect_is(aa$call_block, "function")
# the response made inside the block is still usable afterwards
expect_is(res, "HttpResponse")
expect_is(res$content, "raw")
# the cassette should have been written to disk as YAML
cas <- readLines(file.path(vcr_c$dir, "testing1.yml"))
expect_is(cas, "character")
# expect_gt(length(cas), 10) # FIXME: uncomment w/ webmockr update
# expect_true(any(grepl('http_interactions', cas))) # FIXME: uncomment w/ webmockr update
# expect_true(any(grepl('recorded_with', cas))) # FIXME: uncomment w/ webmockr update
})
# Validation errors: use_cassette() must reject bad arguments early with
# informative messages (no network access is needed for these).
context("use_cassette fails well")
test_that("use_cassette fails well", {
# requires a code block
unlink(file.path(vcr_c$dir, "foobar333.yml"))
expect_error(
suppressMessages(use_cassette("foobar333")),
"`vcr::use_cassette` requires a code block"
)
# must pass a cassette name
expect_error(use_cassette(), "argument \"name\" is missing")
# record valid values
expect_error(
suppressMessages(use_cassette("newbar", {}, record = "stuff")),
"'record' value of 'stuff' is not in the allowed set"
)
# match_requests_on valid values
expect_error(
suppressMessages(use_cassette("newbar", {}, match_requests_on = "stuff")),
"'match_requests_on' values \\(stuff\\) is not in the allowed set"
)
# update_content_length_header valid type
expect_error(
suppressMessages(use_cassette("newbar3", {}, update_content_length_header = 5)),
"update_content_length_header must be of class logical"
)
# preserve_exact_body_bytes valid type
expect_error(
suppressMessages(use_cassette("newbar4", {}, preserve_exact_body_bytes = 5)),
"preserve_exact_body_bytes must be of class logical"
)
# persist_with valid value
expect_error(
suppressMessages(use_cassette("newbar5", {}, persist_with = "jello")),
"The requested VCR cassette persister \\(jello\\) is not registered"
)
# serialize_with valid value
expect_error(
suppressMessages(use_cassette("newbar6", {}, serialize_with = "howdy")),
"The requested VCR cassette serializer \\(howdy\\) is not registered"
)
})
# cleanup: remove cassette files created by the tests above
unlink(list.files(pattern = "newbar", full.names = TRUE))
unlink("foobar333.yml")
unlink("testing1.yml")
# reset vcr configuration back to package defaults
vcr_configure_reset()
|
#-----------------------------------------------------------------------------
## LOADING DATA
# Read the NYC Airbnb 2019 listings; every step below adds to or renames
# columns of this `airb` data frame (uses readr/dplyr; year()/day()/month()
# presumably come from lubridate -- confirm it is loaded).
airb <- read_csv("1.data/AB_NYC_2019.csv")
set.seed(1)
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.1 CHANGE TYPE IN VALUES AND REMOVING UNNECESSARY VARIABLES
# Convert the two categorical columns to factors.
airb <- airb %>%
  mutate(neighbourhood_group = as.factor(neighbourhood_group)) %>%
  mutate(room_type = as.factor(room_type))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.2 NEW COLUMN
# Split the last-review date into year / day / month components.
airb <- airb %>%
  mutate(year_add = year(last_review),
         day_add = day(last_review),
         month_add = month(last_review))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.3 NEW COLUMN
# Listings per neighbourhood: add_count() appends a column `n`, renamed
# here to n_neighbourhood.
airb <- airb %>%
  add_count(neighbourhood) %>%
  rename(n_neighbourhood = n)
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.4 NEW COLUMN
# Bucket listings into price categories.
# NOTE(review): the branch boundaries only cover whole-number prices -- a
# price of e.g. 69.5 matches no branch and becomes NA; confirm prices are
# integers. "luxuary" also looks like a typo for "luxury", but renaming the
# level would change factor levels downstream.
airb <- airb %>% mutate(
  price_category = as.factor(case_when(
    price <= 69 ~ "cheap",
    price >= 70 & price <= 106 ~ "regular price",
    price >= 107 & price <= 175 ~ "expensive",
    price >= 176 & price <= 2000 ~ "the most expensive",
    price >= 2001 ~ "luxuary"
  )))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.5 NEW COLUMN
# Word count of the listing title (split on single spaces).
airb <- airb %>%
  mutate(words_number_name = as.double(sapply(strsplit(name, " "), length)))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.6 CREATE NEW NAME FOR COLUMNS
# Shorter, consistent column names.
airb <- airb %>%
  rename(
    availability = availability_365,
    min_nights = minimum_nights,
    reviews_num = number_of_reviews,
    reviews_month = reviews_per_month
) | /scripts/modeling/2variable_modifications.R | no_license | natalia-kozlowska/arbnb_raport_modeling | R | false | false | 1,894 | r |
#-----------------------------------------------------------------------------
## LOADING DATA
# Read the NYC Airbnb 2019 listings; every step below adds to or renames
# columns of this `airb` data frame (uses readr/dplyr; year()/day()/month()
# presumably come from lubridate -- confirm it is loaded).
airb <- read_csv("1.data/AB_NYC_2019.csv")
set.seed(1)
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.1 CHANGE TYPE IN VALUES AND REMOVING UNNECESSARY VARIABLES
# Convert the two categorical columns to factors.
airb <- airb %>%
  mutate(neighbourhood_group = as.factor(neighbourhood_group)) %>%
  mutate(room_type = as.factor(room_type))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.2 NEW COLUMN
# Split the last-review date into year / day / month components.
airb <- airb %>%
  mutate(year_add = year(last_review),
         day_add = day(last_review),
         month_add = month(last_review))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.3 NEW COLUMN
# Listings per neighbourhood: add_count() appends a column `n`, renamed
# here to n_neighbourhood.
airb <- airb %>%
  add_count(neighbourhood) %>%
  rename(n_neighbourhood = n)
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.4 NEW COLUMN
# Bucket listings into price categories.
# NOTE(review): the branch boundaries only cover whole-number prices -- a
# price of e.g. 69.5 matches no branch and becomes NA; confirm prices are
# integers. "luxuary" also looks like a typo for "luxury", but renaming the
# level would change factor levels downstream.
airb <- airb %>% mutate(
  price_category = as.factor(case_when(
    price <= 69 ~ "cheap",
    price >= 70 & price <= 106 ~ "regular price",
    price >= 107 & price <= 175 ~ "expensive",
    price >= 176 & price <= 2000 ~ "the most expensive",
    price >= 2001 ~ "luxuary"
  )))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.5 NEW COLUMN
# Word count of the listing title (split on single spaces).
airb <- airb %>%
  mutate(words_number_name = as.double(sapply(strsplit(name, " "), length)))
#-----------------------------------------------------------------------------
## 1. VARIABLE MODIFICATION
### 1.6 CREATE NEW NAME FOR COLUMNS
# Shorter, consistent column names.
airb <- airb %>%
  rename(
    availability = availability_365,
    min_nights = minimum_nights,
    reviews_num = number_of_reviews,
    reviews_month = reviews_per_month
) |
\alias{pango-Bidirectional-Text}
\alias{PangoDirection}
\alias{PangoBidiType}
\name{pango-Bidirectional-Text}
\title{Bidirectional Text}
\description{Types and functions to help with handling bidirectional text}
\section{Methods and Functions}{
\code{\link{pangoUnicharDirection}(ch)}\cr
\code{\link{pangoFindBaseDir}(text, length = -1)}\cr
\code{\link{pangoGetMirrorChar}(ch)}\cr
\code{\link{pangoBidiTypeForUnichar}(ch)}\cr
}
\section{Detailed Description}{Pango supports bidirectional text (like Arabic and Hebrew) automatically.
Some applications however, need some help to correctly handle bidirectional
text.
The \code{\link{PangoDirection}} type can be used with \code{\link{pangoContextSetBaseDir}} to
instruct Pango about direction of text, though in most cases Pango detects
that correctly and automatically. The rest of the facilities in this section
are used internally by Pango already, and are provided to help applications
that need more direct control over bidirectional setting of text.}
\section{Enums and Flags}{\describe{
\item{\verb{PangoDirection}}{
The \code{\link{PangoDirection}} type represents a direction in the
Unicode bidirectional algorithm; not every value in this
enumeration makes sense for every usage of \code{\link{PangoDirection}};
for example, the return value of \code{\link{pangoUnicharDirection}}
and \code{\link{pangoFindBaseDir}} cannot be \code{PANGO_DIRECTION_WEAK_LTR}
or \code{PANGO_DIRECTION_WEAK_RTL}, since every character is either
neutral or has a strong direction; on the other hand
\code{PANGO_DIRECTION_NEUTRAL} doesn't make sense to pass
to \code{\link{pangoItemizeWithBaseDir}}.
The \code{PANGO_DIRECTION_TTB_LTR}, \code{PANGO_DIRECTION_TTB_RTL}
values come from an earlier interpretation of this
enumeration as the writing direction of a block of
text and are no longer used; See \code{\link{PangoGravity}} for how
vertical text is handled in Pango.
\describe{
\item{\verb{ltr}}{ A strong left-to-right direction}
\item{\verb{rtl}}{ A strong right-to-left direction}
\item{\verb{ttb-ltr}}{ Deprecated value; treated the
same as \code{PANGO_DIRECTION_RTL}.}
\item{\verb{ttb-rtl}}{ Deprecated value; treated the
same as \code{PANGO_DIRECTION_LTR}}
}
}
\item{\verb{PangoBidiType}}{
The \code{\link{PangoBidiType}} type represents the bidirectional character
type of a Unicode character as specified by the
Unicode bidirectional algorithm (\url{http://www.unicode.org/reports/tr9/}).
Since 1.22
\describe{
\item{\verb{l}}{ Left-to-Right}
\item{\verb{lre}}{ Left-to-Right Embedding}
\item{\verb{lro}}{ Left-to-Right Override}
\item{\verb{r}}{ Right-to-Left}
\item{\verb{al}}{ Right-to-Left Arabic}
\item{\verb{rle}}{ Right-to-Left Embedding}
\item{\verb{rlo}}{ Right-to-Left Override}
\item{\verb{pdf}}{ Pop Directional Format}
\item{\verb{en}}{ European Number}
\item{\verb{es}}{ European Number Separator}
\item{\verb{et}}{ European Number Terminator}
\item{\verb{an}}{ Arabic Number}
\item{\verb{cs}}{ Common Number Separator}
\item{\verb{nsm}}{ Nonspacing Mark}
\item{\verb{bn}}{ Boundary Neutral}
\item{\verb{b}}{ Paragraph Separator}
\item{\verb{s}}{ Segment Separator}
\item{\verb{ws}}{ Whitespace}
\item{\verb{on}}{ Other Neutrals}
}
}
}}
\references{\url{http://library.gnome.org/devel//pango/pango-Bidirectional-Text.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/pango-Bidirectional-Text.Rd | no_license | hjy1210/RGtk2 | R | false | false | 3,380 | rd | \alias{pango-Bidirectional-Text}
\alias{PangoDirection}
\alias{PangoBidiType}
\name{pango-Bidirectional-Text}
\title{Bidirectional Text}
\description{Types and functions to help with handling bidirectional text}
\section{Methods and Functions}{
\code{\link{pangoUnicharDirection}(ch)}\cr
\code{\link{pangoFindBaseDir}(text, length = -1)}\cr
\code{\link{pangoGetMirrorChar}(ch)}\cr
\code{\link{pangoBidiTypeForUnichar}(ch)}\cr
}
\section{Detailed Description}{Pango supports bidirectional text (like Arabic and Hebrew) automatically.
Some applications however, need some help to correctly handle bidirectional
text.
The \code{\link{PangoDirection}} type can be used with \code{\link{pangoContextSetBaseDir}} to
instruct Pango about direction of text, though in most cases Pango detects
that correctly and automatically. The rest of the facilities in this section
are used internally by Pango already, and are provided to help applications
that need more direct control over bidirectional setting of text.}
\section{Enums and Flags}{\describe{
\item{\verb{PangoDirection}}{
The \code{\link{PangoDirection}} type represents a direction in the
Unicode bidirectional algorithm; not every value in this
enumeration makes sense for every usage of \code{\link{PangoDirection}};
for example, the return value of \code{\link{pangoUnicharDirection}}
and \code{\link{pangoFindBaseDir}} cannot be \code{PANGO_DIRECTION_WEAK_LTR}
or \code{PANGO_DIRECTION_WEAK_RTL}, since every character is either
neutral or has a strong direction; on the other hand
\code{PANGO_DIRECTION_NEUTRAL} doesn't make sense to pass
to \code{\link{pangoItemizeWithBaseDir}}.
The \code{PANGO_DIRECTION_TTB_LTR}, \code{PANGO_DIRECTION_TTB_RTL}
values come from an earlier interpretation of this
enumeration as the writing direction of a block of
text and are no longer used; See \code{\link{PangoGravity}} for how
vertical text is handled in Pango.
\describe{
\item{\verb{ltr}}{ A strong left-to-right direction}
\item{\verb{rtl}}{ A strong right-to-left direction}
\item{\verb{ttb-ltr}}{ Deprecated value; treated the
same as \code{PANGO_DIRECTION_RTL}.}
\item{\verb{ttb-rtl}}{ Deprecated value; treated the
same as \code{PANGO_DIRECTION_LTR}}
}
}
\item{\verb{PangoBidiType}}{
The \code{\link{PangoBidiType}} type represents the bidirectional character
type of a Unicode character as specified by the
Unicode bidirectional algorithm (\url{http://www.unicode.org/reports/tr9/}).
Since 1.22
\describe{
\item{\verb{l}}{ Left-to-Right}
\item{\verb{lre}}{ Left-to-Right Embedding}
\item{\verb{lro}}{ Left-to-Right Override}
\item{\verb{r}}{ Right-to-Left}
\item{\verb{al}}{ Right-to-Left Arabic}
\item{\verb{rle}}{ Right-to-Left Embedding}
\item{\verb{rlo}}{ Right-to-Left Override}
\item{\verb{pdf}}{ Pop Directional Format}
\item{\verb{en}}{ European Number}
\item{\verb{es}}{ European Number Separator}
\item{\verb{et}}{ European Number Terminator}
\item{\verb{an}}{ Arabic Number}
\item{\verb{cs}}{ Common Number Separator}
\item{\verb{nsm}}{ Nonspacing Mark}
\item{\verb{bn}}{ Boundary Neutral}
\item{\verb{b}}{ Paragraph Separator}
\item{\verb{s}}{ Segment Separator}
\item{\verb{ws}}{ Whitespace}
\item{\verb{on}}{ Other Neutrals}
}
}
}}
\references{\url{http://library.gnome.org/devel//pango/pango-Bidirectional-Text.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Generate pdbqt files for the quercetin SMILES list and (optionally) dock
# each ligand against the active site, in parallel.
setwd('~/docking/covid19-docking/')
require(data.table)
require(parallel)
source('scripts/dock_helpers.r')

# Two-column file: V1 = ligand name, V2 = SMILES string.
df = fread('quercetin/smiles.txt')

# seq_len() instead of 1:nrow(df) so an empty input file iterates zero times.
mclapply(seq_len(nrow(df)), function(i){
  name = df$V1[i]
  smiles = df$V2[i]
  path = generate_ligand_pdbqt(smiles, name, out_dir = 'quercetin/pdbqt')
  if(is.na(path)){
    # pdbqt generation failed: remove any stale outputs for this ligand.
    # NOTE(review): these cleanup paths point at nano_drugbank/, not
    # quercetin/ -- confirm this is intentional, not a copy-paste leftover.
    unlink(sprintf('nano_drugbank/pdbqt/%s.pdbqt', name))
    unlink(sprintf('nano_drugbank/docked/docked_%s.pdbqt', name))
    unlink(sprintf('nano_drugbank/logs/%s.pdbqt.log', name))
    return(NULL)
  }
  # NOTE(review): this unconditional return disables everything below, so
  # only pdbqt generation/cleanup runs; delete it to re-enable docking.
  return(NULL)
  fout = sprintf('quercetin/docked/docked_%s', basename(path))
  if(file.exists(fout)){
    message('-- skipping ', name)
    return(NULL)
  }
  mwt = get_mwt(path)
  # Skip ligands whose molecular weight is unknown or above 500 Da.
  # (Fixed: the original condition `!is.na(mwt) | mwt > 500` skipped every
  # ligand with a KNOWN weight, and errored with `if (NA)` on unknown ones.)
  if(is.na(mwt) || mwt > 500){
    message('-- too large')
    return(NULL)
  }
  run_docking( path, exhaustiveness = 10, cores = 1, active_site = T,
               dock_dir = 'quercetin/docked', log_dir = 'quercetin/logs' )
}, mc.cores = 40)
| /quercetin/run_docking.r | no_license | Ridhanya/covid19-docking | R | false | false | 966 | r | setwd('~/docking/covid19-docking/')
# Generate pdbqt files for the quercetin SMILES list and (optionally) dock
# each ligand against the active site, in parallel.
require(data.table)
require(parallel)
source('scripts/dock_helpers.r')

# Two-column file: V1 = ligand name, V2 = SMILES string.
df = fread('quercetin/smiles.txt')

# seq_len() instead of 1:nrow(df) so an empty input file iterates zero times.
mclapply(seq_len(nrow(df)), function(i){
  name = df$V1[i]
  smiles = df$V2[i]
  path = generate_ligand_pdbqt(smiles, name, out_dir = 'quercetin/pdbqt')
  if(is.na(path)){
    # pdbqt generation failed: remove any stale outputs for this ligand.
    # NOTE(review): these cleanup paths point at nano_drugbank/, not
    # quercetin/ -- confirm this is intentional, not a copy-paste leftover.
    unlink(sprintf('nano_drugbank/pdbqt/%s.pdbqt', name))
    unlink(sprintf('nano_drugbank/docked/docked_%s.pdbqt', name))
    unlink(sprintf('nano_drugbank/logs/%s.pdbqt.log', name))
    return(NULL)
  }
  # NOTE(review): this unconditional return disables everything below, so
  # only pdbqt generation/cleanup runs; delete it to re-enable docking.
  return(NULL)
  fout = sprintf('quercetin/docked/docked_%s', basename(path))
  if(file.exists(fout)){
    message('-- skipping ', name)
    return(NULL)
  }
  mwt = get_mwt(path)
  # Skip ligands whose molecular weight is unknown or above 500 Da.
  # (Fixed: the original condition `!is.na(mwt) | mwt > 500` skipped every
  # ligand with a KNOWN weight, and errored with `if (NA)` on unknown ones.)
  if(is.na(mwt) || mwt > 500){
    message('-- too large')
    return(NULL)
  }
  run_docking( path, exhaustiveness = 10, cores = 1, active_site = T,
               dock_dir = 'quercetin/docked', log_dir = 'quercetin/logs' )
}, mc.cores = 40)
|
# Linear Programming in R
library(lpSolve)
# Solve a maximization linear program via lpSolve::lp().
#
#   obj         numeric vector of objective-function coefficients
#   constr      constraint coefficient matrix (one row per constraint)
#   constr_dir  character vector of constraint directions ("<=", ">=", "=")
#   rhs         numeric vector of right-hand-side values
#
# Returns the lp() result object (access $objval, $solution, $duals).
# Fixed: the original body ignored the `obj` argument and read the global
# `obj.fun` instead, so the function only worked when that global existed.
# NOTE(review): this definition masks base::solve(); consider renaming
# (e.g. solve_lp) in a future, interface-changing cleanup.
solve <- function(obj,constr,constr_dir,rhs){
  lp(direction = "max", objective.in = obj, const.mat = constr,
     const.dir = constr_dir, const.rhs = rhs, compute.sens = TRUE)
}
# Problem data: maximize 20*x1 + 60*x2 subject to three constraints.
obj.fun <- c(20,60)
# Constraint matrix filled row-wise: 30x1+20x2, 5x1+10x2, x1+x2.
constr <- matrix(c(30,20,5,10,1,1),ncol = 2,byrow = TRUE)
constr_dir <- c("<=","<=",">=")
rhs <- c(2700,850,95)
# solving model (compute.sens = TRUE also returns sensitivity/dual values)
prod.sol = lp(direction = "max",objective.in = obj.fun,const.mat = constr,const.dir = constr_dir,const.rhs = rhs,
              compute.sens = TRUE)
# Accessing R output
prod.sol$objval #objective function value
prod.sol$solution #decision variables values
prod.sol$duals #Includes dual of constraints cost variables | /Lectures/Review Phase/Block 4/Operations Research/Lab/03122020/lpPlayground.R | no_license | yusufbrima/AIMS | R | false | false | 730 | r | # Linear Programming in R
library(lpSolve)
# Solve a maximization linear program via lpSolve::lp().
#
#   obj         numeric vector of objective-function coefficients
#   constr      constraint coefficient matrix (one row per constraint)
#   constr_dir  character vector of constraint directions ("<=", ">=", "=")
#   rhs         numeric vector of right-hand-side values
#
# Returns the lp() result object (access $objval, $solution, $duals).
# Fixed: the original body ignored the `obj` argument and read the global
# `obj.fun` instead, so the function only worked when that global existed.
# NOTE(review): this definition masks base::solve(); consider renaming
# (e.g. solve_lp) in a future, interface-changing cleanup.
solve <- function(obj,constr,constr_dir,rhs){
  lp(direction = "max", objective.in = obj, const.mat = constr,
     const.dir = constr_dir, const.rhs = rhs, compute.sens = TRUE)
}
# Problem data: maximize 20*x1 + 60*x2 subject to three constraints.
obj.fun <- c(20,60)
# Constraint matrix filled row-wise: 30x1+20x2, 5x1+10x2, x1+x2.
constr <- matrix(c(30,20,5,10,1,1),ncol = 2,byrow = TRUE)
constr_dir <- c("<=","<=",">=")
rhs <- c(2700,850,95)
# solving model (compute.sens = TRUE also returns sensitivity/dual values)
prod.sol = lp(direction = "max",objective.in = obj.fun,const.mat = constr,const.dir = constr_dir,const.rhs = rhs,
              compute.sens = TRUE)
# Accessing R output
prod.sol$objval #objective function value
prod.sol$solution #decision variables values
prod.sol$duals #Includes dual of constraints cost variables |
library(quantmod)
library(dplyr)
library(PerformanceAnalytics)
library(IntroCompFinR)
library(readxl)
# Set Workspace
# NOTE(review): rm(list = ls()) wipes the user's whole session; avoid in
# scripts that may be sourced.
rm(list = ls())
# Part 1
# Download 2018 weekly prices for ticker KIMBERA.MX from Yahoo.
periodicity = "weekly"
from = "2018-01-01"
to = "2018-12-31"
ticker = "KIMBERA.MX"
try(getSymbols(ticker,
               from = from, to = to,
               periodicity = periodicity,
               src = "yahoo"))
# getSymbols() assigns an object named after the ticker; collect it.
objList <- lapply(ticker, get)
prices.zoo <- do.call(merge, objList)
rm(objList)
# Keep only the adjusted-close column, dropping rows with NAs.
prices.df <- as.data.frame(prices.zoo) %>%
  na.omit() %>%
  select(contains("Adjusted"))
# Weekly continuously compounded (log) returns.
returns.weekly.zoo <- diff(log(as.zoo(prices.df)))
returns.weekly.df = as.data.frame(returns.weekly.zoo)
# Risk Free
# FRED series used as the risk-free rate (values are in percent; divided
# by 100 below -- presumably an annualized rate, confirm).
riskfree = "INTGSTMXM193N"
try(getSymbols(riskfree,
               src = "FRED",
               periodicity = "weekly"
))
rfrate.obj = lapply(riskfree, get)
rfrate.zoo <- do.call(merge, rfrate.obj)
rm(rfrate.obj)
# Restrict to the 2018 observations.
rfrate.df <- rfrate.zoo[index(rfrate.zoo) >= as.Date("2018-01-01") & index(rfrate.zoo) <= as.Date("2018-12-01")]
risk_free_rate <- exp(mean(log(rfrate.df / 100))) # geometric mean of annualized risk free rate
# Multiperiod binomial model
weekly_sd <- sd(returns.weekly.zoo)          # per-week sd of cc returns
stock_price <- prices.df[nrow(prices.df), ]  # last observed adjusted close
# Price a European option with a multi-step binomial (CRR) model, estimated
# by Monte Carlo simulation of terminal stock prices.
#
#   periods       number of binomial steps until expiry
#   iterations    number of simulated price paths
#   stock_price   current stock price
#   weekly_sd_cc  per-step standard deviation of the cc return
#   risk_free     per-step simple risk-free rate
#   strike_price  option strike
#   type          "call" (default) or "put"
#
# Returns the discounted mean simulated payoff.
multiperiodBinomialFunction <- function(periods, iterations, stock_price, weekly_sd_cc, risk_free, strike_price, type = "call") {
  u <- exp(weekly_sd_cc)    # CRR up factor
  d <- 1 / u                # CRR down factor
  r <- (1 + risk_free)      # per-step gross risk-free return
  q <- (r - d) / (u - d)    # risk-neutral probability of an up move
  # A path's terminal price depends only on its number of up moves, which
  # is Binomial(periods, q); drawing that count directly replaces the
  # original O(periods * iterations) double loop with one vectorized draw
  # (identical distribution, though a given seed yields a different
  # stream of draws than the old loop).
  ups <- rbinom(iterations, periods, q)
  S <- stock_price * u^ups * d^(periods - ups)
  if(type == "call") {
    payoffs <- S - strike_price
  } else {
    payoffs <- strike_price - S
  }
  payoffs[payoffs < 0] <- 0          # option payoff is floored at zero
  mean(payoffs) / (r ** periods)     # discount the average payoff
}
strike_price = 40
# 1-year horizon = 52 weekly steps, 10,000 simulated paths; the annual
# risk-free rate is converted to a per-week rate by dividing by 52.
multiperiodBinomialFunction(52, 10000, stock_price, weekly_sd, risk_free_rate/52, strike_price, "call")
multiperiodBinomialFunction(52, 10000, stock_price, weekly_sd, risk_free_rate/52, strike_price, "put")
# Black and Sholes model
# S = stock price
# K = strike price
# r = annual risk-free rate
# t = time to expiration date (measured in years or fraction of years)
# sd = standard deviation (annualized) of the stock continuously compounded return
# N(z) = Cumulative density function of the standard normal probability function (mean=0, standard deviation=1); it is the probability to get a Z equal or less than z.
annual_sd <- weekly_sd * sqrt(52)             # annualize the weekly sd
stock_price <- prices.df[nrow(prices.df), ]   # last observed adjusted close
strike_price = 40
# Black-Scholes closed-form price of a European call option.
#   S: spot price, K: strike, r: annual risk-free rate,
#   t: time to expiry in years, sd: annualized volatility.
# Returns S*N(d1) - K*exp(-r*t)*N(d2).
black_sholes_model_call = function(S, K, r, t, sd) {
  vol_t <- sd * sqrt(t)
  d1 <- (log(S / K) + (r + sd^2 / 2) * t) / vol_t
  d2 <- d1 - vol_t
  S * pnorm(d1) - K * exp(-r * t) * pnorm(d2)
}
# Closed-form call price at a 1-year maturity.
call = black_sholes_model_call(stock_price, strike_price, risk_free_rate, 1, annual_sd)
# Black-Scholes closed-form price of a European put option.
#   S: spot price, K: strike, r: annual risk-free rate,
#   t: time to expiry in years, sd: annualized volatility.
# Returns K*exp(-r*t)*N(-d2) - S*N(-d1).
black_sholes_model_put = function(S, K, r, t, sd) {
  vol_t <- sd * sqrt(t)
  d1 <- (log(S / K) + (r + sd^2 / 2) * t) / vol_t
  d2 <- d1 - vol_t
  K * exp(-r * t) * pnorm(-d2) - S * pnorm(-d1)
}
# Closed-form put price at a 1-year maturity.
put = black_sholes_model_put(stock_price, strike_price, risk_free_rate, 1, annual_sd)
| /part1.R | no_license | crcz25/prografinanciera | R | false | false | 3,196 | r |
library(quantmod)
library(dplyr)
library(PerformanceAnalytics)
library(IntroCompFinR)
library(readxl)
# Set Workspace
# NOTE(review): rm(list = ls()) wipes the user's whole session; avoid in
# scripts that may be sourced.
rm(list = ls())
# Part 1
# Download 2018 weekly prices for ticker KIMBERA.MX from Yahoo.
periodicity = "weekly"
from = "2018-01-01"
to = "2018-12-31"
ticker = "KIMBERA.MX"
try(getSymbols(ticker,
               from = from, to = to,
               periodicity = periodicity,
               src = "yahoo"))
# getSymbols() assigns an object named after the ticker; collect it.
objList <- lapply(ticker, get)
prices.zoo <- do.call(merge, objList)
rm(objList)
# Keep only the adjusted-close column, dropping rows with NAs.
prices.df <- as.data.frame(prices.zoo) %>%
  na.omit() %>%
  select(contains("Adjusted"))
# Weekly continuously compounded (log) returns.
returns.weekly.zoo <- diff(log(as.zoo(prices.df)))
returns.weekly.df = as.data.frame(returns.weekly.zoo)
# Risk Free
# FRED series used as the risk-free rate (values are in percent; divided
# by 100 below -- presumably an annualized rate, confirm).
riskfree = "INTGSTMXM193N"
try(getSymbols(riskfree,
               src = "FRED",
               periodicity = "weekly"
))
rfrate.obj = lapply(riskfree, get)
rfrate.zoo <- do.call(merge, rfrate.obj)
rm(rfrate.obj)
# Restrict to the 2018 observations.
rfrate.df <- rfrate.zoo[index(rfrate.zoo) >= as.Date("2018-01-01") & index(rfrate.zoo) <= as.Date("2018-12-01")]
risk_free_rate <- exp(mean(log(rfrate.df / 100))) # geometric mean of annualized risk free rate
# Multiperiod binomial model
weekly_sd <- sd(returns.weekly.zoo)          # per-week sd of cc returns
stock_price <- prices.df[nrow(prices.df), ]  # last observed adjusted close
# Price a European option with a multi-step binomial (CRR) model, estimated
# by Monte Carlo simulation of terminal stock prices.
#
#   periods       number of binomial steps until expiry
#   iterations    number of simulated price paths
#   stock_price   current stock price
#   weekly_sd_cc  per-step standard deviation of the cc return
#   risk_free     per-step simple risk-free rate
#   strike_price  option strike
#   type          "call" (default) or "put"
#
# Returns the discounted mean simulated payoff.
multiperiodBinomialFunction <- function(periods, iterations, stock_price, weekly_sd_cc, risk_free, strike_price, type = "call") {
  u <- exp(weekly_sd_cc)    # CRR up factor
  d <- 1 / u                # CRR down factor
  r <- (1 + risk_free)      # per-step gross risk-free return
  q <- (r - d) / (u - d)    # risk-neutral probability of an up move
  # A path's terminal price depends only on its number of up moves, which
  # is Binomial(periods, q); drawing that count directly replaces the
  # original O(periods * iterations) double loop with one vectorized draw
  # (identical distribution, though a given seed yields a different
  # stream of draws than the old loop).
  ups <- rbinom(iterations, periods, q)
  S <- stock_price * u^ups * d^(periods - ups)
  if(type == "call") {
    payoffs <- S - strike_price
  } else {
    payoffs <- strike_price - S
  }
  payoffs[payoffs < 0] <- 0          # option payoff is floored at zero
  mean(payoffs) / (r ** periods)     # discount the average payoff
}
strike_price = 40
# 1-year horizon = 52 weekly steps, 10,000 simulated paths; the annual
# risk-free rate is converted to a per-week rate by dividing by 52.
multiperiodBinomialFunction(52, 10000, stock_price, weekly_sd, risk_free_rate/52, strike_price, "call")
multiperiodBinomialFunction(52, 10000, stock_price, weekly_sd, risk_free_rate/52, strike_price, "put")
# Black and Sholes model
# S = stock price
# K = strike price
# r = annual risk-free rate
# t = time to expiration date (measured in years or fraction of years)
# sd = standard deviation (annualized) of the stock continuously compounded return
# N(z) = Cumulative density function of the standard normal probability function (mean=0, standard deviation=1); it is the probability to get a Z equal or less than z.
annual_sd <- weekly_sd * sqrt(52)             # annualize the weekly sd
stock_price <- prices.df[nrow(prices.df), ]   # last observed adjusted close
strike_price = 40
# Black-Scholes closed-form price of a European call option.
#   S: spot price, K: strike, r: annual risk-free rate,
#   t: time to expiry in years, sd: annualized volatility.
# Returns S*N(d1) - K*exp(-r*t)*N(d2).
black_sholes_model_call = function(S, K, r, t, sd) {
  vol_t <- sd * sqrt(t)
  d1 <- (log(S / K) + (r + sd^2 / 2) * t) / vol_t
  d2 <- d1 - vol_t
  S * pnorm(d1) - K * exp(-r * t) * pnorm(d2)
}
# Closed-form call price at a 1-year maturity.
call = black_sholes_model_call(stock_price, strike_price, risk_free_rate, 1, annual_sd)
# Black-Scholes closed-form price of a European put option.
#   S: spot price, K: strike, r: annual risk-free rate,
#   t: time to expiry in years, sd: annualized volatility.
# Returns K*exp(-r*t)*N(-d2) - S*N(-d1).
black_sholes_model_put = function(S, K, r, t, sd) {
  vol_t <- sd * sqrt(t)
  d1 <- (log(S / K) + (r + sd^2 / 2) * t) / vol_t
  d2 <- d1 - vol_t
  K * exp(-r * t) * pnorm(-d2) - S * pnorm(-d1)
}
# Closed-form put price at a 1-year maturity.
put = black_sholes_model_put(stock_price, strike_price, risk_free_rate, 1, annual_sd)
|
# Example for chart_data_line_width(): build an iris scatter chart and set
# per-series fill, stroke, marker symbol, marker size and line width.
library(mschart)
### Name: chart_data_line_width
### Title: Modify line width
### Aliases: chart_data_line_width
### ** Examples
# One series per Species level; the `values` vectors below are named by
# series so each setting applies to a specific group.
my_scatter <- ms_scatterchart(data = iris, x = "Sepal.Length",
  y = "Sepal.Width", group = "Species")
my_scatter <- chart_settings(my_scatter, scatterstyle = "lineMarker")
my_scatter <- chart_data_fill(my_scatter,
  values = c(virginica = "#6FA2FF", versicolor = "#FF6161", setosa = "#81FF5B") )
my_scatter <- chart_data_stroke(my_scatter,
  values = c(virginica = "black", versicolor = "black", setosa = "black") )
my_scatter <- chart_data_symbol(my_scatter,
  values = c(virginica = "circle", versicolor = "diamond", setosa = "circle") )
my_scatter <- chart_data_size(my_scatter,
  values = c(virginica = 20, versicolor = 16, setosa = 20) )
# The function this example documents: per-series line widths.
my_scatter <- chart_data_line_width(my_scatter,
  values = c(virginica = 2, versicolor = 3, setosa = 6) )
| /data/genthat_extracted_code/mschart/examples/chart_data_line_width.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 887 | r | library(mschart)
# Example for chart_data_line_width(): build an iris scatter chart and set
# per-series fill, stroke, marker symbol, marker size and line width.
### Name: chart_data_line_width
### Title: Modify line width
### Aliases: chart_data_line_width
### ** Examples
# One series per Species level; the `values` vectors below are named by
# series so each setting applies to a specific group.
my_scatter <- ms_scatterchart(data = iris, x = "Sepal.Length",
  y = "Sepal.Width", group = "Species")
my_scatter <- chart_settings(my_scatter, scatterstyle = "lineMarker")
my_scatter <- chart_data_fill(my_scatter,
  values = c(virginica = "#6FA2FF", versicolor = "#FF6161", setosa = "#81FF5B") )
my_scatter <- chart_data_stroke(my_scatter,
  values = c(virginica = "black", versicolor = "black", setosa = "black") )
my_scatter <- chart_data_symbol(my_scatter,
  values = c(virginica = "circle", versicolor = "diamond", setosa = "circle") )
my_scatter <- chart_data_size(my_scatter,
  values = c(virginica = 20, versicolor = 16, setosa = 20) )
# The function this example documents: per-series line widths.
my_scatter <- chart_data_line_width(my_scatter,
  values = c(virginica = 2, versicolor = 3, setosa = 6) )
|
##### Load libraries
library(gdsfmt)
library(SNPRelate)
library(ggplot2)
library(RColorBrewer)
##### Set working directory?
# todaysdate stamps output file names; calldate identifies the call set.
todaysdate=format(Sys.Date(),format="%Y%m%d")
calldate=20200410
setwd("/u/home/d/dechavez/project-rwayne/Clup/SNPRelate")
# NOTE(review): sep="" yields ".../SNPRelate20200410/PCA/" (no slash before
# the date) -- confirm that is the intended directory layout.
plotoutdir=paste("/u/home/d/dechavez/project-rwayne/Clup/SNPRelate",calldate,"/PCA/",sep="")
dir.create(plotoutdir,recursive = T)
##### Specify VCF filename
vcf.fn <- "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs.vcf"
#vcf.fn <- "NA_CLup_joint_chr38_TrimAlt_Annot_Mask_Filter.vcf"
##### Convert VCF to GDS format
# Import the VCF into a GDS container, keeping biallelic SNPs only.
snpgdsVCF2GDS(vcf.fn, "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs.gds", method="biallelic.only")
##### Specify which individuals to keep
# sample.list=c("ALG1","CL025","CL055","CL061","CL062","CL065",
# "CL067","CL075","CL141","CL152","CL175","CL189","Clup1185",
# "Clup1694","Clup2491","Clup4267","Clup5161","Clup5558","Clup6338",
# "Clup6459","ClupRKW3624","ClupRKW3637","ClupRKW7526","Clup_SRR8049197",
# "Cruf_SRR8049200","MEX1","RED1","RKW119","RKW2455","RKW2515","RKW2518",
# "RKW2523","RKW2524","RKW7619","RKW7639","RKW7640","RKW7649","SRR7976407_Algoquin",
# "SRR7976417_red","SRR7976421_570M_YNP","SRR7976422_569F_YNP","SRR7976423_302M_YNP",
# "SRR7976425_I450_97_IRNP","SRR7976431_Mexican_NewM","SRR7976432_Minesota","YNP2","YNP3")
######## Exclude low coverage genomes <6x SRR7976425(IRNP_BV),SRR7976407(Algoquin_BV),SRR8049200(MexicWolf_TG)
# Retained sample set after dropping the three low-coverage genomes above.
sample.list=c("ALG1","CL025","CL055","CL061","CL062","CL065",
"CL067","CL075","CL141","CL152","CL175","CL189","Clup1185",
"Clup1694","Clup2491","Clup4267","Clup5161","Clup5558","Clup6338",
"Clup6459","ClupRKW3624","ClupRKW3637","ClupRKW7526","Clup_SRR8049197",
"MEX1","RED1","RKW119","RKW2455","RKW2515","RKW2518",
"RKW2523","RKW2524","RKW7619","RKW7639","RKW7640","RKW7649",
"SRR7976417_red","SRR7976421_570M_YNP","SRR7976422_569F_YNP","SRR7976423_302M_YNP",
"SRR7976431_Mexican_NewM","SRR7976432_Minesota","YNP2","YNP3")
# Subset the GDS file to the retained samples and open the result.
snpgdsCreateGenoSet("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs.gds", "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds.gds", sample.id=sample.list)
genofile <- snpgdsOpen("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds.gds")
##### Prune SNPs based on LD
# Fixed seed so the (stochastic) LD pruning is reproducible.
set.seed(1000)
snpset <- snpgdsLDpruning(genofile, ld.threshold=.2,maf=0.1)
snpset.id <- unlist(snpset)
snpgdsCreateGenoSet("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds.gds", "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds_pruned.gds", snp.id=snpset.id)
##### Close old genofile, open new genofile
snpgdsClose(genofile)
genofile <- snpgdsOpen("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds_pruned.gds")
##### Add population information
# Population label per sample, in sample order.
# NOTE(review): "Artc" looks like a typo for "Artic"; also verify this
# vector's length matches the retained sample.list. pop_code is not used
# below (tab1a reads labels from popmap instead).
pop_code=c("Canada","IRNP","IRNP","IRNP","IRNP","IRNP","IRNP","IRNP",
"IRNP","IRNP","IRNP","IRNP","Montana","Montana","Montana","Montana","Montana","Montana","Montana","Montana","YNP",
"YNP","YNP","ArticElles","NewM","NewM","CaptUSAHZ","Minesota","Minesota","Minesota",
"Minesota","Minesota","Minesota","Artic","Artc","Artic","Artic",
"Canada","CaptUSAHZ","YNP","YNP","YNP","IRNP","NewM","Minesota","YNP","YNP")
#pop_code <- read.gdsn(index.gdsn(genofile, "sample.annot/pop.group")) # <- doesn't work
##### Run PCA
# pca <- snpgdsPCA(genofile, snp.id=snpset.id, num.thread=1)
pca <- snpgdsPCA(genofile, num.thread=1)
pc.percent <- pca$varprop*100   # percent variance explained per PC
# head(round(pc.percent, 2))
# First four eigenvectors, one row per sample.
tab <- data.frame(sample.id = pca$sample.id,
                  EV1 = pca$eigenvect[,1],
                  EV2 = pca$eigenvect[,2],
                  EV3 = pca$eigenvect[,3],
                  EV4 = pca$eigenvect[,4],
                  stringsAsFactors = FALSE)
# Save eigenvectors and plot PC1 vs PC2 with base graphics.
write.table(tab, file="NA_Clup_44_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_PCA_1_2_3_4.txt", col.names=T, row.names=F, quote=F, sep='\t')
pdf("NA_Clup_44_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_PCA_1_2.pdf", width=6, height=6)
plot(tab$EV2, tab$EV1, xlab="eigenvector 2", ylab="eigenvector 1")
dev.off()
##### Plot the first 4 PCs against each other
lbls <- paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="")
pdf("NA_Clup_44_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_PCA_1_2_3_4.pdf", width=6, height=6)
pairs(pca$eigenvect[,1:4], labels=lbls)
dev.off()
########### pop map ########
#population information
popmap = read.table("/u/home/d/dechavez/project-rwayne/Clup/VCF/list.47samples.for.PCA.txt",header=T) # this includes the RWAB samples
sample.id = as.character(popmap$Sample)
pop1_code = as.character(popmap$PrimaryPop)
#make a data.frame
# Attach each sample's primary population by matching PCA sample IDs
# against the popmap's IDs.
tab1a <- data.frame(sample.id = pca$sample.id, pop1 = factor(pop1_code)[match(pca$sample.id, sample.id)],
                    EV1 = pca$eigenvect[,1],
                    EV2 = pca$eigenvect[,2],
                    EV3 = pca$eigenvect[,3],
                    EV4 = pca$eigenvect[,4],
                    stringsAsFactors = FALSE)
#head(tab1a)
############### set up your colors -- keep this consistent across all plots ######
colorPal=RColorBrewer::brewer.pal(n=8,name = "Dark2")
colors=list(IRNP=colorPal[1],Montana=colorPal[7],Artic=colorPal[6],
            Minesota=colorPal[2],YNP=colorPal[8],
            NewM=colorPal[4],CaptUSAHZ=colorPal[5],
            Canada=colorPal[3]) # your population colors
#plot first 2 pc coloring by primary population
p1a <- ggplot(tab1a,aes(x=EV1,y=EV2,color=pop1))+
  geom_point(size=3)+
  theme_bw()+
  ylab(paste("PC1", format(pc.percent[1], digits=2),"%", sep=""))+
  xlab(paste("PC2", format(pc.percent[2], digits=2),"%", sep=""))+
  ggtitle(paste("PCA based on ",as.character(length(pca$snp.id))," LD Pruned SNPs",sep=""))+
  theme(legend.title = element_blank(),axis.text = element_text(size=14),
        axis.title = element_text(size=14),legend.text = element_text(size=14))+
  scale_shape_manual(values=c(1,16))+
  scale_color_manual(values=unlist(colors))
# paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="")
#p1a
ggsave(paste(plotoutdir,"/PCA.44NAClup.",todaysdate,".pdf",sep=""),p1a,device="pdf",width = 8,height=5)
##### Create cluster dendrogram
# Identity-by-state hierarchical clustering of the retained samples.
set.seed(100)
ibs.hc <- snpgdsHCluster(snpgdsIBS(genofile, num.thread=1))
rv <- snpgdsCutTree(ibs.hc)
pdf("NA_Clup_47_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_IBScluster.pdf", width=8, height=12)
plot(rv$dendrogram, main="SNPRelate Clustering")
dev.off()
# PCA with high-coverage individuals (TODO)
| /4-Demography/PCA/snprelate_PCA_cluster_JR.R | no_license | dechavezv/2nd.paper | R | false | false | 6,343 | r | ##### Load libraries
library(gdsfmt)
library(SNPRelate)
library(ggplot2)
library(RColorBrewer)
##### Set working directory?
# todaysdate stamps output file names; calldate identifies the call set.
todaysdate=format(Sys.Date(),format="%Y%m%d")
calldate=20200410
setwd("/u/home/d/dechavez/project-rwayne/Clup/SNPRelate")
# NOTE(review): sep="" yields ".../SNPRelate20200410/PCA/" (no slash before
# the date) -- confirm that is the intended directory layout.
plotoutdir=paste("/u/home/d/dechavez/project-rwayne/Clup/SNPRelate",calldate,"/PCA/",sep="")
dir.create(plotoutdir,recursive = T)
##### Specify VCF filename
vcf.fn <- "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs.vcf"
#vcf.fn <- "NA_CLup_joint_chr38_TrimAlt_Annot_Mask_Filter.vcf"
##### Convert VCF to GDS format
# Import the VCF into a GDS container, keeping biallelic SNPs only.
snpgdsVCF2GDS(vcf.fn, "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs.gds", method="biallelic.only")
##### Specify which individuals to keep
# sample.list=c("ALG1","CL025","CL055","CL061","CL062","CL065",
# "CL067","CL075","CL141","CL152","CL175","CL189","Clup1185",
# "Clup1694","Clup2491","Clup4267","Clup5161","Clup5558","Clup6338",
# "Clup6459","ClupRKW3624","ClupRKW3637","ClupRKW7526","Clup_SRR8049197",
# "Cruf_SRR8049200","MEX1","RED1","RKW119","RKW2455","RKW2515","RKW2518",
# "RKW2523","RKW2524","RKW7619","RKW7639","RKW7640","RKW7649","SRR7976407_Algoquin",
# "SRR7976417_red","SRR7976421_570M_YNP","SRR7976422_569F_YNP","SRR7976423_302M_YNP",
# "SRR7976425_I450_97_IRNP","SRR7976431_Mexican_NewM","SRR7976432_Minesota","YNP2","YNP3")
######## Exclude low coverage genomes <6x SRR7976425(IRNP_BV),SRR7976407(Algoquin_BV),SRR8049200(MexicWolf_TG)
# Retained sample set after dropping the three low-coverage genomes above.
sample.list=c("ALG1","CL025","CL055","CL061","CL062","CL065",
"CL067","CL075","CL141","CL152","CL175","CL189","Clup1185",
"Clup1694","Clup2491","Clup4267","Clup5161","Clup5558","Clup6338",
"Clup6459","ClupRKW3624","ClupRKW3637","ClupRKW7526","Clup_SRR8049197",
"MEX1","RED1","RKW119","RKW2455","RKW2515","RKW2518",
"RKW2523","RKW2524","RKW7619","RKW7639","RKW7640","RKW7649",
"SRR7976417_red","SRR7976421_570M_YNP","SRR7976422_569F_YNP","SRR7976423_302M_YNP",
"SRR7976431_Mexican_NewM","SRR7976432_Minesota","YNP2","YNP3")
# Subset the GDS file to the retained samples and open the result.
snpgdsCreateGenoSet("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs.gds", "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds.gds", sample.id=sample.list)
genofile <- snpgdsOpen("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds.gds")
##### Prune SNPs based on LD
# Fixed seed so the (stochastic) LD pruning is reproducible.
set.seed(1000)
snpset <- snpgdsLDpruning(genofile, ld.threshold=.2,maf=0.1)
snpset.id <- unlist(snpset)
snpgdsCreateGenoSet("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds.gds", "NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds_pruned.gds", snp.id=snpset.id)
##### Close old genofile, open new genofile
snpgdsClose(genofile)
genofile <- snpgdsOpen("NA_CLup_joint_chrAll_Annot_Mask_Filter_passingSNPs_removeInds_pruned.gds")
##### Add population information
pop_code=c("Canada","IRNP","IRNP","IRNP","IRNP","IRNP","IRNP","IRNP",
"IRNP","IRNP","IRNP","IRNP","Montana","Montana","Montana","Montana","Montana","Montana","Montana","Montana","YNP",
"YNP","YNP","ArticElles","NewM","NewM","CaptUSAHZ","Minesota","Minesota","Minesota",
"Minesota","Minesota","Minesota","Artic","Artc","Artic","Artic",
"Canada","CaptUSAHZ","YNP","YNP","YNP","IRNP","NewM","Minesota","YNP","YNP")
#pop_code <- read.gdsn(index.gdsn(genofile, "sample.annot/pop.group")) # <- doesn't work
##### Run PCA
# pca <- snpgdsPCA(genofile, snp.id=snpset.id, num.thread=1)
pca <- snpgdsPCA(genofile, num.thread=1) # PCA on the LD-pruned genotype set opened above
pc.percent <- pca$varprop*100 # percent of variance explained by each PC
# head(round(pc.percent, 2))
# Collect the first four eigenvectors per sample for export and plotting
tab <- data.frame(sample.id = pca$sample.id,
EV1 = pca$eigenvect[,1],
EV2 = pca$eigenvect[,2],
EV3 = pca$eigenvect[,3],
EV4 = pca$eigenvect[,4],
stringsAsFactors = FALSE)
write.table(tab, file="NA_Clup_44_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_PCA_1_2_3_4.txt", col.names=T, row.names=F, quote=F, sep='\t')
pdf("NA_Clup_44_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_PCA_1_2.pdf", width=6, height=6)
plot(tab$EV2, tab$EV1, xlab="eigenvector 2", ylab="eigenvector 1")
dev.off()
##### Plot the first 4 PCs against each other
lbls <- paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="") # panel labels with % variance explained
pdf("NA_Clup_44_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_PCA_1_2_3_4.pdf", width=6, height=6)
pairs(pca$eigenvect[,1:4], labels=lbls)
dev.off()
########### pop map ########
# Population information: sample -> population assignments read from a file
popmap = read.table("/u/home/d/dechavez/project-rwayne/Clup/VCF/list.47samples.for.PCA.txt",header=T) # this includes the RWAB samples
sample.id = as.character(popmap$Sample)
pop1_code = as.character(popmap$PrimaryPop)
# Build a data frame of the first four PCs, matching each PCA sample to its population
tab1a <- data.frame(sample.id = pca$sample.id, pop1 = factor(pop1_code)[match(pca$sample.id, sample.id)],
EV1 = pca$eigenvect[,1],
EV2 = pca$eigenvect[,2],
EV3 = pca$eigenvect[,3],
EV4 = pca$eigenvect[,4],
stringsAsFactors = FALSE)
#head(tab1a)
############### set up your colors -- keep this consistent across all plots ######
colorPal=RColorBrewer::brewer.pal(n=8,name = "Dark2")
colors=list(IRNP=colorPal[1],Montana=colorPal[7],Artic=colorPal[6],
Minesota=colorPal[2],YNP=colorPal[8],
NewM=colorPal[4],CaptUSAHZ=colorPal[5],
Canada=colorPal[3]) # your population colors
# Plot the first two PCs colored by primary population.
# BUG FIX: the axis labels were swapped relative to the aesthetics -- the plot
# maps x=EV1 and y=EV2, but xlab said "PC2" and ylab said "PC1". Labels now
# match the plotted eigenvectors and their variance percentages.
p1a <- ggplot(tab1a,aes(x=EV1,y=EV2,color=pop1))+
geom_point(size=3)+
theme_bw()+
ylab(paste("PC2", format(pc.percent[2], digits=2),"%", sep=""))+
xlab(paste("PC1", format(pc.percent[1], digits=2),"%", sep=""))+
ggtitle(paste("PCA based on ",as.character(length(pca$snp.id))," LD Pruned SNPs",sep=""))+
theme(legend.title = element_blank(),axis.text = element_text(size=14),
axis.title = element_text(size=14),legend.text = element_text(size=14))+
scale_shape_manual(values=c(1,16))+ # no shape aesthetic is mapped, so this layer is inert
scale_color_manual(values=unlist(colors))
# paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="")
#p1a
# NOTE(review): plotoutdir and todaysdate must be defined earlier in the script.
ggsave(paste(plotoutdir,"/PCA.44NAClup.",todaysdate,".pdf",sep=""),p1a,device="pdf",width = 8,height=5)
##### Create cluster dendrogram from pairwise identity-by-state (IBS) distances
set.seed(100) # reproducible clustering
ibs.hc <- snpgdsHCluster(snpgdsIBS(genofile, num.thread=1))
rv <- snpgdsCutTree(ibs.hc)
# NOTE(review): filename says 47 samples while files above say 44 -- confirm which set is open.
pdf("NA_Clup_47_joint_chrALL_TrimAlt_Annot_VEP_Masked_Filter_passingSNPs_removeInds_noprune_IBScluster.pdf", width=8, height=12)
plot(rv$dendrogram, main="SNPRelate Clustering")
dev.off()
# PCA with high-coverage individuals
|
### Statistical graphics: package installation and loading
options(repos = c(CRAN = "http://cran.rstudio.com"))
install.packages("aplpack") # Chernoff faces
install.packages("corrplot") # correlation-matrix plots with custom colors and fonts
install.packages("ggplot2") # grammar-of-graphics plotting
install.packages("plotrix") # 3D pie charts
install.packages("rgl") # 3D graphics
install.packages("tcltk") # Tcl/Tk bindings for building graphical interfaces
install.packages("tcltk2") # additional commands on top of tcltk
install.packages("here") # project-relative file paths
installed.packages() # list the packages installed on this machine
library(grDevices) # graphics devices and support for base and grid graphics
library(tcltk)
library(aplpack)
library(corrplot)
library(ggplot2)
library(plotrix)
library(rgl)
library(tcltk2)
library(readxl)
library(here)
### Pie chart
# Load the children's BMI data set from Excel
IMCinfantil<-read_excel("D:/MaestriaDataMining-DeptoCompu/AID/IMCinfantil.xlsx")
View(IMCinfantil)
#IMCinfantil <- read.csv2("C:/Users/ceci/Datos/IMCinfantil.csv") # alternative: import IMCinfantil from CSV
attach(IMCinfantil) # put the columns on the search path (NOTE: attach() is generally discouraged)
frec.catpeso<-table(CatPeso) # frequency distribution of the weight category
pie(frec.catpeso) # basic pie chart
pie(frec.catpeso, col=rainbow(25)) # change the color palette
pie(frec.catpeso, col=rainbow(25),font=8) # change the font
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5) # change the label size
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5,radius=1) # change the pie size
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5,radius=1,border=F) # remove the border
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5,radius=1,border=F,main="Grafico de Torta") # add a title
etiquetas<-c("Deficiente","Normal","Obeso","Con sobrepeso") # labels; assumed to match the CatPeso levels (D, N, OB, SO) -- confirm
pct<-round(frec.catpeso/sum(frec.catpeso)*100) # percentage frequencies
etiquetas<-paste(etiquetas,pct) # append the percentages to the labels
etiquetas<-paste(etiquetas,"%",sep="") # append the % symbol
pie(frec.catpeso,labels =etiquetas,col=heat.colors(4,alpha=1)) # another way to pick a palette
pie(frec.catpeso,labels =etiquetas,col=terrain.colors(4,alpha=1)) # another way to pick a palette
pie(frec.catpeso,labels =etiquetas,col=topo.colors(4,alpha=1)) # another way to pick a palette
pie(frec.catpeso,labels =etiquetas,col=cm.colors(4,alpha=1)) # another way to pick a palette
pie(frec.catpeso,labels =etiquetas,col=cm.colors(4,alpha=1),main="Diagrama circular con etiquetas")
### Pie chart with volume, perspective and shading (plotrix)
pie3D(frec.catpeso) # 3D pie chart
pie3D(frec.catpeso,labels=etiquetas)
pie3D(frec.catpeso,labels=etiquetas,explode=0.1) # pull the sectors apart
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9) # change the label size
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5)
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5,height=0.2) # change the pie height
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5,height=0.2,shade=0.6) # add shading
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5,height=0.2,shade=0.6,col=terrain.colors(4:8,alpha=1))
### Bar charts - stacked and adjacent bars
par(bg="mistyrose")
barplot(table(CatPeso),main="Categorias de Peso",col="mediumpurple1") # simple bar chart
barplot(table(SEXO,CatPeso)) # stacked bars (sex within weight category)
barplot(table(SEXO,CatPeso)[,c(1,2,4,3)]) # reorder the bars
barplot(table(SEXO,CatPeso)[,c(1,2,4,3)],col=rainbow(11),main="Categorias de Peso segun Sexo")
legend("topright",cex=1,title="Sexo",c("F","M"),fill=rainbow(11),horiz=T) # horizontal legend
tabla<-table(SEXO,CatPeso)
barplot(tabla,main="Grafico de barras",horiz= TRUE,col=c("olivedrab1","springgreen1")) # horizontal bars
legend("topright",cex=0.5, title="Sexo",c("F","M"),
fill=c("olivedrab1","springgreen1"),horiz=F) # vertical legend
barplot(tabla,main="Grafico de barras",beside=TRUE,col= c("tan1","mistyrose4")) # adjacent (grouped) bars
legend("topleft",cex=0.5,title="Sexo",c("F","M"), fill=c("tan1","mistyrose4"),horiz=F) # move the legend
### Mosaic plot
tabla2=table(EDAD,CatPeso) # contingency table of age by weight category
par(bg="lightcyan")
mosaicplot(tabla2) # simple mosaic plot
mosaicplot(tabla2[,c(1,2,4,3)],col=terrain.colors(7:11),main="Grafico de Mosaicos",ylab="Categoria de Peso",xlab="Edad",
cex=0.8) # visualizes the contingency table (columns reordered D, N, SO, OB)
### Spike (needle) plot
Modelos<-2010:2016 # car model years
Ventas<-c(2,3,7,4,9,0,5) # sales count for each model year
par(bg="snow2")
plot(Modelos,Ventas) # plot the points
plot(Modelos,Ventas,type="h") # draw vertical spikes
plot(Modelos,Ventas,type="h",lty="twodash") # change the line style
plot(Modelos,Ventas,type="h",lty="dotdash",lwd=4) # change the line width
plot(Modelos,Ventas,type="h",lty="solid",lwd=4,col=heat.colors(9)) # change the colors
title("Ventas mensuales de una Agencia Chevrolet")
### Spikes drawn as individual segments
plot(Modelos,Ventas)
segments(2010,0,2010,2) # add a segment from (2010,0) to (2010,2)
segments(2010,0,2010,2,lwd=3,lty="dashed",col=1) # dashed style
segments(2011,0,2011,3,lwd=3,lty="dotted",col=2) # dotted style
segments(2012,0,2012,7,lwd=3,lty="solid",col=3) # solid style
segments(2013,0,2013,4,lwd=3,lty="dotdash",col=4) # alternating dot-dash style
segments(2014,0,2014,9,lwd=3,lty="twodash",col=5) # double-dash style
segments(2016,0,2016,5,lwd=3,lty="longdash",col=6) # long-dash style (2015 skipped: zero sales)
### Stem-and-leaf plot
datos=PESO # PESO comes from the attached IMCinfantil data set
stem(datos,scale=0.5) # text histogram that preserves the data values
stem(datos,scale=1) # change the scale
### Scatter plots in two and three variables
gorr<- read_excel("D:/MaestriaDataMining-DeptoCompu/AID/TP1/gorriones.xlsx")
gorr<-as.data.frame(gorr)
names(gorr)
plot(gorr[,2],gorr[,3],pch=16,col=1,ylim=c(0,300),xlab="Largo total",ylab="Extensión alar y largo del pico y cabeza")
points(gorr[,2],gorr[,4],pch=16,col=2) # overlay a second variable against the same x
legend(160,150,c("Extensión alar","Largo del pico y cabeza"),cex=0.7,pch=16,col=c(1,2),box.lty=0)
title("Pájaros")
attach(IMCinfantil)
base.ninios=data.frame(EDAD,PESO,TALLA,IMC,CC) # sub-table with the numeric variables of IMCinfantil
par(bg="white")
pairs(base.ninios) # all pairwise scatter plots
pairs(base.ninios,col=rainbow(dim(base.ninios)[2])) # change the colors
##### Histograms
attach(IMCinfantil)
par(bg="oldlace")
hist(PESO) # histogram of all children's weights
hist(PESO,col="maroon1") # fill the bars with color
hist(PESO,col="maroon1",density=18) # fill the bars with hatching
hist(PESO,col="maroon1",density=18,angle=70) # change the hatching angle
hist(PESO,col="maroon1",density=18,border="blueviolet") # change the border color
hist(PESO,col="maroon1",density=18,border="blueviolet",main="Histograma",ylab="Frecuencia")
R=quantile(PESO,0.75)-quantile(PESO,0.25) # interquartile range
n=length(PESO) # number of observations
h.FD=2*R*n^(-1/3) # Freedman-Diaconis suggestion for the bin width
h.Scott=3.39*sd(PESO)*n^(-1/3) # Scott suggestion for the bin width
primero=floor(min(PESO))-1 # first breakpoint of the grid
ultimo=ceiling(max(PESO))+3 # last breakpoint of the grid
grilla.FD=seq(primero,ultimo,h.FD) # breakpoint grid using the Freedman-Diaconis width
grilla.Scott=seq(primero,ultimo,h.Scott)# breakpoint grid using the Scott width
hist(PESO,breaks=grilla.FD) # change the bin width
hist(PESO,breaks=grilla.FD,col=2:8,main="Histograma de Freedman-Diaconis",ylab="Frecuencia")
hist(PESO,breaks=grilla.Scott,col=22:28,main="Histograma de Scott",ylab="Frecuencia")
##### Frequency polygon (overlaid on the histogram)
a=length(grilla.FD)
pto.medio=rep(0,a-1) # preallocate the midpoints vector
# BUG FIX: the original loop used `1:length(grilla.FD)-1`, which by operator
# precedence is (1:a)-1 = 0:(a-1), so the first iteration (i=0) was a silent
# no-op assignment. seq_len(a-1) iterates exactly over 1..a-1.
for (i in seq_len(a-1)){
pto.medio[i]=(grilla.FD[i]+grilla.FD[i+1])/2} # midpoint of each class interval
alt.dens=hist(PESO,breaks=grilla.FD,plot=F)$counts # bar height at each midpoint
par(bg="blanchedalmond")
hist(PESO,breaks=grilla.FD,col=heat.colors(a-1,alpha=1),
main="Poligono de frecuencia usando Freedman-Diaconis",
ylab="Frecuencia")
points(pto.medio,alt.dens,type="l",lwd=2) # overlay the frequency polygon on the histogram
b=length(grilla.Scott)
pto.medio=rep(0,b-1)
# Same precedence fix as above for the Scott grid
for (i in seq_len(b-1))
pto.medio[i]=(grilla.Scott[i]+grilla.Scott[i+1])/2
alt.dens=hist(PESO,breaks=grilla.Scott,plot=F)$counts
par(bg="blanchedalmond")
hist(PESO,breaks=grilla.Scott,col=heat.colors(b-1,alpha=1),main="Poligono de frecuencia usando Scott",ylab="Frecuencia")
points(pto.medio,alt.dens,type="l",lwd=2)
### Density function
par(bg="white")
dens=density(PESO) # kernel density estimate: a nonparametric estimate of the density of PESO
plot(dens,main="Densidad de Peso",xlab="Peso",ylab="Densidad") # plot the estimated density
polygon(dens,lwd=2,col="khaki1",border="khaki4",main="Densidad de Peso") # change fill and border colors
hist(PESO,col=cm.colors(8,alpha=1),probability=T,breaks=grilla.Scott,main="Suavizado normal",ylab="Densidad") # density-scale histogram
xfit=seq(min(PESO),max(PESO),length=40) # grid of data values
yfit=dnorm(xfit,mean=mean(PESO),sd=sd(PESO)) # normal curve with the sample mean and sd
lines(xfit,yfit,col="dodgerblue",lwd=2) # overlay the normal curve on the histogram
### Empirical distribution function
par(mfrow=c(1,2)) # split the plotting area into two columns
plot.ecdf(PESO,col="magenta",main="Peso",ylab="F(x)") # empirical CDF
plot.ecdf(TALLA,col="chartreuse1",main="Talla",ylab="F(x)")
par(mfrow=c(1,1)) # restore a single plotting panel
n=length(PESO)
plot(stepfun(1:(n-1),sort(PESO)),main="Funcion escalonada") # another way to build and plot the cumulative step function
plot(stepfun(1:(n-1),sort(PESO)),main="Funcion escalonada",col="coral",lwd=2,ylab="F(x)")
### Boxplot
muestra=c(14,18,24,26,35,39,43,45,56,62,68,92,198)
Md=median(muestra)
summary(muestra)
Q1=quantile(muestra,0.25)
Q3=quantile(muestra,0.75)
DI=Q3-Q1 # interquartile range
Q3+1.5*DI # upper inner fence
Q1-1.5*DI # lower inner fence
Q3+3*DI # upper outer fence
Q1-3*DI # lower outer fence
attach(IMCinfantil)
par(mfrow=c(1,2),oma=c(0,0,2,0)) # customize the plotting area (outer margin for a shared title)
boxplot(PESO) # basic boxplot
boxplot(PESO,horizontal=T) # horizontal boxplot
mtext("Graficos de cajas basicos", outer = TRUE, cex = 1.5) # shared title for both panels
par(mfrow=c(1,1),col.main="aquamarine4",adj=0) # change the title color and position
boxplot(PESO,horizontal=T,boxcol=2) # color the box border
boxplot(PESO,horizontal=T,col=3) # color the box interior
par(mfrow=c(1,1),col.main="aquamarine4",adj=1) # change the title color and position
boxplot(PESO,horizontal=T,col="antiquewhite",boxcol="antiquewhite4",main="Distribucion del Peso")
### Parallel boxplots
par(col.main="aquamarine3",adj=0.5)
boxplot(CC~CatPeso) # one boxplot per weight category
boxplot(split(CC,CatPeso)) # same as above
boxplot(CC~CatPeso,horizontal=T) # horizontal layout
IMCinfantil$CatPeso<-ordered(IMCinfantil$CatPeso,levels=c("D","N","SO","OB")) # reorder the boxes
with(IMCinfantil,boxplot(CC~CatPeso)) # boxplot with the new order
with(IMCinfantil,boxplot(CC~CatPeso,boxcol=topo.colors(5),col=terrain.colors(5),main="Circunferencia de cintura segun peso"))
par(col.main="black")
boxplot(PESO~SEXO*CatPeso,data=IMCinfantil) # one box per sex-by-category combination
boxplot(PESO~SEXO*CatPeso,data=IMCinfantil,notch=T) # notched boxes
boxplot(PESO~SEXO*CatPeso,data=IMCinfantil,notch=T,col=(c("gold","darkgreen")),
main="Pesos por categoria y sexo",cex.axis=0.7, xlab="Categorias")
### Correlation plots
attach(IMCinfantil)
base.ninios=data.frame(EDAD,PESO,TALLA,IMC,CC) # sub-table with the numeric variables of IMCinfantil
base.ninios$CC=max(base.ninios$CC)-base.ninios$CC # flip one variable so it correlates negatively with the rest
M=cor(base.ninios) # correlation matrix of the table's variables
M
cov(base.ninios)
var(base.ninios)# same as cov() for a data frame
corrplot(M,method="circle") # correlations as circles
corrplot(M,method="square") # correlations as squares
corrplot(M,method="ellipse") # correlations as ellipses
corrplot(M,method="number") # correlations as numbers
corrplot(M,method="shade") # correlations as shaded cells
corrplot(M,method="pie") # correlations as pie glyphs
corrplot(M,type="upper") # upper triangle only
corrplot(M,type="lower") # lower triangle only
corrplot(M,method="ellipse",type="upper") # styles can be combined
corrplot.mixed(M) # circles and numbers combined
corrplot.mixed(M,lower="circle",upper="shade") # a different style per triangle
par(mfrow=c(1,1))
### Level (filled-contour) plots
# Build a symmetric grid over [-4*pi, 4*pi] and the radial distance from the
# origin at each grid node, then draw its filled contour twice (with and
# without the plot axes).
x <- seq(-4 * pi, 4 * pi, length.out = 27)
y <- x
r <- sqrt(outer(x^2, y^2, "+"))
z <- exp(-0.1 * r)
filled.contour(z, axes = FALSE) # contour levels of the radial surface
filled.contour(z, frame.plot = FALSE, plot.axes = {}) # same surface; keeps the color key, suppresses axes
### Chernoff faces
par(mfrow=c(1,1),adj=0)
par(col.main="blue") # change the title color
galle=read_excel("D:/MaestriaDataMining-DeptoCompu/AID/galletitasCO.xlsx")
galle.salad=galle[c(1:3,7,15:17),] # savory crackers subset
galle.dulce=galle[c(4:6,8:14),] # sweet cookies subset
galle.salad.mat<-as.matrix(galle.salad[,2:6],nrow=7,ncol=5)
mode(galle.salad.mat)<-"numeric" # coerce the matrix contents to numeric
galle.dulce.mat<-as.matrix(galle.dulce[,2:6],nrow=10,ncol=5)
mode(galle.dulce.mat)<-"numeric"
rownames(galle.salad.mat)<-galle.salad$Marca # label the rows with the brand name
rownames(galle.dulce.mat)<-galle.dulce$Marca
faces(galle.salad.mat)# Chernoff faces plot
faces(galle.salad.mat,nrow.plot=3) # adjust the face height
faces(galle.salad.mat,ncol.plot=4) # set the number of faces per row
faces(galle.salad.mat,face.type=0) # uncolored faces
faces(galle.salad.mat,face.type=2) # change the face style
faces(galle.salad.mat,labels=galle.salad$Marca) # label the faces
title("Caritas de Chernoff saladas",outer=TRUE) # add a title
faces(galle.dulce.mat,nrow.plot=3,ncol.plot=5,face.type=2,labels=galle.dulce$Marca)
title("Galletitas Dulces",outer=TRUE)
### Star plots
par(col.main="black",adj=0.5)
stars(galle.salad.mat) # basic star plot
stars(galle.salad.mat,full=T) # full-circle stars
stars(galle.salad.mat,full=F) # half-circle (perspective) stars
stars(galle.salad.mat,radius=F) # omit the radii
stars(galle.salad.mat,axes=T) # draw the axes
stars(galle.salad.mat,frame.plot=T) # frame the plot
stars(galle.salad.mat,draw.segments=T) # segment (rose) style
stars(galle.salad.mat,col.lines=rainbow(15)) # change the line colors
stars(galle.salad.mat,cex=0.8,flip.labels=T) # change the label placement
stars(galle.salad.mat,cex=0.8,flip.labels=F,len=0.8) # change the star size
stars(galle.salad.mat,cex=0.8,flip.labels=F,len=0.8,col.stars=terrain.colors(7)) # fill the star interiors
stars(galle.salad.mat,cex=0.8,flip.labels=F,len=0.8,col.stars=terrain.colors(7),ncol=4,frame.plot=T,main="Galletitas saladas")
stars(galle.dulce.mat,full=T,draw.segments=T,cex=0.9,len=0.8,ncol=4,frame.plot=T,main="Galletitas dulces")
### mtcars example
cars=mtcars[1:9,] # first nine cars of the built-in mtcars data set
stars(cars,cex=0.7,col.stars=c("red","green","orange","gold","blue",
"yellow", "pink","purple","cyan"))
title("Grafico de Estrellas")
par(mfrow=c(1,3)) # three variants side by side
stars(galle.salad.mat,ncol=2,full=F)
stars(galle.salad.mat,ncol=2,axes=T)
stars(galle.salad.mat,ncol=2,col.lines=rainbow(15))
######################
#### Row-wise transformations
#######################
# NOTE(review): this path mixes here() with "../.." segments; confirm it
# resolves to exercises/capitulo_2/ds/recepcionistas.xls from the project root.
recep<- read_excel(here("labs", "lab3", "resources", "../../../exercises/capitulo_2/ds/recepcionistas.xls"))
recep<-as.data.frame(recep)
colnames(recep)<-c("candidatos","cordialidadJuez1","presenciaJuez1","idiomaJuez1","cordialidadJuez2","presenciaJuez2","idiomaJuez2")
attach(recep)
# Boxplots to visualize score differences between the two judges
par(mfrow=c(1,1))
boxplot(recep[,c(2,5)],horizontal=T,col=c("seagreen1","salmon"),main="Puntaje de cordialidad segun juez")
boxplot(recep[,c(3,6)],horizontal=T,col=c("seagreen1","salmon"),main="Puntaje de presencia segun juez")
boxplot(recep[,c(4,7)],horizontal=T,col=c("seagreen1","salmon"),main="Puntaje de idioma segun juez")
# Rebuild a long-format table stacking both judges' scores, with a new column identifying the judge
recep2<-recep
colnames(recep2)<-NULL
CaracJuez1<-cbind(recep2[,1:4],rep(1,nrow(recep2))) # judge 1 block
colnames(CaracJuez1)<-c("candidatos","cordialidad","presencia","idioma","juez")
CaracJuez2<-cbind(recep2[,1],recep2[,5:7],rep(2,nrow(recep2))) # judge 2 block
colnames(CaracJuez2)<-c("candidatos","cordialidad","presencia","idioma","juez")
recepUnion<-rbind(CaracJuez1,CaracJuez2)
### Row-wise transformation of the data
mediasF=apply(recep[,-1],1,mean) # per-row (candidate) means
rangosF=apply(recep[,-1],1,max)-apply(recep[,-1],1,min) # per-row ranges
deviosF=apply(recep[,-1],1,sd) # per-row standard deviations
# Recycling runs down the columns; because these vectors have exactly one
# entry per row of recep, the subtraction centers each row as intended.
rec.transF=(recep[,-1]-mediasF)/rangosF
rec.transF.2=(recep[,-1]-mediasF)/deviosF
# check that the rows now have mean 0 and standard deviation 1
apply(rec.transF.2,1,mean)
apply(rec.transF.2,1,sd)
# scale() standardizes the COLUMNS of a matrix, so transpose first to standardize the original rows
estandarizoFil<-scale(t(recep[,-1]),center=T,scale=TRUE)# note the transpose so the original rows are affected
# check that the rows have mean 0 and standard deviation 1
apply(t(estandarizoFil),1,mean)
apply(t(estandarizoFil),1,sd)
### Row-wise transformation of the scores, split by judge (long-format table)
medias=apply(recepUnion[,2:4],1,mean) # per-row means
rangos=apply(recepUnion[,2:4],1,max)-apply(recepUnion[,2:4],1,min) # per-row ranges
devios=apply(recepUnion[,2:4],1,sd) # per-row standard deviations
rec.trans=(recepUnion[,2:4]-medias)/rangos # range-standardized scores
# BUG FIX: the original divided by `desvios`, a name not defined at this point
# in the script (the row-wise standard deviations are stored in `devios` two
# lines above), so this line failed with "object 'desvios' not found".
rec.trans.2=(recepUnion[,2:4]-medias)/devios # z-score standardized scores
# Parallel-coordinates plot: one line per candidate, colored by judge
plot(1:3,rec.trans.2[1,1:3],type="l",col=4,lwd=2,xlab=" ",
ylim=c(-2,2),ylab="Puntuación estandarizada",xlim=c(1,3.5),xaxt="n")
axis(1, at=1:3,labels=c("Cordialidad","Presencia","Idioma"), las=2)
for(i in 2:6){
points(1:3,rec.trans.2[i,1:3],type="l",col=4,lwd=2) # rows 1-6 come from judge 1 (blue)
}
for(j in 7:12){
points(1:3,rec.trans.2[j,1:3],type="l",col=6,lwd=2) # rows 7-12 come from judge 2 (magenta)
}
mtext("Comparación de candidatas según gráfico de coordenadas paralelas",line=1,font=2)
legend.text=c("Juez 1","Juez 2")
legend(3.1,0,legend.text,text.col=c(4,6),lty=1,col=c(4,6),lwd=2,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
# Profile plot: mean score per criterion, one line per judge
MediaJuez1<-apply(recepUnion[1:6,2:4],2,mean)
MediaJuez2<-apply(recepUnion[7:12,2:4],2,mean)
plot(1:3,MediaJuez1,type="l",col=4,lwd=2,xlab=" ",
ylim=c(50,90),ylab="Media de Puntajes",xlim=c(1,3.5),xaxt="n")
axis(1, at=1:3,labels=c("Cordialidad","Presencia","Idioma"), las=2)
points(1:3,MediaJuez2,type="l",col=6,lwd=2)
mtext("Comparación de puntajes por Juez según gráfico de perfiles",line=1,font=2)
legend.text=c("Juez 1","Juez 2")
legend(3.1,70,legend.text,text.col=c(4,6),lty=1,col=c(4,6),lwd=2,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
## Visualizing differences between judges
# Rebuild the matrix of row-standardized variables, adding the judge id column, for boxplots
J1<-cbind(rec.transF.2[,1:3],rep(1,nrow(rec.transF.2)))
colnames(J1)<-c("cordialidad","presencia","idioma","juez")
J2<-cbind(rec.transF.2[,4:6],rep(2,nrow(rec.transF.2)))
colnames(J2)<-c("cordialidad","presencia","idioma","juez")
J1J2<-rbind(J1,J2)
boxplot(split(J1J2$cordialidad,J1J2$juez),horizontal=T,col=c("royalblue","navajowhite"),main="Puntaje de cordialidad segun juez")
boxplot(split(J1J2$presencia,J1J2$juez),horizontal=T,col=c("royalblue","navajowhite"),main="Puntaje de presencia segun juez")
boxplot(split(J1J2$idioma,J1J2$juez),horizontal=T,col=c("royalblue","navajowhite"),main="Puntaje de idioma segun juez")
# Profile comparison: one line per criterion across the 12 stacked rows
plot(1:12,rec.trans$cordialidad,type="o",col="red1",lwd=2,xlab="Candidatas",
ylim=c(-1,1),ylab="Puntuación estandarizada",xlim=c(1,12))
points(1:12,rec.trans$presencia,type="o",col="olivedrab1",lwd=2)
points(1:12,rec.trans$idioma,type="o",col="turquoise1",lwd=2)
title("Comparación de perfiles")
legend.text=c("Cordialidad","Presencia","Idioma")
legend(10,1,legend.text,text.col=c("red1","olivedrab1","turquoise1"),
cex=0.7,text.width=1.5,box.lty=0,bty="n")
# Same profile plot, with candidate names on the x axis colored by judge
plot(1:12,rec.trans$cordialidad,type="o",col="red1",lwd=2,xlab=" ",
ylim=c(-1,1),ylab="Puntuación estandarizada",xlim=c(1,12),xaxt="n")
# One axis() call per tick so each label can take its own color
# (judge 1 = blue, judge 2 = magenta).
# BUG FIX: the original used at=1:13, but there are only 12 rows/labels;
# mapply (used by Map) errors when argument lengths (13 vs 12) are not
# multiples of each other.
Map(axis, side=1, at=1:12, col.axis=c(rep(4,6),rep(6,6)), labels=recepUnion[,1], las=2)
#axis(1, at=1:12,labels=FALSE, las=2)
points(1:12,rec.trans$presencia,type="o",col="olivedrab1",lwd=2)
points(1:12,rec.trans$idioma,type="o",col="turquoise1",lwd=2)
title("Comparación de perfiles")
legend.text=c("Cordialidad","Presencia","Idioma")
legend(10,1,legend.text,text.col=c("red1","olivedrab1","turquoise1"),
cex=0.7,text.width=1.5,box.lty=0,bty="n")
legend(2,-0.8,"Juez 1",text.col=4,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
legend(7,-0.8,"Juez 2",text.col=6,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
#################################
## Column-wise transformations
##################################
estandarizoCol<-scale(recepUnion[,2:4],center=T,scale=TRUE) # standardize each score column
# check that the columns have mean 0 and standard deviation 1
apply(estandarizoCol,2,mean)
apply(estandarizoCol,2,sd)
### first goal: make the variables comparable
galle=read_excel("D:/MaestriaDataMining-DeptoCompu/AID/galletitasCO.xlsx")
galle.salad=galle[c(1:3,7,15:17),] # savory crackers subset
galle.dulce=galle[c(4:6,8:14),] # sweet cookies subset
galle.salad.mat<-as.matrix(galle.salad[,2:6],nrow=7,ncol=5)
mode(galle.salad.mat)<-"numeric" # coerce the matrix contents to numeric
galle.dulce.mat<-as.matrix(galle.dulce[,2:6],nrow=10,ncol=5)
mode(galle.dulce.mat)<-"numeric"
rownames(galle.salad.mat)<-galle.salad$Marca # label the rows with the brand name
rownames(galle.dulce.mat)<-galle.dulce$Marca
gallet<-as.data.frame(galle[,2:6]) # nutrient columns as a data frame
gallett<-matrix(as.numeric(unlist(gallet)),nrow=dim(gallet)[1]) # same data as a guaranteed-numeric matrix
# Per-column mean and standard deviation
medias=apply(gallett,2,mean)# use the numeric matrix gallett: apply() on the raw data frame can fail if any column is non-numeric
# CONSISTENCY FIX: compute the deviations from gallett as well; the original
# used gallet here, contradicting the note above and risking the same
# coercion error that motivated building gallett.
desvios=apply(gallett,2,sd)
marcas=dim(gallet)[1] # number of brands (rows)
variab=dim(gallet)[2] # number of variables (columns)
# Convert to comparable (standardized) variables
med=matrix(rep(medias,marcas),byrow=T,nrow=marcas) # column means replicated across the rows
des=matrix(rep(desvios,marcas),byrow=T,nrow=marcas) # column sds replicated across the rows
gall.tran=(gallett-med)/des# equivalent to scale(gallett,center=T,scale=T)
# verify the transformation
round(apply(gall.tran,2,mean),3)# expect 0 0 0 0 0
round(apply(gall.tran,2,sd),3)# expect 1 1 1 1 1
gall.trans<-as.data.frame(gall.tran)
colnames(gall.trans)<-colnames(gallet)
head(gall.trans)
attach(gall.trans)
nombres=c("Calorias","Carbohidratos","Proteinas","Grasas","Sodio")
boxplot(gall.trans,col=terrain.colors(8),names=nombres,
cex.axis=0.6, ylab="",main="Valores nutricionales")
| /AID/labs/lab3/resources/AID20_Clase3_CO.R | no_license | dhruszecki/cdatos-uba | R | false | false | 23,688 | r | ### Graficos Estadisticos
options(repos = c(CRAN = "http://cran.rstudio.com"))
install.packages("aplpack") # permite hacer caritas de Chernov
install.packages("corrplot") # permite personalizar colores y estilos de fuente para graficos
install.packages("ggplot2") # permite realizar graficos con movimiento
install.packages("plotrix") # permite realizar graficos de torta con volumen
install.packages("rgl") # permite realizar graficos en 3D
install.packages("tcltk") # posee comandos de lenguaje de herramientas para la creacion de interfases graficas
install.packages("tcltk2") # posee comandos adicionales a tcltk
install.packages("here") # posee comandos adicionales a tcltk
installed.packages() # muestra los paquetes que estan instalados en el dispositivo
library(grDevices) # Equipos graficos y soporte para la base y la red de graficos
library(tcltk)
library(aplpack)
library(corrplot)
library(ggplot2)
library(plotrix)
library(rgl)
library(tcltk2)
library(readxl)
library(here)
### Diagrama circular
IMCinfantil<-read_excel("D:/MaestriaDataMining-DeptoCompu/AID/IMCinfantil.xlsx")
View(IMCinfantil)
#IMCinfantil <- read.csv2("C:/Users/ceci/Datos/IMCinfantil.csv") # importa la base IMCinfantil
attach(IMCinfantil) # carga la base en la memoria activa
frec.catpeso<-table(CatPeso) # construye la distribucion de frecuencias
pie(frec.catpeso) # dibuja el diagrama circular
pie(frec.catpeso, col=rainbow(25)) # cambia la gama de colores
pie(frec.catpeso, col=rainbow(25),font=8) # cambia el tipo de letra
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5) # cambia el tamaño de letra
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5,radius=1) # cambia el tamaño de la torta
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5,radius=1,border=F) # quita el borde
pie(frec.catpeso, col=rainbow(25),font=8,cex=1.5,radius=1,border=F,main="Grafico de Torta") # pone nombre
etiquetas<-c("Deficiente","Normal","Obeso","Con sobrepeso") # define etiquetas
pct<-round(frec.catpeso/sum(frec.catpeso)*100) # calcula las frecuencias porcentuales
etiquetas<-paste(etiquetas,pct) # agrega los porcentajes a las etiquetas
etiquetas<-paste(etiquetas,"%",sep="") # agrega el simbolo % a los porcentajes
pie(frec.catpeso,labels =etiquetas,col=heat.colors(4,alpha=1)) # otra manera de asignar una paleta de colores
pie(frec.catpeso,labels =etiquetas,col=terrain.colors(4,alpha=1)) # otra manera de asignar una paleta de colores
pie(frec.catpeso,labels =etiquetas,col=topo.colors(4,alpha=1)) # otra manera de asignar una paleta de colores
pie(frec.catpeso,labels =etiquetas,col=cm.colors(4,alpha=1)) # otra manera de asignar una paleta de colores
pie(frec.catpeso,labels =etiquetas,col=cm.colors(4,alpha=1),main="Diagrama circular con etiquetas")
### con volumen perspectiva y sombra
pie3D(frec.catpeso) # grafica una torta con volumen
pie3D(frec.catpeso,labels=etiquetas)
pie3D(frec.catpeso,labels=etiquetas,explode=0.1) # separa los sectores
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9) # cambia el tamaño de las etiquetas
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5)
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5,height=0.2) # cambia el alto de la torta
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5,height=0.2,shade=0.6) # sombrea
pie3D(frec.catpeso,labels=etiquetas,explode=0.1,labelcex=0.9,radius=1.5,height=0.2,shade=0.6,col=terrain.colors(4:8,alpha=1))
### Diagramas de barras- barras adyacentes
par(bg="mistyrose")
barplot(table(CatPeso),main="Categorias de Peso",col="mediumpurple1") # hace un grafico de barras simple
barplot(table(SEXO,CatPeso)) # hace un gr?fico de barras superpuesto
barplot(table(SEXO,CatPeso)[,c(1,2,4,3)]) # cambia el orden de las barras
barplot(table(SEXO,CatPeso)[,c(1,2,4,3)],col=rainbow(11),main="Categorias de Peso segun Sexo")
legend("topright",cex=1,title="Sexo",c("F","M"),fill=rainbow(11),horiz=T) # asigna leyendas en posici?n horizontal
tabla<-table(SEXO,CatPeso)
barplot(tabla,main="Grafico de barras",horiz= TRUE,col=c("olivedrab1","springgreen1")) # hace un gr?fico de barras horizontales
legend("topright",cex=0.5, title="Sexo",c("F","M"),
fill=c("olivedrab1","springgreen1"),horiz=F) # asigna leyendas en posici?n vertical
barplot(tabla,main="Grafico de barras",beside=TRUE,col= c("tan1","mistyrose4")) # hace un grafico de barras adyacentes
legend("topleft",cex=0.5,title="Sexo",c("F","M"), fill=c("tan1","mistyrose4"),horiz=F) # cambia la ubicacion de las leyendas
### Grafico de mosaicos
tabla2=table(EDAD,CatPeso)
par(bg="lightcyan")
mosaicplot(tabla2) # hace un grafico de mosaicos simple
mosaicplot(tabla2[,c(1,2,4,3)],col=terrain.colors(7:11),main="Grafico de Mosaicos",ylab="Categoria de Peso",xlab="Edad",
cex=0.8) # este grafico permite visualizar una tabla de contingencia
### Grafico de bastones
Modelos<-2010:2016 # ingresa los modelos de los autos
Ventas<-c(2,3,7,4,9,0,5) # ingresa las frecuencias de las ventas de cada modelo
par(bg="snow2")
plot(Modelos,Ventas) # grafica los puntos
plot(Modelos,Ventas,type="h") # grafica bastones
plot(Modelos,Ventas,type="h",lty="twodash") #cambia el estilo de la l?nea
plot(Modelos,Ventas,type="h",lty="dotdash",lwd=4) # cambia el grosor
plot(Modelos,Ventas,type="h",lty="solid",lwd=4,col=heat.colors(9)) # cambia el color
title("Ventas mensuales de una Agencia Chevrolet")
### Bastones como segmentos
plot(Modelos,Ventas)
segments(2010,0,2010,2) # agrega un segmento del punto (2010,0) al punto (2010,2)
segments(2010,0,2010,2,lwd=3,lty="dashed",col=1) # estilo rayado
segments(2011,0,2011,3,lwd=3,lty="dotted",col=2) # estilo punteado
segments(2012,0,2012,7,lwd=3,lty="solid",col=3) # estilo s?lido
segments(2013,0,2013,4,lwd=3,lty="dotdash",col=4) # alterna estilos punteado y rayado
segments(2014,0,2014,9,lwd=3,lty="twodash",col=5) # estilo doble rayado
segments(2016,0,2016,5,lwd=3,lty="longdash",col=6) # estilo rayado largo
### Diagrama de tallo hoja
datos=PESO
stem(datos,scale=0.5) # da un histograma en el que se pueden apreciar los valores
stem(datos,scale=1) # cambia la escala
### Diagrama de dispersion en dos y tres variables
gorr<- read_excel("D:/MaestriaDataMining-DeptoCompu/AID/TP1/gorriones.xlsx")
gorr<-as.data.frame(gorr)
names(gorr)
plot(gorr[,2],gorr[,3],pch=16,col=1,ylim=c(0,300),xlab="Largo total",ylab="Extensión alar y largo del pico y cabeza")
points(gorr[,2],gorr[,4],pch=16,col=2)
legend(160,150,c("Extensión alar","Largo del pico y cabeza"),cex=0.7,pch=16,col=c(1,2),box.lty=0)
title("Pájaros")
attach(IMCinfantil)
base.ninios=data.frame(EDAD,PESO,TALLA,IMC,CC) # arma una sub-base con las variables num?ricas de IMCinfantil
par(bg="white")
pairs(base.ninios) # representa todos los diagramas de dispersion de a pares
pairs(base.ninios,col=rainbow(dim(base.ninios)[2])) # cambia color
##### Histogramas
attach(IMCinfantil)
par(bg="oldlace")
hist(PESO) # grafica el histograma de los pesos de todos los niños
hist(PESO,col="maroon1") # rellena las barras con color
hist(PESO,col="maroon1",density=18) # rellena las barras con rayas
hist(PESO,col="maroon1",density=18,angle=70) # cambia la inclinacion del rayado
hist(PESO,col="maroon1",density=18,border="blueviolet") # cambia el color de los bordes
hist(PESO,col="maroon1",density=18,border="blueviolet",main="Histograma",ylab="Frecuencia")
R=quantile(PESO,0.75)-quantile(PESO,0.25) # calcula el rango intercuartil
n=length(PESO) # guarda la cantidad de observaciones
h.FD=2*R*n^(-1/3) # sugerencia de Freedman-Diaconis para el ancho de clase
h.Scott=3.39*sd(PESO)*n^(-1/3) # sugerencia de Scott para el ancho de clase
primero=floor(min(PESO))-1 # guarda primer valor de la grilla
ultimo=ceiling(max(PESO))+3 # guarda ultimo valor de la grilla
grilla.FD=seq(primero,ultimo,h.FD) # defino primer valor de la grilla de Freedman Diaconis
grilla.Scott=seq(primero,ultimo,h.Scott)# defino primer valor de la grilla de Scott
hist(PESO,breaks=grilla.FD) # cambia el ancho de las columnas
hist(PESO,breaks=grilla.FD,col=2:8,main="Histograma de Freedman-Diaconis",ylab="Frecuencia")
hist(PESO,breaks=grilla.Scott,col=22:28,main="Histograma de Scott",ylab="Frecuencia")
##### Poligono de frecuencias
a=length(grilla.FD)
pto.medio=rep(0,a-1) # inicia un vector
for (i in 1:length(grilla.FD)-1){
pto.medio[i]=(grilla.FD[i]+grilla.FD[i+1])/2} # calcula los puntos medios de los intervalos
alt.dens=hist(PESO,breaks=grilla.FD,plot=F)$counts # calcula la altura correspondiente a cada punto medio
par(bg="blanchedalmond")
hist(PESO,breaks=grilla.FD,col=heat.colors(a-1,alpha=1),
main="Poligono de frecuencia usando Freedman-Diaconis",
ylab="Frecuencia")
points(pto.medio,alt.dens,type="l",lwd=2) # superpone el poligono de frecuencias al histograma
b=length(grilla.Scott)
pto.medio=rep(0,b-1)
for (i in 1:length(grilla.Scott)-1)
pto.medio[i]=(grilla.Scott[i]+grilla.Scott[i+1])/2
alt.dens=hist(PESO,breaks=grilla.Scott,plot=F)$counts
par(bg="blanchedalmond")
hist(PESO,breaks=grilla.Scott,col=heat.colors(b-1,alpha=1),main="Poligono de frecuencia usando Scott",ylab="Frecuencia")
points(pto.medio,alt.dens,type="l",lwd=2)
### Density function (Funcion de densidad)
par(bg="white")
dens=density(PESO) # kernel density estimation: a non-parametric estimate of the density of PESO
plot(dens,main="Densidad de Peso",xlab="Peso",ylab="Densidad") # plot the estimated density of the PESO variable
polygon(dens,lwd=2,col="khaki1",border="khaki4",main="Densidad de Peso") # change fill and border colours of the curve
hist(PESO,col=cm.colors(8,alpha=1),probability=T,breaks=grilla.Scott,main="Suavizado normal",ylab="Densidad") # density-scale histogram
xfit=seq(min(PESO),max(PESO),length=40) # build a grid of data values
yfit=dnorm(xfit,mean=mean(PESO),sd=sd(PESO)) # normal curve with PESO's sample mean and sd
lines(xfit,yfit,col="dodgerblue",lwd=2) # overlay the normal smoothing on the histogram
### Funcion de distribucion empirica
par(mfrow=c(1,2)) # dividimos el area de graficos en dos columnas
plot.ecdf(PESO,col="magenta",main="Peso",ylab="F(x)") # dibuja la funcion de distribucion empirica
plot.ecdf(TALLA,col="chartreuse1",main="Talla",ylab="F(x)")
par(mfrow=c(1,1)) # unifica la pantalla de graficos
n=length(PESO)
plot(stepfun(1:(n-1),sort(PESO)),main="Funcion escalonada") # otra manera de definir y graficar la funcion acumulada
plot(stepfun(1:(n-1),sort(PESO)),main="Funcion escalonada",col="coral",lwd=2,ylab="F(x)")
### Boxplot
# Manual computation of boxplot statistics for a small sample
# (note the extreme value 198 at the end).
muestra=c(14,18,24,26,35,39,43,45,56,62,68,92,198)
Md=median(muestra)
summary(muestra)
Q1=quantile(muestra,0.25)
Q3=quantile(muestra,0.75)
DI=Q3-Q1 # interquartile range
Q3+1.5*DI # upper inner fence: values beyond it are mild outliers
Q1-1.5*DI # lower inner fence
Q3+3*DI # upper outer fence: values beyond it are extreme outliers
Q1-3*DI # lower outer fence
attach(IMCinfantil)
par(mfrow=c(1,2),oma=c(0,0,2,0)) # personaliza el espacio de grafico
boxplot(PESO) # realiza un boxplot basico
boxplot(PESO,horizontal=T) # realiza un boxplot horizontal
mtext("Graficos de cajas basicos", outer = TRUE, cex = 1.5) # pone un titulo para ambos graficos
par(mfrow=c(1,1),col.main="aquamarine4",adj=0) # cambia el color y la posicion del titulo
boxplot(PESO,horizontal=T,boxcol=2) # colorea el borde de la caja
boxplot(PESO,horizontal=T,col=3) # colorea el interior de la caja
par(mfrow=c(1,1),col.main="aquamarine4",adj=1) # cambia el color y la posicion del titulo
boxplot(PESO,horizontal=T,col="antiquewhite",boxcol="antiquewhite4",main="Distribucion del Peso")
### Boxplots paralelos
par(col.main="aquamarine3",adj=0.5)
boxplot(CC~CatPeso) # hace un boxplot para cada categoria de peso
boxplot(split(CC,CatPeso)) # idem anterior
boxplot(CC~CatPeso,horizontal=T) # grafica horizontalmente
IMCinfantil$CatPeso<-ordered(IMCinfantil$CatPeso,levels=c("D","N","SO","OB")) # cambia el orden de las cajas
with(IMCinfantil,boxplot(CC~CatPeso)) # hace el boxplot con el orden cambiado
with(IMCinfantil,boxplot(CC~CatPeso,boxcol=topo.colors(5),col=terrain.colors(5),main="Circunferencia de cintura segun peso"))
par(col.main="black")
boxplot(PESO~SEXO*CatPeso,data=IMCinfantil) # otra manera de relaizar un grafico de cajas
boxplot(PESO~SEXO*CatPeso,data=IMCinfantil,notch=T) # cambia el estilo de las cajas
boxplot(PESO~SEXO*CatPeso,data=IMCinfantil,notch=T,col=(c("gold","darkgreen")),
main="Pesos por categoria y sexo",cex.axis=0.7, xlab="Categorias")
### Graficos de correlacion
attach(IMCinfantil)
base.ninios=data.frame(EDAD,PESO,TALLA,IMC,CC) # arma una sub-base con las variables numericas de IMCinfantil
base.ninios$CC=max(base.ninios$CC)-base.ninios$CC # cambiamos una variable para que correlacione en forma negativa con las restantes
M=cor(base.ninios) # calcula la matriz de correlacion de las variables de la base
M
cov(base.ninios)
var(base.ninios)#idem anterior
corrplot(M,method="circle") # representa la matriz de correlaciones mediante circulos
corrplot(M,method="square") # representa la matriz de correlaciones mediante cuadrados
corrplot(M,method="ellipse") # representa la matriz de correlaciones mediante elipses
corrplot(M,method="number") # representa la matriz de correlaciones mediante numeros
corrplot(M,method="shade") # representa la matriz de correlaciones mediante sombreandos
corrplot(M,method="pie") # representa la matriz de correlaciones mediante graficos de torta
corrplot(M,type="upper") # representa solo la parte superior de la matriz de correlacion
corrplot(M,type="lower") # representa s?lo la parte inferior de la matriz de correlaci?n
corrplot(M,method="ellipse",type="upper") # permite combinaciones de estilos
corrplot.mixed(M) # representa la matriz de correlacion combinando circulos y numeros
corrplot.mixed(M,lower="circle",upper="shade") # permite combinaciones de estilos por bloques
par(mfrow=c(1,1))
### Graficos de nivel
x=y=seq(-4*pi,4*pi,len=27)
r=sqrt(outer(x^2,y^2,"+"))
filled.contour(exp(-0.1*r),axes=FALSE) # grafica las curvas de nivel del cono dado porla funcion r
filled.contour(exp(-0.1*r),frame.plot=FALSE,plot.axes={}) # pone referencias de colores
### Caritas de Chernoff
par(mfrow=c(1,1),adj=0)
par(col.main="blue") # cambia el color de los textos
galle=read_excel("D:/MaestriaDataMining-DeptoCompu/AID/galletitasCO.xlsx")
galle.salad=galle[c(1:3,7,15:17),] # agrupa las galletitas saladas
galle.dulce=galle[c(4:6,8:14),] # agrupa las galletitas dulces
galle.salad.mat<-as.matrix(galle.salad[,2:6],nrow=7,ncol=5)
mode(galle.salad.mat)<-"numeric"
galle.dulce.mat<-as.matrix(galle.dulce[,2:6],nrow=10,ncol=5)
mode(galle.dulce.mat)<-"numeric"
rownames(galle.salad.mat)<-galle.salad$Marca
rownames(galle.dulce.mat)<-galle.dulce$Marca
faces(galle.salad.mat)# hace un grafico con las caras de Chernoff
faces(galle.salad.mat,nrow.plot=3) # ajusta el alto de las caras
faces(galle.salad.mat,ncol.plot=4) # acomoda la cantidad de caras por fila
faces(galle.salad.mat,face.type=0) # grafica las caras sin color
faces(galle.salad.mat,face.type=2) # cambia el estilo de cara
faces(galle.salad.mat,labels=galle.salad$Marca) # etiqueta las caras
title("Caritas de Chernoff saladas",outer=TRUE) # ponemos titulo
faces(galle.dulce.mat,nrow.plot=3,ncol.plot=5,face.type=2,labels=galle.dulce$Marca)
title("Galletitas Dulces",outer=TRUE)
### Grafico de estrellas
par(col.main="black",adj=0.5)
stars(galle.salad.mat) # hace un grafico de estrellas
stars(galle.salad.mat,full=T) # dibuja con volumen
stars(galle.salad.mat,full=F) # dibuja en perspectiva
stars(galle.salad.mat,radius=F) # omite aristas
stars(galle.salad.mat,axes=T) # dibuja los ejes
stars(galle.salad.mat,frame.plot=T) # recuadra el grafico
stars(galle.salad.mat,draw.segments=T) # cambia el estilo
stars(galle.salad.mat,col.lines=rainbow(15)) # cambia el color a las lineas
stars(galle.salad.mat,cex=0.8,flip.labels=T) # cambia la posicion de las etiquetas
stars(galle.salad.mat,cex=0.8,flip.labels=F,len=0.8) # cambia el tamaño de las estrellas
stars(galle.salad.mat,cex=0.8,flip.labels=F,len=0.8,col.stars=terrain.colors(7)) # colorea los interiores de las estrellas
stars(galle.salad.mat,cex=0.8,flip.labels=F,len=0.8,col.stars=terrain.colors(7),ncol=4,frame.plot=T,main="Galletitas saladas")
stars(galle.dulce.mat,full=T,draw.segments=T,cex=0.9,len=0.8,ncol=4,frame.plot=T,main="Galletitas dulces")
### mtcars
cars=mtcars[1:9,]
stars(cars,cex=0.7,col.stars=c("red","green","orange","gold","blue",
"yellow", "pink","purple","cyan"))
title("Grafico de Estrellas")
par(mfrow=c(1,3))
stars(galle.salad.mat,ncol=2,full=F)
stars(galle.salad.mat,ncol=2,axes=T)
stars(galle.salad.mat,ncol=2,col.lines=rainbow(15))
######################
#### Tranformaciones por fila
#######################
recep<- read_excel(here("labs", "lab3", "resources", "../../../exercises/capitulo_2/ds/recepcionistas.xls"))
recep<-as.data.frame(recep)
colnames(recep)<-c("candidatos","cordialidadJuez1","presenciaJuez1","idiomaJuez1","cordialidadJuez2","presenciaJuez2","idiomaJuez2")
attach(recep)
# Graficos de cajas para visualizar diferencias entre los jueces
par(mfrow=c(1,1))
boxplot(recep[,c(2,5)],horizontal=T,col=c("seagreen1","salmon"),main="Puntaje de cordialidad segun juez")
boxplot(recep[,c(3,6)],horizontal=T,col=c("seagreen1","salmon"),main="Puntaje de presencia segun juez")
boxplot(recep[,c(4,7)],horizontal=T,col=c("seagreen1","salmon"),main="Puntaje de idioma segun juez")
#Rearmo una tabla que junte las características de ambos jueces identificando el juez en una nueva columna
recep2<-recep
colnames(recep2)<-NULL
CaracJuez1<-cbind(recep2[,1:4],rep(1,nrow(recep2)))
colnames(CaracJuez1)<-c("candidatos","cordialidad","presencia","idioma","juez")
CaracJuez2<-cbind(recep2[,1],recep2[,5:7],rep(2,nrow(recep2)))
colnames(CaracJuez2)<-c("candidatos","cordialidad","presencia","idioma","juez")
recepUnion<-rbind(CaracJuez1,CaracJuez2)
### Row-wise data transformation (Transformacion de datos por fila)
# Per-row mean, range and sd of the score columns (column 1 holds the candidate names).
mediasF=apply(recep[,-1],1,mean)
rangosF=apply(recep[,-1],1,max)-apply(recep[,-1],1,min)
deviosF=apply(recep[,-1],1,sd)
# Row standardisation: recycling subtracts mediasF[i] from every value in row i.
rec.transF=(recep[,-1]-mediasF)/rangosF
rec.transF.2=(recep[,-1]-mediasF)/deviosF
# check that each row now has mean 0 and standard deviation 1
apply(rec.transF.2,1,mean)
apply(rec.transF.2,1,sd)
# scale() standardises the COLUMNS of a matrix to mean 0 and sd 1
estandarizoFil<-scale(t(recep[,-1]),center=T,scale=TRUE)# note the transpose, so the original ROWS are affected
# check that each (original) row has mean 0 and standard deviation 1
apply(t(estandarizoFil),1,mean)
apply(t(estandarizoFil),1,sd)
### Row-wise transformation split by judge (Transformacion de datos por fila separando por juez)
# Per-row mean, range and sd over the three scores of recepUnion
# (one row per candidate/judge combination).
medias=apply(recepUnion[,2:4],1,mean)
rangos=apply(recepUnion[,2:4],1,max)-apply(recepUnion[,2:4],1,min)
devios=apply(recepUnion[,2:4],1,sd)
# Row standardisation: recycling subtracts medias[i] from every value in row i.
rec.trans=(recepUnion[,2:4]-medias)/rangos
# Fixed: this line divided by 'desvios', which is not defined at this point in
# the script (it only appears in a later section); the row sds computed two
# lines above are stored in 'devios'.
rec.trans.2=(recepUnion[,2:4]-medias)/devios
#gráfico de coordenadas paralelas
plot(1:3,rec.trans.2[1,1:3],type="l",col=4,lwd=2,xlab=" ",
ylim=c(-2,2),ylab="Puntuación estandarizada",xlim=c(1,3.5),xaxt="n")
axis(1, at=1:3,labels=c("Cordialidad","Presencia","Idioma"), las=2)
for(i in 2:6){
points(1:3,rec.trans.2[i,1:3],type="l",col=4,lwd=2)
}
for(j in 7:12){
points(1:3,rec.trans.2[j,1:3],type="l",col=6,lwd=2)
}
mtext("Comparación de candidatas según gráfico de coordenadas paralelas",line=1,font=2)
legend.text=c("Juez 1","Juez 2")
legend(3.1,0,legend.text,text.col=c(4,6),lty=1,col=c(4,6),lwd=2,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
#gráfico de perfiles
MediaJuez1<-apply(recepUnion[1:6,2:4],2,mean)
MediaJuez2<-apply(recepUnion[7:12,2:4],2,mean)
plot(1:3,MediaJuez1,type="l",col=4,lwd=2,xlab=" ",
ylim=c(50,90),ylab="Media de Puntajes",xlim=c(1,3.5),xaxt="n")
axis(1, at=1:3,labels=c("Cordialidad","Presencia","Idioma"), las=2)
points(1:3,MediaJuez2,type="l",col=6,lwd=2)
mtext("Comparación de puntajes por Juez según gráfico de perfiles",line=1,font=2)
legend.text=c("Juez 1","Juez 2")
legend(3.1,70,legend.text,text.col=c(4,6),lty=1,col=c(4,6),lwd=2,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
## Visualizacion de diferencias entre jueces
#Rearmo la matriz de variables transformadas agregando la columna que identifica al juez para hacer boxplot
J1<-cbind(rec.transF.2[,1:3],rep(1,nrow(rec.transF.2)))
colnames(J1)<-c("cordialidad","presencia","idioma","juez")
J2<-cbind(rec.transF.2[,4:6],rep(2,nrow(rec.transF.2)))
colnames(J2)<-c("cordialidad","presencia","idioma","juez")
J1J2<-rbind(J1,J2)
boxplot(split(J1J2$cordialidad,J1J2$juez),horizontal=T,col=c("royalblue","navajowhite"),main="Puntaje de cordialidad segun juez")
boxplot(split(J1J2$presencia,J1J2$juez),horizontal=T,col=c("royalblue","navajowhite"),main="Puntaje de presencia segun juez")
boxplot(split(J1J2$idioma,J1J2$juez),horizontal=T,col=c("royalblue","navajowhite"),main="Puntaje de idioma segun juez")
plot(1:12,rec.trans$cordialidad,type="o",col="red1",lwd=2,xlab="Candidatas",
ylim=c(-1,1),ylab="Puntuación estandarizada",xlim=c(1,12))
points(1:12,rec.trans$presencia,type="o",col="olivedrab1",lwd=2)
points(1:12,rec.trans$idioma,type="o",col="turquoise1",lwd=2)
title("Comparación de perfiles")
legend.text=c("Cordialidad","Presencia","Idioma")
legend(10,1,legend.text,text.col=c("red1","olivedrab1","turquoise1"),
cex=0.7,text.width=1.5,box.lty=0,bty="n")
plot(1:12,rec.trans$cordialidad,type="o",col="red1",lwd=2,xlab=" ",
ylim=c(-1,1),ylab="Puntuación estandarizada",xlim=c(1,12),xaxt="n")
Map(axis, side=1, at=1:13, col.axis=c(rep(4,6),rep(6,6)), labels=recepUnion[,1], las=2)
#axis(1, at=1:12,labels=FALSE, las=2)
points(1:12,rec.trans$presencia,type="o",col="olivedrab1",lwd=2)
points(1:12,rec.trans$idioma,type="o",col="turquoise1",lwd=2)
title("Comparación de perfiles")
legend.text=c("Cordialidad","Presencia","Idioma")
legend(10,1,legend.text,text.col=c("red1","olivedrab1","turquoise1"),
cex=0.7,text.width=1.5,box.lty=0,bty="n")
legend(2,-0.8,"Juez 1",text.col=4,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
legend(7,-0.8,"Juez 2",text.col=6,
cex=0.7,text.width=1.5,box.lty=0,bty="n")
#################################
## Transformaciones por columna
##################################
estandarizoCol<-scale(recepUnion[,2:4],center=T,scale=TRUE)
#verifico que tienen media 0 y desvío estándar 1
apply(estandarizoCol,2,mean)
apply(estandarizoCol,2,sd)
###primer objetivo: hacer comparables las variables
galle=read_excel("D:/MaestriaDataMining-DeptoCompu/AID/galletitasCO.xlsx")
galle.salad=galle[c(1:3,7,15:17),] # agrupa las galletitas saladas
galle.dulce=galle[c(4:6,8:14),] # agrupa las galletitas dulces
galle.salad.mat<-as.matrix(galle.salad[,2:6],nrow=7,ncol=5)
mode(galle.salad.mat)<-"numeric"
galle.dulce.mat<-as.matrix(galle.dulce[,2:6],nrow=10,ncol=5)
mode(galle.dulce.mat)<-"numeric"
rownames(galle.salad.mat)<-galle.salad$Marca
rownames(galle.dulce.mat)<-galle.dulce$Marca
gallet<-as.data.frame(galle[,2:6])
gallett<-matrix(as.numeric(unlist(gallet)),nrow=dim(gallet)[1])
# Column-wise mean and sd (Calculo de media y desvio por columna)
medias=apply(gallett,2,mean)# note: apply() may fail if columns are not numeric, hence gallett (numeric matrix) instead of gallet
# Fixed: sd was computed on gallet (the raw data frame), which has exactly the
# non-numeric risk the note above warns about; use the numeric matrix gallett,
# consistent with the mean on the previous line.
desvios=apply(gallett,2,sd)
marcas=dim(gallet)[1]
variab=dim(gallet)[2]
# Turn the variables into comparable (standardised) columns.
med=matrix(rep(medias,marcas),byrow=T,nrow=marcas) # every row repeats the vector of column means
des=matrix(rep(desvios,marcas),byrow=T,nrow=marcas) # every row repeats the vector of column sds
gall.tran=(gallett-med)/des# same as scale(gallett,center=T,scale=T)
# verify the transformation
round(apply(gall.tran,2,mean),3)#0 0 0 0 0
round(apply(gall.tran,2,sd),3)#1 1 1 1 1
gall.trans<-as.data.frame(gall.tran)
colnames(gall.trans)<-colnames(gallet)
head(gall.trans)
attach(gall.trans) # NOTE(review): attach() is discouraged; kept to preserve the script's behaviour
nombres=c("Calorias","Carbohidratos","Proteinas","Grasas","Sodio")
boxplot(gall.trans,col=terrain.colors(8),names=nombres,
        cex.axis=0.6, ylab="",main="Valores nutricionales")
|
# Build a nested Weg object from a long-format data.table of LCMS measurements
# for one road ("Weg"): Weg -> Banen -> hmVakken -> Stroken -> LCMS traces.
#
# Weg_DF: data.table with columns Weg, Baan, HmStart, Strook, Datum_tijd,
#         Vehicle, Errorcode, lengte_meting, plus the Overall/Left/Right data
#         columns selected via ColNames(). The *.template objects and
#         ColNames() are project-level definitions (not shown in this file).
# Returns: a filled copy of Weg.template.
Create_Weg <- function(Weg_DF){
  # Key the table so the .(...) subsets below are fast keyed lookups.
  setkey(Weg_DF, Baan, HmStart, Strook, Datum_tijd)
  # Define and determine the amount of unique Banen from the given Weg.
  UniqueBanen <- unique(Weg_DF$Baan)
  Current_Weg <- Weg.template
  Current_Weg@wegID <- unique(Weg_DF$Weg)
  # Preallocate the list of Baan objects.
  Banen_List <- vector('list', length = length(UniqueBanen))
  for(B in seq_along(UniqueBanen)){
    BaaN <- UniqueBanen[B]
    # Take subset of the given data based on Baan.
    Weg_DF_Baan <- Weg_DF[.(BaaN)]
    # Create a Baan object and fill.
    Current_Baan <- temporal.Baan.template
    Current_Baan@baanID <- BaaN
    Current_Baan@hmVakVector <- unique(Weg_DF_Baan$HmStart)
    # Preallocate the list of hectometre-section (hmVak) objects.
    hmVakken_List <- vector('list', length = length(Current_Baan@hmVakVector))
    for(hmV in seq_along(Current_Baan@hmVakVector)){
      hmVak <- Current_Baan@hmVakVector[hmV]
      # Take subset of the given data based on Baan and hmVak.
      Weg_DF_Baan_hmVak <- Weg_DF_Baan[.(BaaN, hmVak)]
      # Create a hmVak object and fill.
      Current_hmVak <- temporal.hmVak.template
      Current_hmVak@hmStartPos <- hmVak
      Current_hmVak@strookVector <- unique(Weg_DF_Baan_hmVak$Strook)
      # Preallocate the list of lane (Strook) objects.
      Stroken_List <- vector('list', length = length(Current_hmVak@strookVector))
      for(S in seq_along(Current_hmVak@strookVector)){
        StrooK <- Current_hmVak@strookVector[S]
        # Take subset of the given data based on Baan, hmVak and Strook.
        Weg_DF_Baan_hmVak_Strook <- Weg_DF_Baan_hmVak[.(BaaN, hmVak, StrooK)]
        # Create a Strook object and fill.
        Current_Strook <- temporal.Strook.template
        Current_Strook@strookID <- StrooK
        Current_Strook@dateVector <- Weg_DF_Baan_hmVak_Strook$Datum_tijd
        LCMS_Traces_List <- vector('list', length = nrow(Weg_DF_Baan_hmVak_Strook))
        for(D in seq_along(Current_Strook@dateVector)){
          Date <- Current_Strook@dateVector[D]
          # Hoisted: one row subset per date instead of six identical ones.
          Trace_Row <- Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date]
          # Create an LCMS_Trace object and fill.
          Current_LCMS_Trace <- temporal.LCMS_Trace.template
          Current_LCMS_Trace@Datum_tijd <- Date
          Current_LCMS_Trace@Vehicle <- Trace_Row$Vehicle
          Current_LCMS_Trace@Errorcode <- Trace_Row$Errorcode
          Current_LCMS_Trace@lengte_meting <- Trace_Row$lengte_meting
          Current_LCMS_Trace@overallData <- as.numeric(Trace_Row[, c(ColNames('Overall', 'No'))])
          Current_LCMS_Trace@leftData <- as.numeric(Trace_Row[, c(ColNames('Left', 'No'))])
          Current_LCMS_Trace@rightData <- as.numeric(Trace_Row[, c(ColNames('Right', 'No'))])
          # Fixed: single-element list assignment must use [[<-;
          # `List[i] <- value` with an S4 value fails ("cannot coerce type
          # 'S4' to vector of type 'list'").
          LCMS_Traces_List[[D]] <- Current_LCMS_Trace
        }
        Current_Strook@temporal.LCMS_Traces <- LCMS_Traces_List
        Stroken_List[[S]] <- Current_Strook # fixed: [[<- (see note above)
      }
      Current_hmVak@temporal.Stroken <- Stroken_List
      hmVakken_List[[hmV]] <- Current_hmVak # fixed: [[<- (see note above)
    }
    Current_Baan@temporal.hmVakken <- hmVakken_List
    Banen_List[[B]] <- Current_Baan # fixed: [[<- (see note above)
  }
  Current_Weg@banen <- Banen_List
  return(Current_Weg)
}
# Save the Weg objects.
# {
# for(W in unique(All_DF$Weg)){
# Current_DF <- All_DF[Weg == W]
# assign(paste0(W, '_DF'), Current_DF)
# Current_Weg <- Create_Weg(Current_DF)
# saveit(Weg = Current_Weg, string = W, file = paste0('LCMS_DB_', W, '.Rdata'))
# } | /Create_Weg.R | no_license | liyongg/asphalt | R | false | false | 3,903 | r | Create_Weg <- function(Weg_DF){
setkey(Weg_DF, Baan, HmStart, Strook, Datum_tijd)
# Define and determine the amount of unique Banen from the given Weg.
UniqueBanen <- unique(Weg_DF$Baan)
Current_Weg <- Weg.template
Current_Weg@wegID <- unique(Weg_DF$Weg)
Banen_List.length <- length(UniqueBanen)
# Define and preallocate the Banen_List.
Banen_List <- vector('list', length = Banen_List.length)
for(B in seq_along(UniqueBanen)){
BaaN <- UniqueBanen[B]
# Take subset of the given data based on Baan.
Weg_DF_Baan <- Weg_DF[.(BaaN)]
# Create a Baan object and fill.
Current_Baan <- temporal.Baan.template
Current_Baan@baanID <- BaaN
Current_Baan@hmVakVector <- unique(Weg_DF_Baan$HmStart)
# Define and preallocate the hmVakken_List.
hmVakken_List.length <- length(Current_Baan@hmVakVector)
hmVakken_List <- vector('list', length = hmVakken_List.length)
for(hmV in seq_along(Current_Baan@hmVakVector)){
hmVak <- Current_Baan@hmVakVector[hmV]
# Take subset of the given data based on Baan and hmVak.
Weg_DF_Baan_hmVak <- Weg_DF_Baan[.(BaaN, hmVak)]
# Create a hmVak object and fill.
Current_hmVak <- temporal.hmVak.template
Current_hmVak@hmStartPos <- hmVak
Current_hmVak@strookVector <- unique(Weg_DF_Baan_hmVak$Strook)
# Define and preallocate the Stroken_List
Stroken_List.length <- length(Current_hmVak@strookVector)
Stroken_List <- vector('list', length = Stroken_List.length)
for(S in seq_along(Current_hmVak@strookVector)){
StrooK <- Current_hmVak@strookVector[S]
# Take subset of the given data based on Baan, hmVak and Strook.
Weg_DF_Baan_hmVak_Strook <- Weg_DF_Baan_hmVak[.(BaaN, hmVak, StrooK)]
# Create a Strook object and fill.
Current_Strook <- temporal.Strook.template
Current_Strook@strookID <- StrooK
Current_Strook@dateVector <- Weg_DF_Baan_hmVak_Strook$Datum_tijd
LCMS_Traces_List.length <- nrow(Weg_DF_Baan_hmVak_Strook)
LCMS_Traces_List <- vector('list', length = LCMS_Traces_List.length)
for(D in seq_along(Current_Strook@dateVector)){
Date <- Current_Strook@dateVector[D]
# Create an LCMS_Trace object and fill.
Current_LCMS_Trace <- temporal.LCMS_Trace.template
Current_LCMS_Trace@Datum_tijd <- Date
Current_LCMS_Trace@Vehicle <- Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date]$Vehicle
Current_LCMS_Trace@Errorcode <- Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date]$Errorcode
Current_LCMS_Trace@lengte_meting <- Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date]$lengte_meting
Current_LCMS_Trace@overallData <- as.numeric(Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date, c(ColNames('Overall', 'No'))])
Current_LCMS_Trace@leftData <- as.numeric(Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date, c(ColNames('Left', 'No'))])
Current_LCMS_Trace@rightData <- as.numeric(Weg_DF_Baan_hmVak_Strook[Datum_tijd == Date, c(ColNames('Right', 'No'))])
# Fill the LCMS_Traces_List.
LCMS_Traces_List[D] <- Current_LCMS_Trace
}
Current_Strook@temporal.LCMS_Traces <- LCMS_Traces_List
Stroken_List[S] <- Current_Strook
}
Current_hmVak@temporal.Stroken <- Stroken_List
hmVakken_List[hmV] <- Current_hmVak
}
Current_Baan@temporal.hmVakken <- hmVakken_List
Banen_List[B] <- Current_Baan
}
Current_Weg@banen <- Banen_List
return(Current_Weg)
}
# Save the Weg objects.
# {
# for(W in unique(All_DF$Weg)){
# Current_DF <- All_DF[Weg == W]
# assign(paste0(W, '_DF'), Current_DF)
# Current_Weg <- Create_Weg(Current_DF)
# saveit(Weg = Current_Weg, string = W, file = paste0('LCMS_DB_', W, '.Rdata'))
# } |
library(tidyverse)
library(stringr)
# Load data ---------------------------------------------------------------
seats_raw <- read_delim(here::here("2020", "raw_data", "day_11.txt"),
delim = "\t",
col_names = "layout")
# Main --------------------------------------------------------------------
##### Part 1 #####
# convert L's to 0 and .'s to NA's
# then convert to matrix
# Grid width, taken from the first raw layout line.
# NOTE(review): str_count() is used with its default pattern here -- confirm
# it returns the intended number of characters for this input.
layout_ncol <- str_count(seats_raw[["layout"]][1])
seats_tidy <-
  seats_raw %>%
  # "L" (empty seat) becomes "0"
  mutate(layout = layout %>%
           str_replace_all("L", "0")) %>%
  # sep = "" splits each character into its own X* column; the leading empty
  # piece lands in X0 and is dropped below
  separate(layout, into = str_c("X", 0:layout_ncol), sep = "") %>%
  dplyr::select(-X0) %>%
  # "." (floor) cannot be parsed as integer and becomes NA
  mutate_all(as.integer)
# create function to obtain all adj seats
# Return the 8 grid positions adjacent to the seat described by `row_col`
# (a one-row data frame with `row` and `col` columns): build the full 3x3
# window around the centre and drop the centre cell itself.
get_adj_seat <- function(row_col){
  centre_r <- row_col[["row"]]
  centre_c <- row_col[["col"]]
  tibble(row = (centre_r - 1):(centre_r + 1),
         col = (centre_c - 1):(centre_c + 1)) %>%
    expand(row, col) %>%
    anti_join(row_col, by = c("row", "col"))
}
# apply function to each cell
# One row per grid cell, with its row/col indices; `adj_seat` will hold each
# cell's list of the 8 neighbouring positions.
adj_seats_key <- seats_tidy %>%
  mutate(row = row_number()) %>%
  gather(key = "col", value = "seat", contains("X")) %>%
  dplyr::select(-seat) %>%
  mutate(col = col %>%
           str_remove("X") %>%
           as.integer(),
         adj_seat = vector("list", n()))
# Precompute the neighbour list once per cell; reused on every iteration.
for(i in seq_len(nrow(adj_seats_key))){
  adj_seats_key[["adj_seat"]][[i]] <- get_adj_seat(adj_seats_key[i,])
}
# Matrix form of the grid: 0 = empty seat, NA = floor (1 = occupied later on).
seats_mat <- seats_tidy %>%
  as.matrix()
# create a function to update the seat layout
# One simulation step for part 1: apply the seating rules simultaneously to
# every cell, using the precomputed neighbour lists in `adj_seats_key`.
# Rules: an empty seat (0) with no occupied neighbours becomes occupied (1);
# an occupied seat with 4+ occupied neighbours empties; everything else stays.
update_seats <- function(seats_mat, adj_seats_key){
  # Write into a fresh matrix so every update is based on the same snapshot.
  next_mat <- matrix(nrow = nrow(seats_mat),
                     ncol = ncol(seats_mat))
  for(k in seq_len(nrow(adj_seats_key))){
    ri <- adj_seats_key[["row"]][k]
    ci <- adj_seats_key[["col"]][k]
    occ <- seats_mat[[ri, ci]]
    # Floor cells (NA) never change.
    if(is.na(occ)){
      next
    }
    n_occ <- sum_adj_seats(seats_mat, adj_seats_key[["adj_seat"]][[k]])
    if(occ == 0 && n_occ == 0){
      next_mat[ri, ci] <- 1
    }else if(occ == 1 && n_occ >= 4){
      next_mat[ri, ci] <- 0
    }else{
      next_mat[ri, ci] <- occ
    }
  }
  next_mat
}
# Count the occupied seats among the candidate neighbour positions in
# `adj_seats` (a data frame with `row` and `col` columns). Positions that
# fall off the grid or land on floor cells (NA) contribute nothing.
sum_adj_seats <- function(seats_mat, adj_seats){
  total <- 0
  for(k in seq_len(nrow(adj_seats))){
    # Out-of-bounds subscripts raise an error; treat those as "no seat".
    occ <- tryCatch(
      seats_mat[[adj_seats[["row"]][k], adj_seats[["col"]][k]]],
      error = function(e) FALSE
    )
    if(is.na(occ) | occ == FALSE){
      next
    }
    total <- total + occ
  }
  total
}
# Run part 1 to a fixed point: step until an update leaves the layout
# unchanged, then count the occupied seats.
seats_mat_prev <- NA
seats_mat_curr <- seats_mat
iter <- 1
while(!identical(seats_mat_curr, seats_mat_prev)){
  print(iter) # progress indicator
  seats_mat_prev <- seats_mat_curr
  seats_mat_curr <- update_seats(seats_mat_prev, adj_seats_key)
  iter <- iter + 1
}
sum(seats_mat_curr, na.rm = TRUE) # part 1 answer: occupied seats at equilibrium
##### Part 2 #####
# create a new function to update the seat layout
# One simulation step for part 2: the neighbour count comes from the
# line-of-sight sum_adj_seats defined below, and the occupied-seat tolerance
# rises from 4 to 5.
update_seats <- function(seats_mat){
  # Write into a fresh matrix so every update is based on the same snapshot.
  next_mat <- matrix(nrow = nrow(seats_mat),
                     ncol = ncol(seats_mat))
  for(ri in seq_len(nrow(seats_mat))){
    for(ci in seq_len(ncol(seats_mat))){
      occ <- seats_mat[[ri, ci]]
      # Floor cells (NA) never change.
      if(is.na(occ)){
        next
      }
      n_occ <- sum_adj_seats(seats_mat, ri, ci)
      if(occ == 0 && n_occ == 0){
        next_mat[ri, ci] <- 1
      }else if(occ == 1 && n_occ >= 5){
        next_mat[ri, ci] <- 0
      }else{
        next_mat[ri, ci] <- occ
      }
    }
  }
  next_mat
}
# in part 2, this function needs to be more complex
# we will search in all possible directions
# for the first seat (non-NA value) then add this to get the sum
# this is a real slow and dirty brute force solution
# Count the occupied seats *visible* from (i, j): for each of the 8 compass
# directions (row/col op each "add"/"minus"/"none", minus the no-move pair),
# step outward until the first non-NA cell. Stepping off the matrix raises a
# subscript error, which tryCatch converts to 0 (nothing visible that way).
sum_adj_seats <- function(seats_mat, i, j){
  ops <-
    tibble(row = c("add", "minus", "none"),
           col = c("add", "minus", "none")) %>%
    expand(row, col) %>%
    filter(!(row == "none" & col == "none")) %>%
    mutate(occ = NA_integer_)
  for(k in seq_len(nrow(ops))){
    ops_curr <- ops[k, ]
    row_col_curr <- c(row = i, col = j)
    occ_curr <- NA
    # walk this direction until we hit a seat (0/1) or fall off the board (0)
    while(is.na(occ_curr)){
      row_col_curr <- update_row_col(row_col_curr, ops_curr)
      occ_curr <-
        tryCatch(expr = {
          seats_mat[[row_col_curr["row"], row_col_curr["col"]]]
        }, error = function(x) 0)
    }
    ops[["occ"]][k] <- occ_curr
  }
  adj_seats_occ <- sum(ops[["occ"]])
  return(adj_seats_occ)
}
# Advance a named c(row =, col =) position one step in the direction encoded
# by ops_curr: "add" -> +1, "minus" -> -1, anything else (e.g. "none") -> stay.
update_row_col <- function(row_col_curr, ops_curr){
  for(axis in c("row", "col")){
    step <- switch(ops_curr[[axis]], add = 1L, minus = -1L, 0L)
    row_col_curr[axis] <- row_col_curr[axis] + step
  }
  return(row_col_curr)
}
# Iterate the part-2 update rule until the layout stops changing
# (identical() against the previous layout; NA seeds the first comparison).
seats_mat_prev <- NA
seats_mat_curr <- seats_mat
iter <- 1
while(!identical(seats_mat_curr, seats_mat_prev)){
  print(iter)  # progress: iteration counter
  seats_mat_prev <- seats_mat_curr
  seats_mat_curr <- update_seats(seats_mat = seats_mat_prev)
  iter <- iter + 1
}
# Part-2 answer: number of occupied seats in the stable layout.
sum(seats_mat_curr, na.rm = TRUE)
| /2020/scripts/day_11.R | no_license | dzhang32/advent_of_code | R | false | false | 5,846 | r | library(tidyverse)
library(stringr)
# Load data ---------------------------------------------------------------
# Read the raw puzzle input: one line per row, kept whole in a single
# character column `layout` (tab delimiter assumed absent from the input --
# TODO confirm).
seats_raw <- read_delim(here::here("2020", "raw_data", "day_11.txt"),
                        delim = "\t",
                        col_names = "layout")
# Main --------------------------------------------------------------------
##### Part 1 #####
# convert L's to 0 and .'s to NA's
# then convert to matrix
# characters per input line = number of grid columns
layout_ncol <- str_count(seats_raw[["layout"]][1])
# One column per grid cell: "L" -> "0", and "." becomes NA when coerced to
# integer; the empty X0 column produced by sep = "" is dropped.
seats_tidy <-
  seats_raw %>%
  mutate(layout = layout %>%
           str_replace_all("L", "0")) %>%
  separate(layout, into = str_c("X", 0:layout_ncol), sep = "") %>%
  dplyr::select(-X0) %>%
  mutate_all(as.integer)
# create function to obtain all adj seats
# For one (row, col) cell, return a tibble of its 8 surrounding coordinates:
# the 3x3 neighbourhood expanded around the cell, with the cell itself
# removed via anti_join. Off-grid coordinates (e.g. row 0) are kept here and
# rejected later by the bounds check inside sum_adj_seats().
get_adj_seat <- function(row_col){
  adj_seats <-
    tibble(row = (row_col[["row"]] - 1):(row_col[["row"]] + 1),
           col = (row_col[["col"]] - 1):(row_col[["col"]] + 1)) %>%
    expand(row, col) %>%
    anti_join(row_col, by = c("row", "col"))
  return(adj_seats)
}
# apply function to each cell
# Build a lookup with one row per grid cell (row, col) plus a list column
# holding that cell's candidate neighbouring coordinates (via get_adj_seat);
# the seat values themselves are dropped, only positions matter here.
adj_seats_key <- seats_tidy %>%
  mutate(row = row_number()) %>%
  gather(key = "col", value = "seat", contains("X")) %>%
  dplyr::select(-seat) %>%
  mutate(col = col %>%
           str_remove("X") %>%
           as.integer(),
         adj_seat = vector("list", n()))
# Fill the list column one cell at a time (get_adj_seat takes a single row).
for(i in seq_len(nrow(adj_seats_key))){
  adj_seats_key[["adj_seat"]][[i]] <- get_adj_seat(adj_seats_key[i,])
}
# Matrix form of the layout: 0 = empty seat, 1 = occupied, NA = floor.
seats_mat <- seats_tidy %>%
  as.matrix()
# create a function to update the seat layout
# One simultaneous simulation step under the part-1 rules:
#   * empty seat (0) with no occupied neighbour      -> becomes occupied (1)
#   * occupied seat (1) with >= 4 occupied neighbours -> becomes empty (0)
#   * otherwise the cell keeps its current value
# Floor cells (NA) are skipped and stay NA in the freshly allocated result.
# `adj_seats_key` is the precomputed (row, col, adj_seat) lookup built above.
update_seats <- function(seats_mat, adj_seats_key){
  # create fresh mat as we simultaneously update
  seats_mat_next <- matrix(nrow = nrow(seats_mat),
                           ncol = ncol(seats_mat))
  for(i in seq_len(nrow(adj_seats_key))){
    seat_occ <- seats_mat[[adj_seats_key[["row"]][i], adj_seats_key[["col"]][i]]]
    # skip if not a seat
    if(is.na(seat_occ)){
      next
    }
    # number of occupied seats among this cell's precomputed neighbours
    adj_seats_occ <- sum_adj_seats(seats_mat, adj_seats_key[["adj_seat"]][[i]])
    if(seat_occ == 0 && adj_seats_occ == 0){
      seats_mat_next[adj_seats_key[["row"]][i], adj_seats_key[["col"]][i]] <- 1
    }else if(seat_occ == 1 && adj_seats_occ >= 4){
      seats_mat_next[adj_seats_key[["row"]][i], adj_seats_key[["col"]][i]] <- 0
    }else{
      seats_mat_next[adj_seats_key[["row"]][i], adj_seats_key[["col"]][i]] <- seat_occ
    }
  }
  return(seats_mat_next)
}
# Count how many of the supplied neighbouring cells are occupied (== 1).
# `adj_seats` holds one candidate (row, col) pair per row; coordinates that
# fall outside the matrix raise a subscript error, which is caught and
# treated as "no seat there". Floor cells (NA) and empty seats contribute 0.
sum_adj_seats <- function(seats_mat, adj_seats){
  n_occupied <- 0
  for(k in seq_len(nrow(adj_seats))){
    # out-of-bounds coordinates (e.g. row 0) become FALSE instead of erroring
    cell_value <- tryCatch(
      seats_mat[[adj_seats[["row"]][k], adj_seats[["col"]][k]]],
      error = function(e) FALSE
    )
    if(!is.na(cell_value) && cell_value != FALSE){
      n_occupied <- n_occupied + cell_value
    }
  }
  return(n_occupied)
}
# Iterate the part-1 update rule until the layout reaches a fixed point
# (two consecutive layouts are identical(); NA seeds the first comparison).
seats_mat_prev <- NA
seats_mat_curr <- seats_mat
iter <- 1
while(!identical(seats_mat_curr, seats_mat_prev)){
  print(iter)  # progress: iteration counter
  seats_mat_prev <- seats_mat_curr
  seats_mat_curr <- update_seats(seats_mat_prev, adj_seats_key)
  iter <- iter + 1
}
# Part-1 answer: number of occupied seats in the stable layout.
sum(seats_mat_curr, na.rm = TRUE)
##### Part 2 #####
# Redefinition of update_seats for part 2: one simultaneous simulation step
# using line-of-sight neighbours (sum_adj_seats(mat, i, j)) and a vacate
# threshold of 5 instead of 4. A fresh matrix is returned so every cell is
# decided from the *input* layout; floor cells (NA) stay NA.
update_seats <- function(seats_mat){
  next_layout <- matrix(nrow = nrow(seats_mat),
                        ncol = ncol(seats_mat))
  for(row_i in seq_len(nrow(seats_mat))){
    for(col_i in seq_len(ncol(seats_mat))){
      current <- seats_mat[[row_i, col_i]]
      # floor: leave the freshly allocated NA in place
      if(is.na(current)){
        next
      }
      visible <- sum_adj_seats(seats_mat, row_i, col_i)
      if(current == 0 && visible == 0){
        # empty seat seeing no occupied seat -> becomes occupied
        next_layout[row_i, col_i] <- 1
      }else if(current == 1 && visible >= 5){
        # occupied seat seeing 5+ occupied seats -> vacated
        next_layout[row_i, col_i] <- 0
      }else{
        next_layout[row_i, col_i] <- current
      }
    }
  }
  return(next_layout)
}
# in part 2, this function needs to be more complex
# we will search in all possible directions
# for the first seat (non-NA value) then add this to get the sum
# this is a real slow and dirty brute force solution
# Count the occupied seats *visible* from (i, j): for each of the 8 compass
# directions (row/col op each "add"/"minus"/"none", minus the no-move pair),
# step outward until the first non-NA cell. Stepping off the matrix raises a
# subscript error, which tryCatch converts to 0 (nothing visible that way).
sum_adj_seats <- function(seats_mat, i, j){
  ops <-
    tibble(row = c("add", "minus", "none"),
           col = c("add", "minus", "none")) %>%
    expand(row, col) %>%
    filter(!(row == "none" & col == "none")) %>%
    mutate(occ = NA_integer_)
  for(k in seq_len(nrow(ops))){
    ops_curr <- ops[k, ]
    row_col_curr <- c(row = i, col = j)
    occ_curr <- NA
    # walk this direction until we hit a seat (0/1) or fall off the board (0)
    while(is.na(occ_curr)){
      row_col_curr <- update_row_col(row_col_curr, ops_curr)
      occ_curr <-
        tryCatch(expr = {
          seats_mat[[row_col_curr["row"], row_col_curr["col"]]]
        }, error = function(x) 0)
    }
    ops[["occ"]][k] <- occ_curr
  }
  adj_seats_occ <- sum(ops[["occ"]])
  return(adj_seats_occ)
}
# Advance a named c(row =, col =) position one step in the direction encoded
# by ops_curr: "add" -> +1, "minus" -> -1, anything else (e.g. "none") -> stay.
update_row_col <- function(row_col_curr, ops_curr){
  for(axis in c("row", "col")){
    step <- switch(ops_curr[[axis]], add = 1L, minus = -1L, 0L)
    row_col_curr[axis] <- row_col_curr[axis] + step
  }
  return(row_col_curr)
}
# Iterate the part-2 update rule until the layout stops changing
# (identical() against the previous layout; NA seeds the first comparison).
seats_mat_prev <- NA
seats_mat_curr <- seats_mat
iter <- 1
while(!identical(seats_mat_curr, seats_mat_prev)){
  print(iter)  # progress: iteration counter
  seats_mat_prev <- seats_mat_curr
  seats_mat_curr <- update_seats(seats_mat = seats_mat_prev)
  iter <- iter + 1
}
# Part-2 answer: number of occupied seats in the stable layout.
sum(seats_mat_curr, na.rm = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classError.R
\name{classError}
\alias{classError}
\title{Classification Error}
\usage{
classError(true, estimated, estimated.prob = NULL, trace = 0)
}
\arguments{
\item{true}{Vector, factor: True values}
\item{estimated}{Vector, factor: Estimated values}
\item{estimated.prob}{Numeric: Estimated probabilities, if available. Default = NULL}
\item{trace}{Integer: If > 0, print diagnostic messages. Default = 0}
}
\value{
S3 object of type "classError"
}
\description{
Calculates Classification Metrics
}
\author{
Efstathios D. Gennatas
}
| /man/classError.Rd | no_license | zeta1999/rtemis | R | false | true | 541 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classError.R
\name{classError}
\alias{classError}
\title{Classification Error}
\usage{
classError(true, estimated, estimated.prob = NULL, trace = 0)
}
\arguments{
\item{true}{Vector, factor: True values}
\item{estimated}{Vector, factor: Estimated values}
\item{estimated.prob}{Numeric: Estimated probabilities, if available. Default = NULL}
\item{trace}{Integer: If > 0, print diagnostic messages. Default = 0}
}
\value{
S3 object of type "classError"
}
\description{
Calculates Classification Metrics
}
\author{
Efstathios D. Gennatas
}
|
#####################
# Perf for classes #
#####################
# change case of d or nd
# Build binary confusion-matrix counts from 0/1-coded label vectors.
#
# Args:
#   list_predict: predicted labels; 1 = positive class, anything else negative
#   list_real:    true labels, same coding and same length
# Returns:
#   numeric vector c(tp, tn, fp, fn)
#
# Vectorized replacement of the original element-by-element while loop:
# each count is a single logical sum, which is clearer and O(n) in vector
# operations instead of scalar indexing.
perftable = function (list_predict, list_real){
	predicted_pos = list_predict == 1
	correct = list_predict == list_real
	tp = sum(predicted_pos & correct)
	fp = sum(predicted_pos & !correct)
	tn = sum(!predicted_pos & correct)
	fn = sum(!predicted_pos & !correct)
	tableval = c(tp,tn,fp,fn)
	return (tableval)
}
# Scalar classification metrics computed from confusion-matrix counts
# (tp = true pos, tn = true neg, fp = false pos, fn = false neg).

# Fraction of all predictions that are correct.
accuracy = function (tp, tn, fp, fn){
	total = tp + fp + tn + fn
	return ((tp + tn)/total)
}
# Fraction of positive predictions that are truly positive.
precision = function (tp, fp){
	return (tp/(tp + fp))
}
# Fraction of actual positives recovered (alias of sensibility below).
recall = function (tp, fn){
	return (tp/(tp + fn))
}
# Fraction of actual negatives recovered.
specificity = function (tn, fp){
	return (tn/(tn + fp))
}
# Sensitivity: fraction of actual positives recovered.
sensibility = function (tp, fn){
	return (tp/(tp + fn))
}
# Balanced classification rate: mean of sensitivity and specificity.
BCR = function (tp, tn, fp, fn){
	sens = tp/(tp + fn)
	spec = tn/(tn + fp)
	return (0.5*(sens + spec))
}
# Matthews correlation coefficient, in [-1, 1].
MCC = function (tp, tn, fp, fn){
	numerator = tp*tn - fp*fn
	denumerator = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
	return (numerator / sqrt(denumerator))
}
# Print a panel of classification metrics for one prediction and return the
# confusion counts c(tp, tn, fp, fn).
#
# BUGFIX: the original called calculTaux(), which is not defined anywhere in
# this file; perftable() (defined above) has exactly the expected contract
# (predicted + real vectors -> c(tp, tn, fp, fn), used here as
# v_predict[1..4]). If calculTaux() lives in another sourced script,
# restore that call instead.
qualityPredict = function (predict, Y2){
	print (as.vector(predict)[[1]])
	print (as.vector(Y2)[[1]])
	v_predict = perftable (as.vector(predict)[[1]], as.vector(Y2)[[1]])
	print (paste ("accuracy : ", accuracy(v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("precision : ",precision(v_predict[1], v_predict[3]), sep = ""))
	#print (paste ("recall : ", recall(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("sensibility : ", sensibility(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("specificity : ", sensibility(v_predict[2], v_predict[3]), sep = ""))
	print (paste ("BCR (balanced classification rate) : ", BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("BER (balanced error rate) : ", 1 - BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("MCC (Matthew) : ", MCC (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	return (v_predict)
}
# Same report for plain prediction/real vectors (no extraction step).
# Same BUGFIX as above: calculTaux() -> perftable().
qualityPredictList = function (test_vector, real_vector){
	v_predict = perftable (test_vector, real_vector)
	print (paste ("accuracy : ", accuracy(v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("precision : ",precision(v_predict[1], v_predict[3]), sep = ""))
	#print (paste ("recall : ", recall(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("sensibility : ", sensibility(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("specificity : ", sensibility(v_predict[2], v_predict[3]), sep = ""))
	print (paste ("BCR (balanced classification rate) : ", BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("BER (balanced error rate) : ", 1 - BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("MCC (Matthew) : ", MCC (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	return (v_predict)
}
# Print a consolidated performance report for a selected model: accuracy,
# sensitivity/specificity and MCC on the training set, the test set,
# leave-one-out (loo) and k-fold cross-validation (CV), plus the selected
# descriptors and the absolute values of their coefficients.
#
# NOTE(review): computedCriteria() is not defined in this file (only
# computedCriteriaCV() is). It is presumably sourced from a sibling script
# and expected to return list(acc, se, sp, mcc) like classPerf() -- confirm.
qualityShowModelSelection = function (list_des_model, coef, v_real_train, v_predict_train, v_real_test, v_predict_test, v_real_loo, v_predict_loo, l_out_CV){
	# loo
	criteria_loo = computedCriteria(v_real_loo, v_predict_loo)
	# CV
	criteria_CV = computedCriteriaCV(l_out_CV)
	# train
	criteria_train = computedCriteria(v_real_train, v_predict_train)
	# test
	criteria_test = computedCriteria(v_real_test, v_predict_test)
	# show
	print ("descriptor")
	print (list_des_model)
	print (as.vector(abs(coef[list_des_model])))
	print ("Acc_loo --- Acc_train --- Acc_test --- Acc_CV_train --- SD_CV_train --- Acc_CV_test --- SD_CV_test")
	print (paste (criteria_loo[[1]], criteria_train[[1]], criteria_test[[1]], criteria_CV["acc_train"], criteria_CV["acc_train_SD"], criteria_CV["acc_test"], criteria_CV["acc_test_SD"], sep = "---"))
	print ("Se_loo --- Sp_loo --- Se_train --- Sp_train --- Se_test --- Sp_test --- Se_CV_train --- SD_CV_train --- Se_CV_test --- SD_CV_test --- Sp_CV_train --- SD_CV_train --- Sp_CV_test --- SD_CV_test")
	print (paste (criteria_loo[[2]], criteria_loo[[3]], criteria_train[[2]], criteria_train[[3]], criteria_test[[2]], criteria_test[[3]], criteria_CV["se_train"], criteria_CV["se_train_SD"], criteria_CV["se_test"],criteria_CV["se_test_SD"], criteria_CV["sp_train"], criteria_CV["sp_train_SD"], criteria_CV["sp_test"],criteria_CV["sp_test_SD"], sep = "---"))
	print ("MCC_loo --- MCC_train --- MCC_test --- MCC_CV_train --- SD_CV_train --- MCC_CV_test --- SD_CV_test")
	print (paste (criteria_loo[[4]], criteria_train[[4]], criteria_test[[4]], criteria_CV["mcc_train"], criteria_CV["mcc_train_SD"], criteria_CV["mcc_test"], criteria_CV["mcc_test_SD"], sep = "---"))
	print ("**********************************************************************")
}
# Compute the four headline criteria for one prediction and return them as
# list(acc, se, sp, mcc). Note the argument order passed to perftable():
# (predicted, real).
classPerf = function (v_real, v_predict){
	rate = perftable (v_predict, v_real)
	acc = accuracy(rate[1], rate[2], rate[3], rate[4])
	se = sensibility(rate[1], rate[4])
	# Consistency fix: call specificity() directly instead of re-using
	# sensibility() with swapped arguments (numerically identical:
	# sensibility(tn, fp) == tn / (tn + fp) == specificity(tn, fp)).
	sp = specificity(rate[2], rate[3])
	mcc = MCC(rate[1], rate[2], rate[3], rate[4])
	return (list (acc, se, sp, mcc))
}
# Aggregate per-fold cross-validation results into means and SDs.
#
# Args:
#   l_out_CV: list(CV_train, CV_test); each is a list with one entry per
#             fold, each fold being list(acc, se, sp, mcc) as returned by
#             classPerf().
# Returns:
#   named numeric vector of mean/SD for acc, se, sp and mcc on the train
#   and test folds (same names and order as the original implementation).
#
# Rewritten to extract each criterion with vapply() instead of growing
# eight separate vectors with append() inside a loop (a quadratic
# anti-pattern); values are emitted in the original order.
computedCriteriaCV = function (l_out_CV){
	CV_train = l_out_CV[[1]]
	CV_test = l_out_CV[[2]]
	# pull the k-th criterion (1 = acc, 2 = se, 3 = sp, 4 = mcc) from folds
	pull_crit = function(folds, k) vapply(folds, function(fold) as.double(fold[[k]]), numeric(1))
	v_out = NULL
	for (k in 1:4){
		crit_train = pull_crit(CV_train, k)
		crit_test = pull_crit(CV_test, k)
		v_out = c(v_out, mean (crit_train), sd (crit_train), mean (crit_test), sd (crit_test))
	}
	names (v_out) = c("acc_train", "acc_train_SD","acc_test", "acc_test_SD","se_train", "se_train_SD", "se_test", "se_test_SD", "sp_train", "sp_train_SD", "sp_test", "sp_test_SD", "mcc_train", "mcc_train_SD", "mcc_test", "mcc_test_SD")
	return (v_out)
}
# Print combined classification metrics for two confusion-count vectors
# (each c(tp, tn, fp, fn) as produced by perftable()).
#
# NOTE(review): tp and tn are accumulated over BOTH inputs, but fp and fn
# are taken from taux2 only (taux1[3] / taux1[4] are ignored). If this is
# meant to be a true cumulative table, that looks like a bug -- confirm
# against the callers before changing.
cumulTaux = function (taux1, taux2){
	tp = taux1[1] + taux2[1]
	tn = taux1[2] + taux2[2]
	# fp/fn intentionally(?) only from the second table -- see NOTE above
	fp = taux2[3]
	fn = taux2[4]
	print (paste ("accuracy : ", accuracy(tp, tn, fp, fn), sep = ""))
	print (paste ("precision : ",precision(tp, fp), sep = ""))
	#print (paste ("recall : ", recall(tp, fn), sep = ""))
	print (paste ("sensibility : ", sensibility(tp, fn), sep = ""))
	print (paste ("specificity : ", sensibility(tn, fp), sep = ""))
	print (paste ("BCR (balanced classification rate) : ", BCR (tp, tn, fp, fn), sep = ""))
	print (paste ("BER (balanced error rate) : ", 1 - BCR (tp, tn, fp, fn), sep = ""))
	print (paste ("MCC (Matthew) : ", MCC (tp, tn, fp, fn), sep = ""))
}
# for ROC curve -> build the prediction vector from probabilities (just for druggability)
# Threshold the class-1 probability column of a prediction into "d"
# (druggable) / "nd" (non-druggable) labels.
#
# Args:
#   proba_out_predict: matrix / data.frame whose FIRST column is the
#                      probability of the "d" class
#   threshold:         strictly-greater cutoff for calling "d"
# Returns: character vector of "d"/"nd", one per row (NULL for zero rows,
#          matching the original loop's behaviour).
#
# Vectorized with ifelse() instead of growing the result with c() in a
# loop; NA probabilities now yield NA instead of erroring.
generateVect = function(proba_out_predict, threshold){
	proba_class1 = proba_out_predict[,1]
	if (length(proba_class1) == 0){
		return (NULL)
	}
	vect_out = as.vector(ifelse(proba_class1 > threshold, "d", "nd"))
	return (vect_out)
}
#########################
# PERF regression #
#########################
# Square root of the summed squared prediction error.
#
# NOTE(review): despite the name, this is sqrt(SSE) -- there is no division
# by n, so it is NOT a root-*mean*-squared error. Kept on that scale since
# callers may depend on it; confirm before "fixing".
# Vectorized form of the original element-wise while loop; assumes
# length(dpredict) == length(dreal), as in the original's intended use.
vrmsep = function(dreal, dpredict){
	return(sqrt(sum((dreal - dpredict)^2)))
}
# Coefficient of determination R^2 = 1 - SS_res / SS_tot.
# Pairs with a missing value in either vector are dropped first, and the
# mean of the observed values is computed AFTER that filtering, exactly as
# in the original loop implementation.
calR2 = function(dreal, dpredict){
	dperf = na.omit(cbind(as.vector(dreal), as.vector(dpredict)))
	M = mean(dperf[, 1])
	SCEy = sum((dperf[, 1] - dperf[, 2])^2)   # residual sum of squares
	SCEtot = sum((dperf[, 1] - M)^2)          # total sum of squares
	return (as.double(1 - SCEy/SCEtot))
}
# Mean absolute error; the divisor is length(dreal), as in the original.
# Assumes length(dpredict) == length(dreal).
MAE = function(dreal, dpredict){
	return(sum(abs(dreal - dpredict))/length(dreal))
}
# Zero-intercept correlation coefficient (r0^2-style criterion, as used in
# QSAR model validation). First fits the through-origin slope
# k = sum(y * yhat) / sum(yhat^2), then returns 1 - A/B.
#
# NOTE(review): in the second loop both A and B are built from dperf[i,2]
# (the PREDICTED values); the usual r0^2 formula uses the observed values
# for the residual term, and Mreal is computed but never used -- this looks
# like a bug, but is left unchanged pending confirmation against callers.
# tempA/tempB are computed and discarded (debug leftovers).
R02 = function(dreal, dpredict){
	dreal = as.vector(dreal)
	dpredict = as.vector(dpredict)
	#print("Nb val in perf:")
	#print(length(dreal))
	# pair up the two vectors and drop rows with any missing value
	dperf = cbind(dreal, dpredict)
	dperf = na.omit(dperf)
	#print("Nb val predict:")
	#print(dim(dperf))
	Mreal = mean(dperf[,1])
	Mpredict = mean(dperf[,2])
	#print(paste("Mpred - ",Mpredict))
	A = 0
	B = 0
	k = 0
	yypred = 0
	Sumpredict = 0
	# first loop for k
	for (i in seq(1, dim(dperf)[1])){
		#print (i)
		yypred = yypred + (dperf[i,1]*dperf[i,2])
		Sumpredict = Sumpredict + (dperf[i,2]^2)
	}
	#print(yypred)
	#print(Sumpredict)
	k = yypred/Sumpredict
	#print(paste("k - ", k))
	# second loop: accumulate the numerator (A) and denominator (B)
	for (i in seq(1, dim(dperf)[1])){
		#print (i)
		tempA = ((dperf[i,2]-(k*dperf[i,2]))^2)
		tempB = ((dperf[i,2]-Mpredict)^2)
		#print(paste(tempA, tempB))
		A = A + ((dperf[i,2]-(k*dperf[i,2]))^2)
		B = B + ((dperf[i,2]-Mpredict)^2)
	}
	#print(k)
	#print(paste("A -", A))
	#print(paste("B -",B))
	r02 = as.double(A/B)
	return (1 - r02)
}
| /Rscripts/performance.R | no_license | ABorrel/MDQSAR-imatinib | R | false | false | 9,923 | r |
#####################
# Perf for classes #
#####################
# change case of d or nd
# Build binary confusion-matrix counts from 0/1-coded label vectors.
#
# Args:
#   list_predict: predicted labels; 1 = positive class, anything else negative
#   list_real:    true labels, same coding and same length
# Returns:
#   numeric vector c(tp, tn, fp, fn)
#
# Vectorized replacement of the original element-by-element while loop:
# each count is a single logical sum, which is clearer and O(n) in vector
# operations instead of scalar indexing.
perftable = function (list_predict, list_real){
	predicted_pos = list_predict == 1
	correct = list_predict == list_real
	tp = sum(predicted_pos & correct)
	fp = sum(predicted_pos & !correct)
	tn = sum(!predicted_pos & correct)
	fn = sum(!predicted_pos & !correct)
	tableval = c(tp,tn,fp,fn)
	return (tableval)
}
# Scalar classification metrics computed from confusion-matrix counts
# (tp = true pos, tn = true neg, fp = false pos, fn = false neg).

# Fraction of all predictions that are correct.
accuracy = function (tp, tn, fp, fn){
	total = tp + fp + tn + fn
	return ((tp + tn)/total)
}
# Fraction of positive predictions that are truly positive.
precision = function (tp, fp){
	return (tp/(tp + fp))
}
# Fraction of actual positives recovered (alias of sensibility below).
recall = function (tp, fn){
	return (tp/(tp + fn))
}
# Fraction of actual negatives recovered.
specificity = function (tn, fp){
	return (tn/(tn + fp))
}
# Sensitivity: fraction of actual positives recovered.
sensibility = function (tp, fn){
	return (tp/(tp + fn))
}
# Balanced classification rate: mean of sensitivity and specificity.
BCR = function (tp, tn, fp, fn){
	sens = tp/(tp + fn)
	spec = tn/(tn + fp)
	return (0.5*(sens + spec))
}
# Matthews correlation coefficient, in [-1, 1].
MCC = function (tp, tn, fp, fn){
	numerator = tp*tn - fp*fn
	denumerator = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
	return (numerator / sqrt(denumerator))
}
# Print a panel of classification metrics for one prediction and return the
# confusion counts c(tp, tn, fp, fn).
#
# BUGFIX: the original called calculTaux(), which is not defined anywhere in
# this file; perftable() (defined above) has exactly the expected contract
# (predicted + real vectors -> c(tp, tn, fp, fn), used here as
# v_predict[1..4]). If calculTaux() lives in another sourced script,
# restore that call instead.
qualityPredict = function (predict, Y2){
	print (as.vector(predict)[[1]])
	print (as.vector(Y2)[[1]])
	v_predict = perftable (as.vector(predict)[[1]], as.vector(Y2)[[1]])
	print (paste ("accuracy : ", accuracy(v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("precision : ",precision(v_predict[1], v_predict[3]), sep = ""))
	#print (paste ("recall : ", recall(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("sensibility : ", sensibility(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("specificity : ", sensibility(v_predict[2], v_predict[3]), sep = ""))
	print (paste ("BCR (balanced classification rate) : ", BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("BER (balanced error rate) : ", 1 - BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("MCC (Matthew) : ", MCC (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	return (v_predict)
}
# Same report for plain prediction/real vectors (no extraction step).
# Same BUGFIX as above: calculTaux() -> perftable().
qualityPredictList = function (test_vector, real_vector){
	v_predict = perftable (test_vector, real_vector)
	print (paste ("accuracy : ", accuracy(v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("precision : ",precision(v_predict[1], v_predict[3]), sep = ""))
	#print (paste ("recall : ", recall(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("sensibility : ", sensibility(v_predict[1], v_predict[4]), sep = ""))
	print (paste ("specificity : ", sensibility(v_predict[2], v_predict[3]), sep = ""))
	print (paste ("BCR (balanced classification rate) : ", BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("BER (balanced error rate) : ", 1 - BCR (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	print (paste ("MCC (Matthew) : ", MCC (v_predict[1], v_predict[2], v_predict[3], v_predict[4]), sep = ""))
	return (v_predict)
}
# Print a consolidated performance report for a selected model: accuracy,
# sensitivity/specificity and MCC on the training set, the test set,
# leave-one-out (loo) and k-fold cross-validation (CV), plus the selected
# descriptors and the absolute values of their coefficients.
#
# NOTE(review): computedCriteria() is not defined in this file (only
# computedCriteriaCV() is). It is presumably sourced from a sibling script
# and expected to return list(acc, se, sp, mcc) like classPerf() -- confirm.
qualityShowModelSelection = function (list_des_model, coef, v_real_train, v_predict_train, v_real_test, v_predict_test, v_real_loo, v_predict_loo, l_out_CV){
	# loo
	criteria_loo = computedCriteria(v_real_loo, v_predict_loo)
	# CV
	criteria_CV = computedCriteriaCV(l_out_CV)
	# train
	criteria_train = computedCriteria(v_real_train, v_predict_train)
	# test
	criteria_test = computedCriteria(v_real_test, v_predict_test)
	# show
	print ("descriptor")
	print (list_des_model)
	print (as.vector(abs(coef[list_des_model])))
	print ("Acc_loo --- Acc_train --- Acc_test --- Acc_CV_train --- SD_CV_train --- Acc_CV_test --- SD_CV_test")
	print (paste (criteria_loo[[1]], criteria_train[[1]], criteria_test[[1]], criteria_CV["acc_train"], criteria_CV["acc_train_SD"], criteria_CV["acc_test"], criteria_CV["acc_test_SD"], sep = "---"))
	print ("Se_loo --- Sp_loo --- Se_train --- Sp_train --- Se_test --- Sp_test --- Se_CV_train --- SD_CV_train --- Se_CV_test --- SD_CV_test --- Sp_CV_train --- SD_CV_train --- Sp_CV_test --- SD_CV_test")
	print (paste (criteria_loo[[2]], criteria_loo[[3]], criteria_train[[2]], criteria_train[[3]], criteria_test[[2]], criteria_test[[3]], criteria_CV["se_train"], criteria_CV["se_train_SD"], criteria_CV["se_test"],criteria_CV["se_test_SD"], criteria_CV["sp_train"], criteria_CV["sp_train_SD"], criteria_CV["sp_test"],criteria_CV["sp_test_SD"], sep = "---"))
	print ("MCC_loo --- MCC_train --- MCC_test --- MCC_CV_train --- SD_CV_train --- MCC_CV_test --- SD_CV_test")
	print (paste (criteria_loo[[4]], criteria_train[[4]], criteria_test[[4]], criteria_CV["mcc_train"], criteria_CV["mcc_train_SD"], criteria_CV["mcc_test"], criteria_CV["mcc_test_SD"], sep = "---"))
	print ("**********************************************************************")
}
# Compute the four headline criteria for one prediction and return them as
# list(acc, se, sp, mcc). Note the argument order passed to perftable():
# (predicted, real).
classPerf = function (v_real, v_predict){
	rate = perftable (v_predict, v_real)
	acc = accuracy(rate[1], rate[2], rate[3], rate[4])
	se = sensibility(rate[1], rate[4])
	# Consistency fix: call specificity() directly instead of re-using
	# sensibility() with swapped arguments (numerically identical:
	# sensibility(tn, fp) == tn / (tn + fp) == specificity(tn, fp)).
	sp = specificity(rate[2], rate[3])
	mcc = MCC(rate[1], rate[2], rate[3], rate[4])
	return (list (acc, se, sp, mcc))
}
# Aggregate per-fold cross-validation results into means and SDs.
#
# Args:
#   l_out_CV: list(CV_train, CV_test); each is a list with one entry per
#             fold, each fold being list(acc, se, sp, mcc) as returned by
#             classPerf().
# Returns:
#   named numeric vector of mean/SD for acc, se, sp and mcc on the train
#   and test folds (same names and order as the original implementation).
#
# Rewritten to extract each criterion with vapply() instead of growing
# eight separate vectors with append() inside a loop (a quadratic
# anti-pattern); values are emitted in the original order.
computedCriteriaCV = function (l_out_CV){
	CV_train = l_out_CV[[1]]
	CV_test = l_out_CV[[2]]
	# pull the k-th criterion (1 = acc, 2 = se, 3 = sp, 4 = mcc) from folds
	pull_crit = function(folds, k) vapply(folds, function(fold) as.double(fold[[k]]), numeric(1))
	v_out = NULL
	for (k in 1:4){
		crit_train = pull_crit(CV_train, k)
		crit_test = pull_crit(CV_test, k)
		v_out = c(v_out, mean (crit_train), sd (crit_train), mean (crit_test), sd (crit_test))
	}
	names (v_out) = c("acc_train", "acc_train_SD","acc_test", "acc_test_SD","se_train", "se_train_SD", "se_test", "se_test_SD", "sp_train", "sp_train_SD", "sp_test", "sp_test_SD", "mcc_train", "mcc_train_SD", "mcc_test", "mcc_test_SD")
	return (v_out)
}
# Print combined classification metrics for two confusion-count vectors
# (each c(tp, tn, fp, fn) as produced by perftable()).
#
# NOTE(review): tp and tn are accumulated over BOTH inputs, but fp and fn
# are taken from taux2 only (taux1[3] / taux1[4] are ignored). If this is
# meant to be a true cumulative table, that looks like a bug -- confirm
# against the callers before changing.
cumulTaux = function (taux1, taux2){
	tp = taux1[1] + taux2[1]
	tn = taux1[2] + taux2[2]
	# fp/fn intentionally(?) only from the second table -- see NOTE above
	fp = taux2[3]
	fn = taux2[4]
	print (paste ("accuracy : ", accuracy(tp, tn, fp, fn), sep = ""))
	print (paste ("precision : ",precision(tp, fp), sep = ""))
	#print (paste ("recall : ", recall(tp, fn), sep = ""))
	print (paste ("sensibility : ", sensibility(tp, fn), sep = ""))
	print (paste ("specificity : ", sensibility(tn, fp), sep = ""))
	print (paste ("BCR (balanced classification rate) : ", BCR (tp, tn, fp, fn), sep = ""))
	print (paste ("BER (balanced error rate) : ", 1 - BCR (tp, tn, fp, fn), sep = ""))
	print (paste ("MCC (Matthew) : ", MCC (tp, tn, fp, fn), sep = ""))
}
# for ROC curve -> build the prediction vector from probabilities (just for druggability)
# Threshold the class-1 probability column of a prediction into "d"
# (druggable) / "nd" (non-druggable) labels.
#
# Args:
#   proba_out_predict: matrix / data.frame whose FIRST column is the
#                      probability of the "d" class
#   threshold:         strictly-greater cutoff for calling "d"
# Returns: character vector of "d"/"nd", one per row (NULL for zero rows,
#          matching the original loop's behaviour).
#
# Vectorized with ifelse() instead of growing the result with c() in a
# loop; NA probabilities now yield NA instead of erroring.
generateVect = function(proba_out_predict, threshold){
	proba_class1 = proba_out_predict[,1]
	if (length(proba_class1) == 0){
		return (NULL)
	}
	vect_out = as.vector(ifelse(proba_class1 > threshold, "d", "nd"))
	return (vect_out)
}
#########################
# PERF regression #
#########################
# Square root of the summed squared prediction error.
#
# NOTE(review): despite the name, this is sqrt(SSE) -- there is no division
# by n, so it is NOT a root-*mean*-squared error. Kept on that scale since
# callers may depend on it; confirm before "fixing".
# Vectorized form of the original element-wise while loop; assumes
# length(dpredict) == length(dreal), as in the original's intended use.
vrmsep = function(dreal, dpredict){
	return(sqrt(sum((dreal - dpredict)^2)))
}
# Coefficient of determination R^2 = 1 - SS_res / SS_tot.
# Pairs with a missing value in either vector are dropped first, and the
# mean of the observed values is computed AFTER that filtering, exactly as
# in the original loop implementation.
calR2 = function(dreal, dpredict){
	dperf = na.omit(cbind(as.vector(dreal), as.vector(dpredict)))
	M = mean(dperf[, 1])
	SCEy = sum((dperf[, 1] - dperf[, 2])^2)   # residual sum of squares
	SCEtot = sum((dperf[, 1] - M)^2)          # total sum of squares
	return (as.double(1 - SCEy/SCEtot))
}
# Mean absolute error; the divisor is length(dreal), as in the original.
# Assumes length(dpredict) == length(dreal).
MAE = function(dreal, dpredict){
	return(sum(abs(dreal - dpredict))/length(dreal))
}
# Zero-intercept correlation coefficient (r0^2-style criterion, as used in
# QSAR model validation). First fits the through-origin slope
# k = sum(y * yhat) / sum(yhat^2), then returns 1 - A/B.
#
# NOTE(review): in the second loop both A and B are built from dperf[i,2]
# (the PREDICTED values); the usual r0^2 formula uses the observed values
# for the residual term, and Mreal is computed but never used -- this looks
# like a bug, but is left unchanged pending confirmation against callers.
# tempA/tempB are computed and discarded (debug leftovers).
R02 = function(dreal, dpredict){
	dreal = as.vector(dreal)
	dpredict = as.vector(dpredict)
	#print("Nb val in perf:")
	#print(length(dreal))
	# pair up the two vectors and drop rows with any missing value
	dperf = cbind(dreal, dpredict)
	dperf = na.omit(dperf)
	#print("Nb val predict:")
	#print(dim(dperf))
	Mreal = mean(dperf[,1])
	Mpredict = mean(dperf[,2])
	#print(paste("Mpred - ",Mpredict))
	A = 0
	B = 0
	k = 0
	yypred = 0
	Sumpredict = 0
	# first loop for k
	for (i in seq(1, dim(dperf)[1])){
		#print (i)
		yypred = yypred + (dperf[i,1]*dperf[i,2])
		Sumpredict = Sumpredict + (dperf[i,2]^2)
	}
	#print(yypred)
	#print(Sumpredict)
	k = yypred/Sumpredict
	#print(paste("k - ", k))
	# second loop: accumulate the numerator (A) and denominator (B)
	for (i in seq(1, dim(dperf)[1])){
		#print (i)
		tempA = ((dperf[i,2]-(k*dperf[i,2]))^2)
		tempB = ((dperf[i,2]-Mpredict)^2)
		#print(paste(tempA, tempB))
		A = A + ((dperf[i,2]-(k*dperf[i,2]))^2)
		B = B + ((dperf[i,2]-Mpredict)^2)
	}
	#print(k)
	#print(paste("A -", A))
	#print(paste("B -",B))
	r02 = as.double(A/B)
	return (1 - r02)
}
|
library(beepr)
library(data.table)
library(tau)
library(plyr)
source('~/makeNgrams.R')
# Profanity list used to flag whole lines for removal from each corpus.
badwords <- readLines("./CapstoneprojectData/final/en_US/profanity-words.txt")
badwords <- c(badwords, "fucking")
# PERF FIX: collapse the word list into one alternation pattern so a
# single vectorised grepl() call per corpus replaces the original
# line-by-line double sapply() (O(lines * words) grepl calls). This also
# yields an unnamed logical vector instead of one named by the full text
# of every line, which bloated the saved .RData files. Assumes each
# entry of `badwords` is a valid regex on its own, exactly as the
# original per-word grepl() calls already did.
badword.pattern <- paste(badwords, collapse = "|")
en_Twitter <- readLines("./CapstoneprojectData/final/en_US/en_US.twitter.txt", encoding = "UTF-8")
# TRUE where a tweet contains any profanity term
badwordIndexTwitter <- grepl(badword.pattern, en_Twitter)
save(badwordIndexTwitter, file = "./CapstoneprojectData/final/en_US/badwordIndexTwitter.RData")
rm(en_Twitter)
en_News <- readLines("./CapstoneprojectData/final/en_US/en_US.news.txt", encoding = "UTF-8")
badwordIndexNews <- grepl(badword.pattern, en_News)
save(badwordIndexNews, file = "./CapstoneprojectData/final/en_US/badwordIndexNews.RData")
rm(en_News)
en_Blogs <- readLines("./CapstoneprojectData/final/en_US/en_US.blogs.txt", encoding = "UTF-8")
badwordIndexBlogs <- grepl(badword.pattern, en_Blogs)
save(badwordIndexBlogs, file = "./CapstoneprojectData/final/en_US/badwordIndexBlogs.RData")
rm(en_Blogs)
# Skip-n-grams -------------------------------------------------------
# Twitter
# Build skip-5-grams and skip-6-grams from the profanity-filtered
# Twitter corpus. Training rows come from indices pre-sampled and saved
# to disk; makeNgrams() is the project helper sourced above.
load("./CapstoneprojectData/final/en_US/badwordIndexTwitter.RData")
en_Twitter <- readLines("./CapstoneprojectData/final/en_US/en_US.twitter.txt", encoding = "UTF-8")
# Drop every tweet flagged as containing profanity
en_Twitter_clean <- en_Twitter[!badwordIndexTwitter]
rm(en_Twitter)
gc()
# Brings `twitterTrainIndices` (training-row indices) into scope
load("./CapstoneprojectData/final/en_US/twitterTrainIndices.RData")
skipFiveGramsTwitter <- makeNgrams(en_Twitter_clean[twitterTrainIndices],
skip = T,
ngram = 5,
markSentences = F)
save(skipFiveGramsTwitter, file = "./CapstoneprojectData/final/en_US/skipFiveGramsTwitter_clean.RData")
rm(skipFiveGramsTwitter)
gc()
skipSixGramsTwitter <- makeNgrams(en_Twitter_clean[twitterTrainIndices],
skip = T,
ngram = 6,
markSentences = F)
save(skipSixGramsTwitter, file = "./CapstoneprojectData/final/en_US/skipSixGramsTwitter_clean.RData")
rm(skipSixGramsTwitter)
gc()
# News
# Build skip-5-grams and skip-6-grams from the profanity-filtered news
# corpus, mirroring the Twitter/Blogs sections.
load("./CapstoneprojectData/final/en_US/badwordIndexNews.RData")
en_News <- readLines("./CapstoneprojectData/final/en_US/en_US.news.txt", encoding = "UTF-8")
en_News_clean <- en_News[!badwordIndexNews]
rm(en_News)
gc()
# BUGFIX: `newsTrainIndices` was referenced below but never defined or
# loaded anywhere in this script (the Twitter section loads its indices
# from an .RData file; the Blogs section samples them). Sample a 60%
# training split here, mirroring the Blogs section.
set.seed(1234)
newsTrainIndices <- sample(seq_along(en_News_clean),
                           size = round(0.6 * length(en_News_clean)),
                           replace = F)
skipFiveGramsNews <- makeNgrams(en_News_clean[newsTrainIndices],
                                skip = T,
                                ngram = 5,
                                markSentences = F)
save(skipFiveGramsNews, file = "./CapstoneprojectData/final/en_US/skipFiveGramsNews_clean.RData")
rm(skipFiveGramsNews)
gc()
skipSixGramsNews <- makeNgrams(en_News_clean[newsTrainIndices],
                               skip = T,
                               ngram = 6,
                               markSentences = F)
save(skipSixGramsNews, file = "./CapstoneprojectData/final/en_US/skipSixGramsNews_clean.RData")
rm(skipSixGramsNews)
gc()
# Blogs
# Build skip-5-grams and skip-6-grams from the profanity-filtered blogs
# corpus; a 60% training split is sampled here (seeded for
# reproducibility) rather than loaded from disk.
load("./CapstoneprojectData/final/en_US/badwordIndexBlogs.RData")
badwordIndexBlogs <- as.logical(badwordIndexBlogs)
en_Blogs <- readLines("./CapstoneprojectData/final/en_US/en_US.blogs.txt", encoding = "UTF-8")
# Drop every blog line flagged as containing profanity
en_Blogs_clean <- en_Blogs[!badwordIndexBlogs]
rm(en_Blogs)
gc()
set.seed(1234)
# 60% of the clean lines, sampled without replacement, as training rows
blogsTrainIndices <- sample(seq_along(en_Blogs_clean),
size = round(0.6 * length(en_Blogs_clean)),
replace = F)
skipFiveGramsBlogs <- makeNgrams(en_Blogs_clean[blogsTrainIndices],
skip = T,
ngram = 5,
markSentences = F)
save(skipFiveGramsBlogs, file = "./CapstoneprojectData/final/en_US/skipFiveGramsBlogs_clean.RData")
rm(skipFiveGramsBlogs)
gc()
skipSixGramsBlogs <- makeNgrams(en_Blogs_clean[blogsTrainIndices],
skip = T,
ngram = 6,
markSentences = F)
save(skipSixGramsBlogs, file = "./CapstoneprojectData/final/en_US/skipSixGramsBlogs_clean.RData")
rm(skipSixGramsBlogs)
gc()
# Combine n-grams of the different sources ------------------------------------
# Stack the per-source n-gram tables (rbind.fill pads columns missing
# from a source with NA) and sum the count columns per unique ngram.
load("./CapstoneprojectData/final/en_US/skipFiveGramsBlogs_clean.RData")
load("./CapstoneprojectData/final/en_US/skipFiveGramsNews_clean.RData")
load("./CapstoneprojectData/final/en_US/skipFiveGramsTwitter_clean.RData")
allSkipFiveGrams <- rbind.fill(skipFiveGramsBlogs, skipFiveGramsNews,
skipFiveGramsTwitter)
rm(skipFiveGramsBlogs, skipFiveGramsNews, skipFiveGramsTwitter)
gc()
allSkipFiveGrams <- data.table(allSkipFiveGrams)
# Collapse duplicate ngrams across sources by summing every other column
allSkipFiveGrams <- allSkipFiveGrams[, lapply(.SD, sum), by = ngram]
save(allSkipFiveGrams, file = "./CapstoneprojectData/final/en_US/allSkipFiveGrams_clean.RData")
load("./CapstoneprojectData/final/en_US/skipSixGramsBlogs_clean.RData")
load("./CapstoneprojectData/final/en_US/skipSixGramsNews_clean.RData")
load("./CapstoneprojectData/final/en_US/skipSixGramsTwitter_clean.RData")
allSkipSixGrams <- rbind.fill(skipSixGramsBlogs, skipSixGramsNews,
skipSixGramsTwitter)
rm(skipSixGramsBlogs, skipSixGramsNews, skipSixGramsTwitter)
gc()
allSkipSixGrams <- data.table(allSkipSixGrams)
allSkipSixGrams <- allSkipSixGrams[, lapply(.SD, sum), by = ngram]
save(allSkipSixGrams, file = "./CapstoneprojectData/final/en_US/allSkipSixGrams_clean.RData")
| /Capstone create skip n-grams M.R | no_license | mveerara/JHU-Coursera-Capstone-final-project | R | false | false | 5,853 | r | library(beepr)
library(data.table)
library(tau)
library(plyr)
source('~/makeNgrams.R')
# Profanity list used to flag whole lines for removal from each corpus.
badwords <- readLines("./CapstoneprojectData/final/en_US/profanity-words.txt")
badwords <- c(badwords, "fucking")
# PERF FIX: collapse the word list into one alternation pattern so a
# single vectorised grepl() call per corpus replaces the original
# line-by-line double sapply() (O(lines * words) grepl calls). This also
# yields an unnamed logical vector instead of one named by the full text
# of every line, which bloated the saved .RData files. Assumes each
# entry of `badwords` is a valid regex on its own, exactly as the
# original per-word grepl() calls already did.
badword.pattern <- paste(badwords, collapse = "|")
en_Twitter <- readLines("./CapstoneprojectData/final/en_US/en_US.twitter.txt", encoding = "UTF-8")
# TRUE where a tweet contains any profanity term
badwordIndexTwitter <- grepl(badword.pattern, en_Twitter)
save(badwordIndexTwitter, file = "./CapstoneprojectData/final/en_US/badwordIndexTwitter.RData")
rm(en_Twitter)
en_News <- readLines("./CapstoneprojectData/final/en_US/en_US.news.txt", encoding = "UTF-8")
badwordIndexNews <- grepl(badword.pattern, en_News)
save(badwordIndexNews, file = "./CapstoneprojectData/final/en_US/badwordIndexNews.RData")
rm(en_News)
en_Blogs <- readLines("./CapstoneprojectData/final/en_US/en_US.blogs.txt", encoding = "UTF-8")
badwordIndexBlogs <- grepl(badword.pattern, en_Blogs)
save(badwordIndexBlogs, file = "./CapstoneprojectData/final/en_US/badwordIndexBlogs.RData")
rm(en_Blogs)
# Skip-n-grams -------------------------------------------------------
# Twitter
# Build skip-5-grams and skip-6-grams from the profanity-filtered
# Twitter corpus. Training rows come from indices pre-sampled and saved
# to disk; makeNgrams() is the project helper sourced above.
load("./CapstoneprojectData/final/en_US/badwordIndexTwitter.RData")
en_Twitter <- readLines("./CapstoneprojectData/final/en_US/en_US.twitter.txt", encoding = "UTF-8")
# Drop every tweet flagged as containing profanity
en_Twitter_clean <- en_Twitter[!badwordIndexTwitter]
rm(en_Twitter)
gc()
# Brings `twitterTrainIndices` (training-row indices) into scope
load("./CapstoneprojectData/final/en_US/twitterTrainIndices.RData")
skipFiveGramsTwitter <- makeNgrams(en_Twitter_clean[twitterTrainIndices],
skip = T,
ngram = 5,
markSentences = F)
save(skipFiveGramsTwitter, file = "./CapstoneprojectData/final/en_US/skipFiveGramsTwitter_clean.RData")
rm(skipFiveGramsTwitter)
gc()
skipSixGramsTwitter <- makeNgrams(en_Twitter_clean[twitterTrainIndices],
skip = T,
ngram = 6,
markSentences = F)
save(skipSixGramsTwitter, file = "./CapstoneprojectData/final/en_US/skipSixGramsTwitter_clean.RData")
rm(skipSixGramsTwitter)
gc()
# News
# Build skip-5-grams and skip-6-grams from the profanity-filtered news
# corpus, mirroring the Twitter/Blogs sections.
load("./CapstoneprojectData/final/en_US/badwordIndexNews.RData")
en_News <- readLines("./CapstoneprojectData/final/en_US/en_US.news.txt", encoding = "UTF-8")
en_News_clean <- en_News[!badwordIndexNews]
rm(en_News)
gc()
# BUGFIX: `newsTrainIndices` was referenced below but never defined or
# loaded anywhere in this script (the Twitter section loads its indices
# from an .RData file; the Blogs section samples them). Sample a 60%
# training split here, mirroring the Blogs section.
set.seed(1234)
newsTrainIndices <- sample(seq_along(en_News_clean),
                           size = round(0.6 * length(en_News_clean)),
                           replace = F)
skipFiveGramsNews <- makeNgrams(en_News_clean[newsTrainIndices],
                                skip = T,
                                ngram = 5,
                                markSentences = F)
save(skipFiveGramsNews, file = "./CapstoneprojectData/final/en_US/skipFiveGramsNews_clean.RData")
rm(skipFiveGramsNews)
gc()
skipSixGramsNews <- makeNgrams(en_News_clean[newsTrainIndices],
                               skip = T,
                               ngram = 6,
                               markSentences = F)
save(skipSixGramsNews, file = "./CapstoneprojectData/final/en_US/skipSixGramsNews_clean.RData")
rm(skipSixGramsNews)
gc()
# Blogs
# Build skip-5-grams and skip-6-grams from the profanity-filtered blogs
# corpus; a 60% training split is sampled here (seeded for
# reproducibility) rather than loaded from disk.
load("./CapstoneprojectData/final/en_US/badwordIndexBlogs.RData")
badwordIndexBlogs <- as.logical(badwordIndexBlogs)
en_Blogs <- readLines("./CapstoneprojectData/final/en_US/en_US.blogs.txt", encoding = "UTF-8")
# Drop every blog line flagged as containing profanity
en_Blogs_clean <- en_Blogs[!badwordIndexBlogs]
rm(en_Blogs)
gc()
set.seed(1234)
# 60% of the clean lines, sampled without replacement, as training rows
blogsTrainIndices <- sample(seq_along(en_Blogs_clean),
size = round(0.6 * length(en_Blogs_clean)),
replace = F)
skipFiveGramsBlogs <- makeNgrams(en_Blogs_clean[blogsTrainIndices],
skip = T,
ngram = 5,
markSentences = F)
save(skipFiveGramsBlogs, file = "./CapstoneprojectData/final/en_US/skipFiveGramsBlogs_clean.RData")
rm(skipFiveGramsBlogs)
gc()
skipSixGramsBlogs <- makeNgrams(en_Blogs_clean[blogsTrainIndices],
skip = T,
ngram = 6,
markSentences = F)
save(skipSixGramsBlogs, file = "./CapstoneprojectData/final/en_US/skipSixGramsBlogs_clean.RData")
rm(skipSixGramsBlogs)
gc()
# Combine n-grams of the different sources ------------------------------------
# Stack the per-source n-gram tables (rbind.fill pads columns missing
# from a source with NA) and sum the count columns per unique ngram.
load("./CapstoneprojectData/final/en_US/skipFiveGramsBlogs_clean.RData")
load("./CapstoneprojectData/final/en_US/skipFiveGramsNews_clean.RData")
load("./CapstoneprojectData/final/en_US/skipFiveGramsTwitter_clean.RData")
allSkipFiveGrams <- rbind.fill(skipFiveGramsBlogs, skipFiveGramsNews,
skipFiveGramsTwitter)
rm(skipFiveGramsBlogs, skipFiveGramsNews, skipFiveGramsTwitter)
gc()
allSkipFiveGrams <- data.table(allSkipFiveGrams)
# Collapse duplicate ngrams across sources by summing every other column
allSkipFiveGrams <- allSkipFiveGrams[, lapply(.SD, sum), by = ngram]
save(allSkipFiveGrams, file = "./CapstoneprojectData/final/en_US/allSkipFiveGrams_clean.RData")
load("./CapstoneprojectData/final/en_US/skipSixGramsBlogs_clean.RData")
load("./CapstoneprojectData/final/en_US/skipSixGramsNews_clean.RData")
load("./CapstoneprojectData/final/en_US/skipSixGramsTwitter_clean.RData")
allSkipSixGrams <- rbind.fill(skipSixGramsBlogs, skipSixGramsNews,
skipSixGramsTwitter)
rm(skipSixGramsBlogs, skipSixGramsNews, skipSixGramsTwitter)
gc()
allSkipSixGrams <- data.table(allSkipSixGrams)
allSkipSixGrams <- allSkipSixGrams[, lapply(.SD, sum), by = ngram]
save(allSkipSixGrams, file = "./CapstoneprojectData/final/en_US/allSkipSixGrams_clean.RData")
|
# Keep only the sub-polygons whose area exceeds `minarea`, dropping
# slivers. (The name is historical: it *removes* the small polygons.)
#
# poly    - a SpatialPolygons* object (sp package S4 classes)
# minarea - minimum ring area to keep, in the units of `y@area`
# Returns `poly` with small rings removed and plot order renumbered.
getSmallPolys <- function(poly, minarea=0.01) {
  # Ring areas: one numeric vector per top-level polygon
  areas <- lapply(poly@polygons,
                  function(x) sapply(x@Polygons, function(y) y@area))
  # Quick summary of the areas
  print(quantile(unlist(areas)))
  # Indices of the rings that are big enough to keep
  bigpolys <- lapply(areas, function(x) which(x > minarea))
  length(unlist(bigpolys))
  # Keep only the big rings and renumber the plot order.
  # BUGFIX: the original condition was
  #   length(bigpolys[[i]]) >= 1 && bigpolys[[i]] >= 1
  # whose right-hand side is a vector whenever more than one ring
  # survives; `&&` on a length > 1 vector is an error on R >= 4.3.
  # which() indices are always >= 1, so the length check alone is the
  # intended test (and matches the old behaviour, which only ever
  # consulted the first element).
  for(i in seq_along(bigpolys)){
    if(length(bigpolys[[i]]) >= 1){
      poly@polygons[[i]]@Polygons <- poly@polygons[[i]]@Polygons[bigpolys[[i]]]
      poly@polygons[[i]]@plotOrder <- seq_along(poly@polygons[[i]]@Polygons)
    }
  }
  return(poly)
}
| /reduce_shape_file.R | permissive | lucyokell/pdmc_model | R | false | false | 759 | r |
# Keep only the sub-polygons whose area exceeds `minarea`, dropping
# slivers. (The name is historical: it *removes* the small polygons.)
#
# poly    - a SpatialPolygons* object (sp package S4 classes)
# minarea - minimum ring area to keep, in the units of `y@area`
# Returns `poly` with small rings removed and plot order renumbered.
getSmallPolys <- function(poly, minarea=0.01) {
  # Ring areas: one numeric vector per top-level polygon
  areas <- lapply(poly@polygons,
                  function(x) sapply(x@Polygons, function(y) y@area))
  # Quick summary of the areas
  print(quantile(unlist(areas)))
  # Indices of the rings that are big enough to keep
  bigpolys <- lapply(areas, function(x) which(x > minarea))
  length(unlist(bigpolys))
  # Keep only the big rings and renumber the plot order.
  # BUGFIX: the original condition was
  #   length(bigpolys[[i]]) >= 1 && bigpolys[[i]] >= 1
  # whose right-hand side is a vector whenever more than one ring
  # survives; `&&` on a length > 1 vector is an error on R >= 4.3.
  # which() indices are always >= 1, so the length check alone is the
  # intended test (and matches the old behaviour, which only ever
  # consulted the first element).
  for(i in seq_along(bigpolys)){
    if(length(bigpolys[[i]]) >= 1){
      poly@polygons[[i]]@Polygons <- poly@polygons[[i]]@Polygons[bigpolys[[i]]]
      poly@polygons[[i]]@plotOrder <- seq_along(poly@polygons[[i]]@Polygons)
    }
  }
  return(poly)
}
|
#Function that generates a new X
#New X is chosen by first selecting k photos from the first capture occasion.
#These photos are then randomly assigned to a different individual
#This is repeated for each capture occasion
# NOTE(review): the parameter `X` is never used; the body reads the
# globals `X.MH` (list of photo-assignment arrays), the loop index `i`,
# the number of occasions `t`, and abind() from the `abind` package --
# all must exist in the calling environment. Confirm whether `X` was
# meant to replace X.MH[[i-1]].
new_X<-function(X,k){
prev.ind<-nrow(X.MH[[i-1]]) #Computes the number of individuals in current X
#Computes the maximum number of photos per an individual on an occasion
if(is.matrix(X.MH[[i-1]])==TRUE){max.photo<-1
}else{max.photo<-length(X.MH[[i-1]][1,1,])}
#Augment current X array by appending empty space for k photos for each individual on each occasion
canidate.X.tmp<-abind(X.MH[[i-1]],array(NA,dim=c(prev.ind,t,k)))
#Augment current X array by appending empty space for t*k individuals on each occasion
canidate.X<-array(NA,dim=c(prev.ind+t*k,t,max.photo+k))
new.indiv<-matrix(NA,ncol=t,nrow=t*k)
for(j in 1:(max.photo+k)){
canidate.X[,,j]<-rbind(canidate.X.tmp[,,j],new.indiv)
}
#For each capture occasion choose k photos and randomly assign to a different individual
for(j in 1:t){
#choose removal location for k photos and remove those photos
# NOTE(review): `!` binds looser than `==`, so !is.na(x)==FALSE parses
# as !(is.na(x)==FALSE), i.e. is.na(x) -- this samples EMPTY slots, so
# remove.photos are all NA and the relocation below is a no-op. The
# intent was almost certainly which(!is.na(...)) (filled slots); verify.
remove.location<-as.vector(sample(which(!is.na(canidate.X[,j,])==FALSE),k))
remove.photos<-canidate.X[,j,][remove.location]
canidate.X[,j,][remove.location]<-NA
#Relocate the k photos
for(l in 1:k){
#sample individual and place photo
exit='F'
# NOTE(review): `:` binds tighter than `+`, so 1:prev.ind+t*k is
# (1:prev.ind)+t*k, sampling rows (t*k+1)..(prev.ind+t*k) -- neither
# "all rows" (1:(prev.ind+t*k)) nor only the appended new rows. Confirm
# the intended range.
tmp.indiv<-sample(1:prev.ind+t*k, 1)
for(m in 1:(max.photo+k)){
if(is.na(canidate.X[tmp.indiv,j,m])==TRUE && exit=='F'){
canidate.X[tmp.indiv,j,m]<-remove.photos[l]
exit='T'
}
}
}
}
#Remove individuals from array that are all NA, ie have no photos
# The != 'NA' comparison is against the *string* "NA"; combined with
# na.rm=TRUE this counts non-missing entries (assuming no photo ID is
# literally the text "NA" -- confirm).
keep<-c(1:nrow(canidate.X))
for(j in 1:nrow(canidate.X)){
if(sum(canidate.X[j,,]!='NA',na.rm = TRUE)==0){keep[j]=NA}
}
# NOTE(review): if NO entry of `keep` is NA, keep[-which(is.na(keep))]
# is keep[integer(0)], i.e. an EMPTY vector, which would drop every
# individual below. Guard with if(any(is.na(keep))) before subsetting.
keep<-keep[-which(is.na(keep))]
canidate.X=canidate.X[keep,,]
#Remove matrices from 3rd dimension of array that are all NA
keep<-c(1:(max.photo+k))
for (j in 1:(max.photo+k)){
if(sum(canidate.X[,,j]!='NA',na.rm = TRUE)==0){keep[j]=NA}
}
# Same empty-vector hazard as above when no slice is all-NA.
keep<-keep[-which(is.na(keep))]
canidate.X=canidate.X[,,keep]
} | /Code/new_X.R | no_license | AmandaEllis/Sampler_Project | R | false | false | 2,176 | r | #Function that generates a new X
#New X is chosen by first selecting k photos from the first capture occasion.
#These photos are then randomly assigned to a different individual
#This is repeated for each capture occasion
# NOTE(review): the parameter `X` is never used; the body reads the
# globals `X.MH` (list of photo-assignment arrays), the loop index `i`,
# the number of occasions `t`, and abind() from the `abind` package --
# all must exist in the calling environment. Confirm whether `X` was
# meant to replace X.MH[[i-1]].
new_X<-function(X,k){
prev.ind<-nrow(X.MH[[i-1]]) #Computes the number of individuals in current X
#Computes the maximum number of photos per an individual on an occasion
if(is.matrix(X.MH[[i-1]])==TRUE){max.photo<-1
}else{max.photo<-length(X.MH[[i-1]][1,1,])}
#Augment current X array by appending empty space for k photos for each individual on each occasion
canidate.X.tmp<-abind(X.MH[[i-1]],array(NA,dim=c(prev.ind,t,k)))
#Augment current X array by appending empty space for t*k individuals on each occasion
canidate.X<-array(NA,dim=c(prev.ind+t*k,t,max.photo+k))
new.indiv<-matrix(NA,ncol=t,nrow=t*k)
for(j in 1:(max.photo+k)){
canidate.X[,,j]<-rbind(canidate.X.tmp[,,j],new.indiv)
}
#For each capture occasion choose k photos and randomly assign to a different individual
for(j in 1:t){
#choose removal location for k photos and remove those photos
# NOTE(review): `!` binds looser than `==`, so !is.na(x)==FALSE parses
# as !(is.na(x)==FALSE), i.e. is.na(x) -- this samples EMPTY slots, so
# remove.photos are all NA and the relocation below is a no-op. The
# intent was almost certainly which(!is.na(...)) (filled slots); verify.
remove.location<-as.vector(sample(which(!is.na(canidate.X[,j,])==FALSE),k))
remove.photos<-canidate.X[,j,][remove.location]
canidate.X[,j,][remove.location]<-NA
#Relocate the k photos
for(l in 1:k){
#sample individual and place photo
exit='F'
# NOTE(review): `:` binds tighter than `+`, so 1:prev.ind+t*k is
# (1:prev.ind)+t*k, sampling rows (t*k+1)..(prev.ind+t*k) -- neither
# "all rows" (1:(prev.ind+t*k)) nor only the appended new rows. Confirm
# the intended range.
tmp.indiv<-sample(1:prev.ind+t*k, 1)
for(m in 1:(max.photo+k)){
if(is.na(canidate.X[tmp.indiv,j,m])==TRUE && exit=='F'){
canidate.X[tmp.indiv,j,m]<-remove.photos[l]
exit='T'
}
}
}
}
#Remove individuals from array that are all NA, ie have no photos
# The != 'NA' comparison is against the *string* "NA"; combined with
# na.rm=TRUE this counts non-missing entries (assuming no photo ID is
# literally the text "NA" -- confirm).
keep<-c(1:nrow(canidate.X))
for(j in 1:nrow(canidate.X)){
if(sum(canidate.X[j,,]!='NA',na.rm = TRUE)==0){keep[j]=NA}
}
# NOTE(review): if NO entry of `keep` is NA, keep[-which(is.na(keep))]
# is keep[integer(0)], i.e. an EMPTY vector, which would drop every
# individual below. Guard with if(any(is.na(keep))) before subsetting.
keep<-keep[-which(is.na(keep))]
canidate.X=canidate.X[keep,,]
#Remove matrices from 3rd dimension of array that are all NA
keep<-c(1:(max.photo+k))
for (j in 1:(max.photo+k)){
if(sum(canidate.X[,,j]!='NA',na.rm = TRUE)==0){keep[j]=NA}
}
# Same empty-vector hazard as above when no slice is all-NA.
keep<-keep[-which(is.na(keep))]
canidate.X=canidate.X[,,keep]
} |
################################ USER INPUTS #################################################
# Compute SPEI drought indices for the observed gridMET record and for
# each climate future (CF) from MACA projections.
# NOTE(review): load(file) below presumably supplies colors2, Lat, CFs,
# CF_GCM, WB_GCMs, ALL_HIST, ALL_FUTURE, SPEI_per, SPEI_start, SPEI_end,
# truncation and SiteID (all used later but not defined here); ymd(),
# thornthwaite() and spei() come from lubridate/SPEI, presumably
# attached elsewhere -- confirm.
Gridmet <- read.csv("data/park-specific/input/GridMet.csv",header=T)
file <- list.files(path = './data/park-specific/output', pattern = 'Final_Environment.RData', full.names = TRUE)
load(file)
colors3<-c("white",colors2)
if(dir.exists('./figures/additional') == FALSE){
dir.create('./figures/additional')
}
OutDir<-("./figures/additional")
################################ END USER INPUTS #############################################
############################### FORMAT DATAFRAMES ############################################
# Gridmet
# Convert to metric (deg F -> C, inches -> mm) and aggregate to monthly
# precip totals and mean temperature.
Gridmet$Date<-ymd(Gridmet$Date)
Gridmet$Month<-format(Gridmet$Date,format="%m")
Gridmet$Year<-format(Gridmet$Date,format="%Y")
Gridmet$TmeanC<-(((Gridmet$tmax+Gridmet$tmin)/2)-32)*5/9
Gridmet$Pr_mm<-Gridmet$precip*25.4
d<-aggregate(Pr_mm~Month+Year,Gridmet,sum)
d2<-aggregate(TmeanC~Month+Year,Gridmet,mean)
drt<-merge(d,d2,by=c("Month","Year"));rm(d,d2)
drt<-drt[with(drt, order(Year, Month)),]
# Thornthwaite potential evapotranspiration from monthly mean temp
drt$PET<-thornthwaite(drt$TmeanC,lat = Lat)
# Run SPEI on gridmet
tp<-ts(drt$Pr_mm,frequency=12,start=c(1979,1))
tpet<-ts(drt$PET,frequency=12,start=c(1979,1))
SPEI<-spei(tp - tpet, SPEI_per)
PlotName <- "Gridmet-SPEI"
plot1 <- paste('./figures/additional/', PlotName)
jpeg(paste(plot1, ".jpg", sep = ""), width = 350, height = 350)
plot(x=SPEI,main="Gridmet") #eventually prob want to figure out how to make x-axis date
dev.off()
drt$SPEI<-SPEI$fitted;drt$SPEI[which(is.na(drt$SPEI))]<-0 #records used to normalize data are NAs - convert to 0s
# Redundant: column 6 is already named SPEI by the assignment above
names(drt)[6]<-"SPEI"
drt3<-aggregate(cbind(Pr_mm,SPEI)~Year,drt,mean)
# # MACA This step only needed if historical GCMs don't have RCPs pasted on end
# AH<-ALL_HIST
# ALL_HIST$GCM<-paste(ALL_HIST$GCM,"rcp45",sep=".")
# AH$GCM<-paste(AH$GCM,"rcp85",sep=".")
# ALL_HIST<-rbind(ALL_HIST,AH); rm(AH)
# Stack the historical and future MACA records for the selected GCMs,
# convert to metric, and aggregate to monthly values per GCM.
H<-subset(ALL_HIST,GCM %in% WB_GCMs,select=c(Date,GCM,PrecipCustom,TavgCustom))
F<-subset(ALL_FUTURE, GCM %in% WB_GCMs, select=c(Date,GCM,PrecipCustom,TavgCustom))
ALL<-rbind(H,F)
ALL$Month<-format(ALL$Date,format="%m")
ALL$Year<-format(ALL$Date,format="%Y")
ALL$Pr_mm<-ALL$PrecipCustom*25.4
ALL$TmeanC<-(ALL$TavgCustom-32)*5/9
M<-aggregate(Pr_mm~Month+Year+GCM,ALL,sum)
Mon<-aggregate(TmeanC~Month+Year+GCM,ALL,mean)
Mon<-merge(Mon,M,by=c("Month","Year","GCM"));rm(M)
Mon$PET<-thornthwaite(Mon$TmeanC,lat=Lat)
Mon<-merge(Mon,CF_GCM,by="GCM")
Mon$CF<-factor(Mon$CF,levels=unique(Mon$CF))
# Average the GCMs within each climate future
MON<-aggregate(cbind(Pr_mm,PET)~Month+Year+CF,Mon,mean)
MON<-MON[with(MON, order(CF,Year, Month)),]
CF.split<-split(MON,MON$CF) #Splits df into array by CF
# this step is done because each CF has unique historical record and SPEI normalized to average conditions at beginning of record
for (i in 1:length(CF.split)){
name=names(CF.split)[i]
t<-CF.split[[i]]
tp<-ts(t$Pr_mm,frequency=12,start=c(SPEI_start,1))
tpet<-ts(t$PET,frequency=12,start=c(SPEI_start,1))
SPEI<-spei(tp-tpet,SPEI_per,ref.start=c(SPEI_start,1),ref.end=c(SPEI_end,12))
CF.split[[i]]$SPEI <- SPEI$fitted[1:length(SPEI$fitted)]
# Plot each CF
# NOTE(review): `plot` is rebound to a character path here; the call
# plot(x=SPEI,...) below still dispatches to the plot function because R
# skips non-function bindings when calling, but the shadowing is fragile.
plot <- paste('./figures/additional/', name)
jpeg(paste(plot,"-SPEI.jpg",sep=""), width = 350, height = 350)
plot(x=SPEI,main=name) #eventually prob want to figure out how to make x-axis date
dev.off()
}
all2<- ldply(CF.split, data.frame) #convert back to df
all2$SPEI[which(is.na(all2$SPEI))]<-0 #records used to normalize data are NAs - convert to 0s
all2$SPEI[which(is.infinite(all2$SPEI))]<- -5 #getting some -Inf values that are large jumps, temp fix
#
# all3<-subset(all2,Month==9) #Because we aggregated drought years as only applying to growing season
# # If you are doing for place where winter drought would be important, use following line
# Annual mean precip and SPEI per year and climate future
all3<-aggregate(cbind(Pr_mm,SPEI)~Year+CF,all2,mean)
###################################### PLOT ANNUAL TIME-SERIES #################################################
############################################# Plotting ###########################################################
# Shared ggplot themes and annual SPEI bar charts: one for the observed
# gridMET record and one per climate future (with and without the
# gridMET record prepended).
PlotTheme = theme(axis.text=element_text(size=20), #Text size for axis tick mark labels
axis.title.x=element_blank(), #Text size and alignment for x-axis label
axis.title.y=element_text(size=24, vjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for y-axis label
plot.title=element_text(size=26,face="bold",hjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for plot title
legend.title=element_text(size=24), #Text size of legend category labels
legend.text=element_text(size=22), #Text size of legend title
legend.position = "bottom",
panel.background = element_blank(), #Background white
panel.grid.major = element_line("light grey",0.3)) #add grid back
BarPlotTheme = theme(axis.text.x=element_text(size=24), #Text size for axis tick mark labels
axis.text.y=element_text(size=20),
axis.title.x=element_blank(), #Text size and alignment for x-axis label
axis.title.y=element_text(size=24, vjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for y-axis label
plot.title=element_text(size=26,face="bold",hjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for plot title
legend.position = "none")
#Height and width
PlotWidth = 15
PlotHeight = 9
# Gridmet
# Classify each year wet/dry by SPEI sign for bar colouring
drt3$col[drt3$SPEI>=0]<-"wet"
drt3$col[drt3$SPEI<0]<-"dry"
drt3$col<-factor(drt3$col, levels=c("wet","dry"))
ggplot(data = drt3, aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
geom_bar(stat="identity",aes(fill=col),col="black") +
geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
scale_fill_manual(name="",values =c("blue","red")) +
labs(title = "SPEI values for Historical Period (gridMET)",
x = "Date", y = "SPEI") +
guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave("Recent Drought.png", path = './figures/additional', width = 18, height = 9)
# MACA prep dataframe
all3$col[all3$SPEI>=0]<-"wet"
all3$col[all3$SPEI<0]<-"dry"
all3$col<-factor(all3$col, levels=c("wet","dry"))
all3$Year<-as.numeric(all3$Year)
# CF
# First climate future: projection-only plot plus gridMET+MACA plot
CF1<-subset(all3, CF %in% CFs[1] )
grid.append<-drt3; grid.append$CF<-CFs[1]
grid.append<-subset(grid.append, select=c(Year,CF,Pr_mm:col))
grid.append<-rbind(grid.append, subset(CF1,Year>=2020 & Year < 2070))
ggplot(data = subset(CF1,Year>=2025&Year<2056), aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
geom_bar(stat="identity",aes(fill=col),col="black") +
geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
scale_fill_manual(name="",values =c("blue","red")) +
labs(title = paste("SPEI values for", CFs[1], "climate future", sep = " " ),
x = "Date", y = "SPEI") +
guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[1], "Drought.png",sep=" "), path = './figures/additional', width = 18, height = 9)
ggplot(data = grid.append, aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
geom_bar(stat="identity",aes(fill=col),col="black") +
geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
scale_fill_manual(name="",values =c("blue","red")) +
labs(title = paste("SPEI values for", CFs[1], "(Gridmet + MACA)", sep = " " ),
x = "Date", y = "SPEI") +
guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[1], "Drought+Gridmet.png",sep=" "), path = './figures/additional', width = 18, height = 9)
# CF 2
# Second climate future: same pair of plots
CF2<-subset(all3, CF %in% CFs[2] )
grid.append<-drt3; grid.append$CF<-CFs[2]
grid.append<-subset(grid.append, select=c(Year,CF,Pr_mm:col))
grid.append<-rbind(grid.append, subset(CF2,Year>=2020 & Year < 2070))
ggplot(data = subset(CF2,Year>=2025&Year<2056), aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
geom_bar(stat="identity",aes(fill=col),col="black") +
geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
scale_fill_manual(name="",values =c("blue","red")) +
labs(title = paste("SPEI values for", CFs[2], "climate future", sep = " " ),
x = "Date", y = "SPEI") +
guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[2], "Drought.png",sep=" "), path = './figures/additional', width = 18, height = 9)
ggplot(data = grid.append, aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
geom_bar(stat="identity",aes(fill=col),col="black") +
geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
scale_fill_manual(name="",values =c("blue","red")) +
labs(title = paste("SPEI values for", CFs[2], "(Gridmet + MACA)", sep = " " ),
x = "Date", y = "SPEI") +
guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[2], "Drought+Gridmet.png",sep=" "), path = './figures/additional', width = 18, height = 9)
# Split into periods
# Identify drought events (SPEI < truncation) in the 1950-1999 window
# and compute per-event duration, severity, peak and frequency.
Historical2<-subset(all3, Year >= 1950 & Year <2000)
min(Historical2$SPEI)
Future2<-subset(all3, Year >= 2025 & Year <2056)
min(Future2$SPEI)
# Calculate drought characteristics
Historical2$Drought=0
Historical2$Drought[which(Historical2$SPEI < truncation)] <- 1
# Drought Duration calculation
# 1 Create var for beginnign drought and var for end drought, then count months between
head(Historical2)
# Create count of years within CF
length(Historical2$CF)/length(unique(Historical2$CF))
Historical2$count<-rep(seq(1, length(Historical2$CF)/length(unique(Historical2$CF)) # 50=# years in historical period
, 1),length(unique(Historical2$CF))) # 4=repeat # of CFs
Historical2$length<-0
# Running length of the current drought spell (0 outside droughts)
Historical2$length <- Historical2$Drought * unlist(lapply(rle(Historical2$Drought)$lengths, seq_len))
mean(Historical2$length[Historical2$length>0])
# To get duration, now just remove those that are not droughts and do calculations on length
# Give each drought period an ID
D<-which(Historical2$length==1)
HistoricalDrought<-data.frame()
HistoricalDrought<-setNames(data.frame(matrix(ncol=10,nrow=length(D))),c("DID","Start","End","Year","per","CF","duration","severity","peak","freq"))
HistoricalDrought$Start = Sys.time(); HistoricalDrought$End = Sys.time()
HistoricalDrought$per<-as.factor("H")
# Calculate variables for each drought period
# NOTE(review): Historical2 is built from an aggregate over Year+CF and
# has NO Date column, so Historical2$Date[D[i]] is NULL and these
# strptime() assignments cannot work as written -- verify whether Start
# and End were ever populated, or derive them from Year instead.
for (i in 1:length(D)){
HistoricalDrought$DID[i]<-i
HistoricalDrought$Start[i]<-strptime(Historical2$Date[D[i]],format="%Y-%m-%d",tz="MST")
HistoricalDrought$Year[i]<-Historical2$Year[D[i]]
}
# Indices where a non-drought run begins (i.e. a drought just ended)
ND<- which((Historical2$length == 0) * unlist(lapply(rle(Historical2$length)$lengths, seq_len)) == 1)
if(ND[1]==1) ND<-ND[2:length(ND)]
if(Historical2$Drought[length(Historical2$Drought)]==1) ND[length(ND)+1]<-length(Historical2$length)
###### !!!!!!!!!!!
# If last row in drought df is a drought period - use next line of code. Otherwies proceed.
# ND[length(ND)+1]<-length(Historical2$length) #had to add this step because last drought went until end of df so no end in ND
#Duration # months SPEI < truncation; Severity # Sum(SPEI) when SPEI < truncation; Peak # min(SPEI) when SPEI < truncation
for (i in 1:length(ND)){
HistoricalDrought$CF[i]<-as.character(Historical2$CF[D[i]])
HistoricalDrought$End[i]<-strptime(Historical2$Date[ND[i]],format="%Y-%m-%d",tz="MST")
HistoricalDrought$duration[i]<-Historical2$length[ND[i]-1]
HistoricalDrought$severity[i]<-sum(Historical2$SPEI[D[i]:(ND[i]-1)])
HistoricalDrought$peak[i]<-min(Historical2$SPEI[D[i]:(ND[i]-1)])
}
HistoricalDrought$CF<-factor(HistoricalDrought$CF, levels=levels(Historical2$CF))
## Freq
# Drought-free interval preceding each event, computed per CF as the
# gap (in `count` units) between successive drought starts and ends
CF.split<-split(Historical2,Historical2$CF)
for (i in 1:length(CF.split)){
name=as.character(unique(CF.split[[i]]$CF))
d<-which(CF.split[[i]]$length==1)
nd<-which((CF.split[[i]]$length == 0) * unlist(lapply(rle(CF.split[[i]]$length)$lengths, seq_len)) == 1)
if(length(nd)>length(d)) {nd=nd[2:length(nd)]}
# d[j+1] is NA for the last event, leaving its freq as NA (handled with
# na.rm=TRUE when averaged later)
for (j in 1:length(d)){
HistoricalDrought$freq[which(HistoricalDrought$CF==name & HistoricalDrought$Year==CF.split[[i]]$Year[d[j]])] <-
CF.split[[i]]$count[d[j+1]]-CF.split[[i]]$count[nd[j]]
}
}
####### Future
# Calculate drought characteristics
# Same event extraction as the historical block, applied to the
# 2025-2055 projection window.
Future2$Drought=0
Future2$Drought[which(Future2$SPEI < truncation)] <- 1
# Drought Duration calculation
# 1 Create var for beginnign drought and var for end drought, then count months between
head(Future2)
# Create count of months within CF
length(Future2$CF)/length(unique(Future2$CF))
Future2$count<-rep(seq(1, length(Future2$CF)/length(unique(Future2$CF)),
1),length(unique(Future2$CF))) # repeat # of CFs
Future2$length<-0
# Running length of the current drought spell (0 outside droughts)
Future2$length <- Future2$Drought * unlist(lapply(rle(Future2$Drought)$lengths, seq_len))
mean(Future2$length[Future2$length>0])
# To get duration, now just remove those that are not droughts and do calculations on length
# Give each drought period an ID
D<-which(Future2$length==1)
FutureDrought<-data.frame()
FutureDrought<-setNames(data.frame(matrix(ncol=10,nrow=length(D))),c("DID","Start","End","Year","per","CF","duration","severity","peak","freq"))
FutureDrought$Start = Sys.time(); FutureDrought$End = Sys.time()
FutureDrought$per<-as.factor("F")
# Calculate variables for each drought period
# NOTE(review): Future2 has no Date column (same issue as the historical
# block), so these strptime() assignments on Future2$Date cannot work as
# written -- verify.
for (i in 1:length(D)){
FutureDrought$DID[i]<-i
FutureDrought$Start[i]<-strptime(Future2$Date[D[i]],format="%Y-%m-%d",tz="MST")
FutureDrought$Year[i]<-Future2$Year[D[i]]
}
# Indices where a non-drought run begins (i.e. a drought just ended)
ND<- which((Future2$length == 0) * unlist(lapply(rle(Future2$length)$lengths, seq_len)) == 1)
if(ND[1]==1) ND<-ND[2:length(ND)]
if(Future2$Drought[length(Future2$Drought)]==1) ND[length(ND)+1]<-length(Future2$length)
#Duration # months SPEI < truncation; Severity # Sum(SPEI) when SPEI < truncation; Peak # min(SPEI) when SPEI < truncation
for (i in 1:length(ND)){
FutureDrought$CF[i]<-as.character(Future2$CF[D[i]])
FutureDrought$End[i]<-strptime(Future2$Date[ND[i]],format="%Y-%m-%d",tz="MST")
FutureDrought$duration[i]<-Future2$length[ND[i]-1]
FutureDrought$severity[i]<-sum(Future2$SPEI[D[i]:(ND[i]-1)])
FutureDrought$peak[i]<-min(Future2$SPEI[D[i]:(ND[i]-1)])
}
FutureDrought$CF<-as.factor(FutureDrought$CF)
## Freq
# Drought-free interval preceding each event, per CF (see historical
# block); the last event's freq stays NA
CF.split<-split(Future2,Future2$CF)
for (i in 1:length(CF.split)){
name=as.character(unique(CF.split[[i]]$CF))
d<-which(CF.split[[i]]$length==1)
nd<-which((CF.split[[i]]$length == 0) * unlist(lapply(rle(CF.split[[i]]$length)$lengths, seq_len)) == 1)
if(length(nd)>length(d)) {nd=nd[2:length(nd)]}
for (j in 1:length(d)){
FutureDrought$freq[which(FutureDrought$CF==name & FutureDrought$Year==CF.split[[i]]$Year[d[j]])] <-
CF.split[[i]]$count[d[j+1]]-CF.split[[i]]$count[nd[j]]
}
}
head(HistoricalDrought)
head(FutureDrought)
Drought<-rbind(HistoricalDrought,FutureDrought)
write.csv(Drought,"./data/park-specific/output/Drt.all.csv",row.names=FALSE) # csv with all drought events
Hist_char<-setNames(data.frame(matrix(ncol=6,nrow=length(levels(HistoricalDrought$CF)))),c("CF","per","Duration","Severity","Intensity","Frequency"))
Hist_char$CF<-levels(HistoricalDrought$CF)
Hist_char$per<-"H"
for (i in 1:length(Hist_char$CF)){
name<-Hist_char$CF[i]
Hist_char$Frequency[i]<-mean(HistoricalDrought$freq[which(HistoricalDrought$CF == name)],na.rm=TRUE)
Hist_char$Duration[i]<-mean(HistoricalDrought$duration[which(HistoricalDrought$CF == name)])
Hist_char$Severity[i]<-mean(HistoricalDrought$severity[which(HistoricalDrought$CF == name)])
Hist_char$Intensity[i]<-mean(HistoricalDrought$peak[which(HistoricalDrought$CF == name)])
}
Drought_char<-setNames(data.frame(matrix(ncol=6,nrow=length(levels(FutureDrought$CF)))),c("CF","per","Duration","Severity","Intensity","Frequency"))
Drought_char$CF<-levels(FutureDrought$CF)
Drought_char$per<-"F"
for (i in 1:length(Drought_char$CF)){
name<-Drought_char$CF[i]
Drought_char$Frequency[i]<-mean(FutureDrought$freq[which(FutureDrought$CF == name)],na.rm=TRUE)
Drought_char$Duration[i]<-mean(FutureDrought$duration[which(FutureDrought$CF == name)])
Drought_char$Severity[i]<-mean(FutureDrought$severity[which(FutureDrought$CF == name)])
Drought_char$Intensity[i]<-mean(FutureDrought$peak[which(FutureDrought$CF == name)])
}
Drought_char<-rbind(Hist_char,Drought_char)
# csv for averages for each CF for hist and future periods
write.csv(Drought_char,"./data/park-specific/output/Drought_char.csv",row.names=FALSE)
########################################### BAR PLOTS ###############################################
#Drought duration barplot
# Split the combined characteristics table back into historical/future halves
Drought_char_H = subset(Drought_char, per == "H")
Drought_char_F = subset(Drought_char, per == "F")
# All historical rows get one label so they plot as a single "Historical" bar
Drought_char_H$CF<-"Historical"
DroughtH = aggregate(cbind(Duration,Severity,Intensity,Frequency)~CF+per,Drought_char_H,mean, na.rm=TRUE)
Drought_all = rbind(DroughtH, Drought_char_F)
Drought_all$CF = factor(Drought_all$CF, levels = c("Historical",CFs))
#Change NaN's to 0's
Drought_char_H[is.na(Drought_char_H) == TRUE] = 0
# Future-minus-historical change in each characteristic.
# NOTE(review): Drought_char_H$CF was overwritten to "Historical" above, so this
# CF column is all "Historical" and becomes NA after the factor() call below --
# Drought_char_F$CF was probably intended; confirm before using Drought_delta.
# Also assumes H and F rows are aligned in the same CF order.
Drought_delta = data.frame(CF = Drought_char_H$CF)
Drought_delta$Duration = Drought_char_F$Duration - Drought_char_H$Duration
Drought_delta$Severity = Drought_char_F$Severity - Drought_char_H$Severity
Drought_delta$Intensity = Drought_char_F$Intensity - Drought_char_H$Intensity
Drought_delta$Frequency = Drought_char_F$Frequency - Drought_char_H$Frequency
Drought_delta$CF = factor(Drought_delta$CF, levels = c(CFs))
# Drought characteristic bar plots: duration, severity, intensity, and
# drought-free interval. One spec per panel: the Drought_all column to plot,
# the y-axis label, the title suffix, and the output file suffix.
bar_specs <- list(
  list(col = "Duration", ylab = "Years",
       title = "- Average Drought Duration", file = "Duration.png"),
  list(col = "Severity", ylab = "Severity (SPEI * duration)",
       title = "- Average Drought Severity", file = "Severity.png"),
  list(col = "Intensity", ylab = "Intensity (Minimum SPEI values)",
       title = "- Average Drought Intensity", file = "Intensity.png"),
  list(col = "Frequency", ylab = "Years",
       title = "- Average Drought-Free Interval", file = "Frequency.png")
)
for (spec in bar_specs) {
  panel <- ggplot(Drought_all,
                  aes(x = CF, y = as.numeric(Drought_all[[spec$col]]), fill = CF)) +
    geom_bar(stat = "identity", col = "black") +
    scale_y_continuous() +
    labs(x = "", y = spec$ylab,
         title = paste(SiteID, spec$title)) +
    scale_fill_manual(values = colors3) +
    BarPlotTheme
  # Explicit plot= saves the panel built above (same output as the implicit
  # last_plot() default used previously)
  ggsave(paste(SiteID, spec$file), plot = panel, path = './figures/additional',
         height = PlotHeight, width = PlotWidth, dpi = 600)
}
| /scripts/Additional-tables-plots/RSS_MACA_drought_char.R | no_license | Janelle88/CCRP_Climate_Futures_v1.0 | R | false | false | 19,783 | r |
################################ USER INPUTS #################################################
# Daily gridMET observations for the park
Gridmet <- read.csv("data/park-specific/input/GridMet.csv", header = TRUE)
# Workspace from the upstream MACA processing step; supplies ALL_HIST, ALL_FUTURE,
# CF_GCM, CFs, WB_GCMs, colors2, SiteID, Lat, truncation, SPEI_per/SPEI_start/SPEI_end
file <- list.files(path = './data/park-specific/output', pattern = 'Final_Environment.RData', full.names = TRUE)
# Fail loudly if the workspace is missing or ambiguous instead of letting load() error
if (length(file) != 1) stop("Expected exactly one Final_Environment.RData in ./data/park-specific/output", call. = FALSE)
load(file)
# Bar-plot palette: white for the historical bar, then the CF colors
colors3 <- c("white", colors2)
# Output folder for the supplemental figures
if (!dir.exists('./figures/additional')) {
  dir.create('./figures/additional')
}
OutDir <- ("./figures/additional")
################################ END USER INPUTS #############################################
############################### FORMAT DATAFRAMES ############################################
# Gridmet: add date parts, convert temperature F->C and precip inches->mm,
# then aggregate to monthly values for the SPEI calculation.
Gridmet$Date<-ymd(Gridmet$Date)
Gridmet$Month<-format(Gridmet$Date,format="%m")
Gridmet$Year<-format(Gridmet$Date,format="%Y")
Gridmet$TmeanC<-(((Gridmet$tmax+Gridmet$tmin)/2)-32)*5/9
Gridmet$Pr_mm<-Gridmet$precip*25.4
d<-aggregate(Pr_mm~Month+Year,Gridmet,sum)    # monthly precipitation totals
d2<-aggregate(TmeanC~Month+Year,Gridmet,mean) # monthly mean temperature
drt<-merge(d,d2,by=c("Month","Year"));rm(d,d2)
drt<-drt[with(drt, order(Year, Month)),]
# Potential evapotranspiration (Thornthwaite needs only monthly Tmean + latitude)
drt$PET<-thornthwaite(drt$TmeanC,lat = Lat)
# Run SPEI on gridmet: climatic water balance (P - PET) as a monthly ts from Jan 1979
tp<-ts(drt$Pr_mm,frequency=12,start=c(1979,1))
tpet<-ts(drt$PET,frequency=12,start=c(1979,1))
SPEI<-spei(tp - tpet, SPEI_per)
PlotName <- "Gridmet-SPEI"
# paste0 (not paste) so the file name does not pick up a stray space after the "/"
plot1 <- paste0('./figures/additional/', PlotName)
jpeg(paste0(plot1, ".jpg"), width = 350, height = 350)
plot(x=SPEI,main="Gridmet") #eventually prob want to figure out how to make x-axis date
dev.off()
drt$SPEI<-SPEI$fitted;drt$SPEI[which(is.na(drt$SPEI))]<-0 #records used to normalize data are NAs - convert to 0s
names(drt)[6]<-"SPEI"
# Annual means of precip and SPEI for the historical time-series plot
drt3<-aggregate(cbind(Pr_mm,SPEI)~Year,drt,mean)
# # MACA This step only needed if historical GCMs don't have RCPs pasted on end
# AH<-ALL_HIST
# ALL_HIST$GCM<-paste(ALL_HIST$GCM,"rcp45",sep=".")
# AH$GCM<-paste(AH$GCM,"rcp85",sep=".")
# ALL_HIST<-rbind(ALL_HIST,AH); rm(AH)
# Keep only the water-balance GCMs. Renamed from H/F: assigning to `F` masked
# the built-in FALSE, an easy source of downstream bugs.
maca_hist <- subset(ALL_HIST, GCM %in% WB_GCMs, select = c(Date, GCM, PrecipCustom, TavgCustom))
maca_fut <- subset(ALL_FUTURE, GCM %in% WB_GCMs, select = c(Date, GCM, PrecipCustom, TavgCustom))
ALL <- rbind(maca_hist, maca_fut)
rm(maca_hist, maca_fut)
ALL$Month<-format(ALL$Date,format="%m")
ALL$Year<-format(ALL$Date,format="%Y")
# Unit conversions: inches -> mm, F -> C
ALL$Pr_mm<-ALL$PrecipCustom*25.4
ALL$TmeanC<-(ALL$TavgCustom-32)*5/9
# Monthly totals/means per GCM
M<-aggregate(Pr_mm~Month+Year+GCM,ALL,sum)
Mon<-aggregate(TmeanC~Month+Year+GCM,ALL,mean)
Mon<-merge(Mon,M,by=c("Month","Year","GCM"));rm(M)
Mon$PET<-thornthwaite(Mon$TmeanC,lat=Lat)
# Attach the climate-future label for each GCM and average GCMs within a CF
Mon<-merge(Mon,CF_GCM,by="GCM")
Mon$CF<-factor(Mon$CF,levels=unique(Mon$CF))
MON<-aggregate(cbind(Pr_mm,PET)~Month+Year+CF,Mon,mean)
MON<-MON[with(MON, order(CF,Year, Month)),]
CF.split<-split(MON,MON$CF) #Splits df into array by CF
# this step is done because each CF has unique historical record and SPEI normalized to average conditions at beginning of record
for (i in seq_along(CF.split)){
  name <- names(CF.split)[i]
  t<-CF.split[[i]]
  tp<-ts(t$Pr_mm,frequency=12,start=c(SPEI_start,1))
  tpet<-ts(t$PET,frequency=12,start=c(SPEI_start,1))
  # SPEI normalized against the reference window SPEI_start..SPEI_end
  SPEI<-spei(tp-tpet,SPEI_per,ref.start=c(SPEI_start,1),ref.end=c(SPEI_end,12))
  CF.split[[i]]$SPEI <- SPEI$fitted[1:length(SPEI$fitted)]
  # Plot each CF. plot_base renamed from `plot` so the character variable no
  # longer shadows plot(); paste0 avoids the stray space paste() inserted into
  # the file name after the "/".
  plot_base <- paste0('./figures/additional/', name)
  jpeg(paste0(plot_base, "-SPEI.jpg"), width = 350, height = 350)
  plot(x=SPEI,main=name) #eventually prob want to figure out how to make x-axis date
  dev.off()
}
all2<- ldply(CF.split, data.frame) #convert back to df
all2$SPEI[which(is.na(all2$SPEI))]<-0 #records used to normalize data are NAs - convert to 0s
all2$SPEI[which(is.infinite(all2$SPEI))]<- -5 #getting some -Inf values that are large jumps, temp fix
#
# all3<-subset(all2,Month==9) #Because we aggregated drought years as only applying to growing season
# # If you are doing for place where winter drought would be important, use following line
# Annual mean precip and SPEI per climate future
all3<-aggregate(cbind(Pr_mm,SPEI)~Year+CF,all2,mean)
###################################### PLOT ANNUAL TIME-SERIES #################################################
############################################# Plotting ###########################################################
# Shared ggplot theme for the SPEI time-series plots
PlotTheme = theme(axis.text=element_text(size=20), #Text size for axis tick mark labels
                  axis.title.x=element_blank(), #Text size and alignment for x-axis label
                  axis.title.y=element_text(size=24, vjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for y-axis label
                  plot.title=element_text(size=26,face="bold",hjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for plot title
                  legend.title=element_text(size=24), #Text size of legend category labels
                  legend.text=element_text(size=22), #Text size of legend title
                  legend.position = "bottom",
                  panel.background = element_blank(), #Background white
                  panel.grid.major = element_line("light grey",0.3)) #add grid back
# Shared theme for the bar plots (no legend; x labels carry the CF names)
BarPlotTheme = theme(axis.text.x=element_text(size=24), #Text size for axis tick mark labels
                     axis.text.y=element_text(size=20),
                     axis.title.x=element_blank(), #Text size and alignment for x-axis label
                     axis.title.y=element_text(size=24, vjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for y-axis label
                     plot.title=element_text(size=26,face="bold",hjust=0.5, margin=margin(t=20, r=20, b=20, l=20)), #Text size and alignment for plot title
                     legend.position = "none")
#Height and width (inches) for saved bar plots
PlotWidth = 15
PlotHeight = 9
# Gridmet: classify each historical year as wet/dry by the sign of its mean SPEI
drt3$col[drt3$SPEI>=0]<-"wet"
drt3$col[drt3$SPEI<0]<-"dry"
drt3$col<-factor(drt3$col, levels=c("wet","dry"))
# Annual SPEI bars; Year is character here, hence as.numeric(as.character(Year))
ggplot(data = drt3, aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
  geom_bar(stat="identity",aes(fill=col),col="black") +
  geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) + # dashed drought-threshold reference line
  scale_fill_manual(name="",values =c("blue","red")) +
  labs(title = "SPEI values for Historical Period (gridMET)",
       x = "Date", y = "SPEI") +
  guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave("Recent Drought.png", path = './figures/additional', width = 18, height = 9)
# MACA prep dataframe: wet/dry classification for the CF annual series
all3$col[all3$SPEI>=0]<-"wet"
all3$col[all3$SPEI<0]<-"dry"
all3$col<-factor(all3$col, levels=c("wet","dry"))
all3$Year<-as.numeric(all3$Year)
# CF
# First climate future: plot alone, and appended to the gridMET record.
# Note drt3$Year is character, so Year becomes character after this rbind --
# the plots re-coerce with as.numeric(as.character(Year)).
CF1<-subset(all3, CF %in% CFs[1] )
grid.append<-drt3; grid.append$CF<-CFs[1]
grid.append<-subset(grid.append, select=c(Year,CF,Pr_mm:col))
grid.append<-rbind(grid.append, subset(CF1,Year>=2020 & Year < 2070))
# Gray rectangle marks the 2025-2055 analysis window
ggplot(data = subset(CF1,Year>=2025&Year<2056), aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
  geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
  geom_bar(stat="identity",aes(fill=col),col="black") +
  geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
  scale_fill_manual(name="",values =c("blue","red")) +
  labs(title = paste("SPEI values for", CFs[1], "climate future", sep = " " ),
       x = "Date", y = "SPEI") +
  guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[1], "Drought.png",sep=" "), path = './figures/additional', width = 18, height = 9)
# Same plot with the observed gridMET years prepended
ggplot(data = grid.append, aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
  geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
  geom_bar(stat="identity",aes(fill=col),col="black") +
  geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
  scale_fill_manual(name="",values =c("blue","red")) +
  labs(title = paste("SPEI values for", CFs[1], "(Gridmet + MACA)", sep = " " ),
       x = "Date", y = "SPEI") +
  guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[1], "Drought+Gridmet.png",sep=" "), path = './figures/additional', width = 18, height = 9)
# CF 2: identical treatment for the second climate future
CF2<-subset(all3, CF %in% CFs[2] )
grid.append<-drt3; grid.append$CF<-CFs[2]
grid.append<-subset(grid.append, select=c(Year,CF,Pr_mm:col))
grid.append<-rbind(grid.append, subset(CF2,Year>=2020 & Year < 2070))
ggplot(data = subset(CF2,Year>=2025&Year<2056), aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
  geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
  geom_bar(stat="identity",aes(fill=col),col="black") +
  geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
  scale_fill_manual(name="",values =c("blue","red")) +
  labs(title = paste("SPEI values for", CFs[2], "climate future", sep = " " ),
       x = "Date", y = "SPEI") +
  guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[2], "Drought.png",sep=" "), path = './figures/additional', width = 18, height = 9)
ggplot(data = grid.append, aes(x=as.numeric(as.character(Year)), y=SPEI,fill = col)) +
  geom_rect(xmin=2025, xmax=2055, ymin=-Inf, ymax=Inf, alpha=0.1, fill="darkgray", col="darkgray") +
  geom_bar(stat="identity",aes(fill=col),col="black") +
  geom_hline(yintercept=-.5,linetype=2,colour="black",size=1) +
  scale_fill_manual(name="",values =c("blue","red")) +
  labs(title = paste("SPEI values for", CFs[2], "(Gridmet + MACA)", sep = " " ),
       x = "Date", y = "SPEI") +
  guides(color=guide_legend(override.aes = list(size=7))) + PlotTheme
ggsave(paste(CFs[2], "Drought+Gridmet.png",sep=" "), path = './figures/additional', width = 18, height = 9)
# Split into periods (historical 1950-1999, future 2025-2055).
# `truncation` (the SPEI drought threshold) comes from the loaded RData workspace.
Historical2<-subset(all3, Year >= 1950 & Year <2000)
min(Historical2$SPEI)
Future2<-subset(all3, Year >= 2025 & Year <2056)
min(Future2$SPEI)
# Calculate drought characteristics: flag drought years (SPEI below threshold)
Historical2$Drought=0
Historical2$Drought[which(Historical2$SPEI < truncation)] <- 1
# Drought Duration calculation
# 1 Create var for beginning drought and var for end drought, then count months between
head(Historical2)
# Create count of years within CF (a 1..n index repeated for each CF)
length(Historical2$CF)/length(unique(Historical2$CF))
Historical2$count<-rep(seq(1, length(Historical2$CF)/length(unique(Historical2$CF)) # 50=# years in historical period
                           , 1),length(unique(Historical2$CF))) # 4=repeat # of CFs
Historical2$length<-0
# Running length of the current drought spell, 0 outside droughts:
# rle()+seq_len gives 1,2,3,... within every run; multiplying by Drought
# zeroes out the non-drought runs.
Historical2$length <- Historical2$Drought * unlist(lapply(rle(Historical2$Drought)$lengths, seq_len))
mean(Historical2$length[Historical2$length>0])
# To get duration, now just remove those that are not droughts and do calculations on length
# Give each drought period an ID
# D = row index where each drought starts (spell length resets to 1)
D<-which(Historical2$length==1)
HistoricalDrought<-data.frame()
HistoricalDrought<-setNames(data.frame(matrix(ncol=10,nrow=length(D))),c("DID","Start","End","Year","per","CF","duration","severity","peak","freq"))
# Initialize Start/End as POSIXct so the strptime assignments below keep the class
HistoricalDrought$Start = Sys.time(); HistoricalDrought$End = Sys.time()
HistoricalDrought$per<-as.factor("H")
# Calculate variables for each drought period
# NOTE(review): Historical2 is built from all3 (Year/CF/Pr_mm/SPEI), which has
# no Date column, so strptime(Historical2$Date, ...) operates on NULL here --
# confirm this section actually runs as intended on annual data.
for (i in 1:length(D)){
  HistoricalDrought$DID[i]<-i
  HistoricalDrought$Start[i]<-strptime(Historical2$Date[D[i]],format="%Y-%m-%d",tz="MST")
  HistoricalDrought$Year[i]<-Historical2$Year[D[i]]
}
# ND = row index where each non-drought run starts (i.e. the row after a drought ends)
ND<- which((Historical2$length == 0) * unlist(lapply(rle(Historical2$length)$lengths, seq_len)) == 1)
if(ND[1]==1) ND<-ND[2:length(ND)] # drop a leading non-drought run (no drought precedes it)
if(Historical2$Drought[length(Historical2$Drought)]==1) ND[length(ND)+1]<-length(Historical2$length) # close a drought that runs to the end of record
###### !!!!!!!!!!!
# If last row in drought df is a drought period - use next line of code. Otherwies proceed.
# ND[length(ND)+1]<-length(Historical2$length) #had to add this step because last drought went until end of df so no end in ND
#Duration # months SPEI < truncation; Severity # Sum(SPEI) when SPEI < truncation; Peak # min(SPEI) when SPEI < truncation
for (i in 1:length(ND)){
  HistoricalDrought$CF[i]<-as.character(Historical2$CF[D[i]])
  HistoricalDrought$End[i]<-strptime(Historical2$Date[ND[i]],format="%Y-%m-%d",tz="MST")
  HistoricalDrought$duration[i]<-Historical2$length[ND[i]-1]          # spell length on the last drought row
  HistoricalDrought$severity[i]<-sum(Historical2$SPEI[D[i]:(ND[i]-1)]) # cumulative SPEI deficit over the spell
  HistoricalDrought$peak[i]<-min(Historical2$SPEI[D[i]:(ND[i]-1)])     # most negative SPEI in the spell
}
HistoricalDrought$CF<-factor(HistoricalDrought$CF, levels=levels(Historical2$CF))
## Freq: drought-free interval = count-index gap between the end of one drought
## and the start of the next, computed per CF
CF.split<-split(Historical2,Historical2$CF)
for (i in 1:length(CF.split)){
  name=as.character(unique(CF.split[[i]]$CF))
  d<-which(CF.split[[i]]$length==1)
  nd<-which((CF.split[[i]]$length == 0) * unlist(lapply(rle(CF.split[[i]]$length)$lengths, seq_len)) == 1)
  if(length(nd)>length(d)) {nd=nd[2:length(nd)]}
  # d[j+1] is NA for the last drought, leaving freq NA there (handled later with na.rm)
  for (j in 1:length(d)){
    HistoricalDrought$freq[which(HistoricalDrought$CF==name & HistoricalDrought$Year==CF.split[[i]]$Year[d[j]])] <-
      CF.split[[i]]$count[d[j+1]]-CF.split[[i]]$count[nd[j]]
  }
}
####### Future: same event extraction as the historical block above
# Calculate drought characteristics
Future2$Drought=0
Future2$Drought[which(Future2$SPEI < truncation)] <- 1
# Drought Duration calculation
# 1 Create var for beginnign drought and var for end drought, then count months between
head(Future2)
# Create count of months within CF (1..n index repeated per CF)
length(Future2$CF)/length(unique(Future2$CF))
Future2$count<-rep(seq(1, length(Future2$CF)/length(unique(Future2$CF)),
                       1),length(unique(Future2$CF))) # repeat # of CFs
Future2$length<-0
# Running drought-spell length, 0 outside droughts (rle + seq_len trick)
Future2$length <- Future2$Drought * unlist(lapply(rle(Future2$Drought)$lengths, seq_len))
mean(Future2$length[Future2$length>0])
# To get duration, now just remove those that are not droughts and do calculations on length
# Give each drought period an ID
D<-which(Future2$length==1)
FutureDrought<-data.frame()
FutureDrought<-setNames(data.frame(matrix(ncol=10,nrow=length(D))),c("DID","Start","End","Year","per","CF","duration","severity","peak","freq"))
FutureDrought$Start = Sys.time(); FutureDrought$End = Sys.time()
FutureDrought$per<-as.factor("F")
# Calculate variables for each drought period
# NOTE(review): as in the historical block, Future2 carries no Date column, so
# strptime(Future2$Date, ...) operates on NULL -- confirm.
for (i in 1:length(D)){
  FutureDrought$DID[i]<-i
  FutureDrought$Start[i]<-strptime(Future2$Date[D[i]],format="%Y-%m-%d",tz="MST")
  FutureDrought$Year[i]<-Future2$Year[D[i]]
}
# ND = first row of each non-drought run; trim/extend to pair with drought starts
ND<- which((Future2$length == 0) * unlist(lapply(rle(Future2$length)$lengths, seq_len)) == 1)
if(ND[1]==1) ND<-ND[2:length(ND)]
if(Future2$Drought[length(Future2$Drought)]==1) ND[length(ND)+1]<-length(Future2$length)
#Duration # months SPEI < truncation; Severity # Sum(SPEI) when SPEI < truncation; Peak # min(SPEI) when SPEI < truncation
for (i in 1:length(ND)){
  FutureDrought$CF[i]<-as.character(Future2$CF[D[i]])
  FutureDrought$End[i]<-strptime(Future2$Date[ND[i]],format="%Y-%m-%d",tz="MST")
  FutureDrought$duration[i]<-Future2$length[ND[i]-1]
  FutureDrought$severity[i]<-sum(Future2$SPEI[D[i]:(ND[i]-1)])
  FutureDrought$peak[i]<-min(Future2$SPEI[D[i]:(ND[i]-1)])
}
FutureDrought$CF<-as.factor(FutureDrought$CF)
## Freq: drought-free interval between successive droughts, per CF
CF.split<-split(Future2,Future2$CF)
for (i in 1:length(CF.split)){
  name=as.character(unique(CF.split[[i]]$CF))
  d<-which(CF.split[[i]]$length==1)
  nd<-which((CF.split[[i]]$length == 0) * unlist(lapply(rle(CF.split[[i]]$length)$lengths, seq_len)) == 1)
  if(length(nd)>length(d)) {nd=nd[2:length(nd)]}
  # d[j+1] is NA for the final drought, leaving freq NA (handled later with na.rm)
  for (j in 1:length(d)){
    FutureDrought$freq[which(FutureDrought$CF==name & FutureDrought$Year==CF.split[[i]]$Year[d[j]])] <-
      CF.split[[i]]$count[d[j+1]]-CF.split[[i]]$count[nd[j]]
  }
}
# Peek at the two event tables, then combine and save all drought events.
head(HistoricalDrought)
head(FutureDrought)
Drought <- rbind(HistoricalDrought, FutureDrought)
write.csv(Drought, "./data/park-specific/output/Drt.all.csv", row.names = FALSE) # csv with all drought events
# Mean drought characteristics (duration, severity, intensity, drought-free
# interval) for each CF level of an event table; period_code is "H" or "F".
summarise_drought_chars <- function(events, period_code) {
  cf_levels <- levels(events$CF)
  chars <- setNames(data.frame(matrix(ncol = 6, nrow = length(cf_levels))),
                    c("CF", "per", "Duration", "Severity", "Intensity", "Frequency"))
  chars$CF <- cf_levels
  chars$per <- period_code
  for (k in seq_along(cf_levels)) {
    rows <- which(events$CF == cf_levels[k])
    # freq holds NA for the final drought of a record, so drop NAs here only
    chars$Frequency[k] <- mean(events$freq[rows], na.rm = TRUE)
    chars$Duration[k]  <- mean(events$duration[rows])
    chars$Severity[k]  <- mean(events$severity[rows])
    chars$Intensity[k] <- mean(events$peak[rows])
  }
  chars
}
Hist_char <- summarise_drought_chars(HistoricalDrought, "H")
Drought_char <- rbind(Hist_char, summarise_drought_chars(FutureDrought, "F"))
# csv for averages for each CF for hist and future periods
write.csv(Drought_char, "./data/park-specific/output/Drought_char.csv", row.names = FALSE)
########################################### BAR PLOTS ###############################################
# Assemble the plotting table: one "Historical" bar (mean across CFs) plus one
# bar per future climate future.
Drought_char_H = subset(Drought_char, per == "H")
Drought_char_F = subset(Drought_char, per == "F")
Drought_char_H$CF<-"Historical"
DroughtH = aggregate(cbind(Duration,Severity,Intensity,Frequency)~CF+per,Drought_char_H,mean, na.rm=TRUE)
Drought_all = rbind(DroughtH, Drought_char_F)
Drought_all$CF = factor(Drought_all$CF, levels = c("Historical",CFs))
#Change NaN's to 0's (is.na() already returns a logical; the `== TRUE` was redundant)
Drought_char_H[is.na(Drought_char_H)] = 0
# Future-minus-historical change in each drought characteristic.
# Use Drought_char_F$CF for the labels: Drought_char_H$CF was overwritten to
# "Historical" above, which made every CF NA after the factor() call below.
# (Still assumes H and F rows are aligned in the same CF order, which holds
# because both derive from the same Drought_char construction.)
Drought_delta = data.frame(CF = Drought_char_F$CF)
Drought_delta$Duration = Drought_char_F$Duration - Drought_char_H$Duration
Drought_delta$Severity = Drought_char_F$Severity - Drought_char_H$Severity
Drought_delta$Intensity = Drought_char_F$Intensity - Drought_char_H$Intensity
Drought_delta$Frequency = Drought_char_F$Frequency - Drought_char_H$Frequency
Drought_delta$CF = factor(Drought_delta$CF, levels = c(CFs))
#Drought duration barplot (all four panels share BarPlotTheme, colors3, and size)
ggplot(Drought_all, aes(x=CF, y=as.numeric(Duration), fill=CF)) + geom_bar(stat="identity", col="black") +
  scale_y_continuous() +
  labs(x="", y="Years",
       title=paste(SiteID, "- Average Drought Duration")) +
  scale_fill_manual(values = colors3) +
  BarPlotTheme
ggsave(paste(SiteID, "Duration.png"), path = './figures/additional', height=PlotHeight, width=PlotWidth, dpi=600)
#Drought severity barplot
ggplot(Drought_all, aes(x=CF, y=as.numeric(Severity), fill=CF)) + geom_bar(stat="identity", col="black") +
  scale_y_continuous() +
  labs(x="", y="Severity (SPEI * duration)",
       title=paste(SiteID, "- Average Drought Severity")) +
  scale_fill_manual(values = colors3) +
  BarPlotTheme
ggsave(paste(SiteID, "Severity.png"), path = './figures/additional', height=PlotHeight, width=PlotWidth, dpi=600)
#Drought intensity barplot
ggplot(Drought_all, aes(x=CF, y=as.numeric(Intensity), fill=CF)) + geom_bar(stat="identity", col="black") +
  scale_y_continuous() +
  labs(x="", y="Intensity (Minimum SPEI values)",
       title=paste(SiteID, "- Average Drought Intensity")) +
  scale_fill_manual(values = colors3) +
  BarPlotTheme
ggsave(paste(SiteID, "Intensity.png"), path = './figures/additional', height=PlotHeight, width=PlotWidth, dpi=600)
#Drought-free interval barplot
ggplot(Drought_all, aes(x=CF, y=as.numeric(Frequency), fill=CF)) + geom_bar(stat="identity", col="black") +
  scale_y_continuous() +
  labs(x="", y="Years",
       title=paste(SiteID, "- Average Drought-Free Interval")) +
  scale_fill_manual(values = colors3) +
  BarPlotTheme
ggsave(paste(SiteID, "Frequency.png"), path = './figures/additional', height=PlotHeight, width=PlotWidth, dpi=600)
|
library(haven)
library(dplyr)
library(ggplot2)
library(forcats)
library(rstanarm)
library(tidyr)
library(stringr)
library(lme4)
# Convenience "not in" operator used for the territory filter below
"%ni%" <- Negate("%in%")
######load datasets
#IAT: Project Implicit Race IAT public data files (SPSS .sav), 2002-2014
df <- rbind(read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2013.sav'),
            read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2012.sav'),
            read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2010.sav'))
df2 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2011.sav')
df3 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2009.sav')
df4 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2008.sav')
df5 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2007.sav')
df6 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2006.sav')
df7 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2005.sav')
df8 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2004.sav')
df9 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2002-2003.sav')
df10 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2014.sav')
#County linking information
df_haley <- read.csv('/Users/travis/Documents/gits/Data/Haley_countylinks/_Master Spreadsheet.csv', stringsAsFactors = F)
# Hard-coded cell patch for the "ñ"; NOTE(review): fragile -- breaks if the
# spreadsheet's row/column order ever changes
df_haley[1791,6] <- 'Doña Ana County, New Mexico' #because of the "ñ"
# State name <-> abbreviation lookup (base R datasets plus DC)
df_states <- data.frame(state=c(state.name, 'District of Columbia'),
                        state_abb=c(state.abb, 'DC'))
#Covariates: American Community Survey tables, FBI crime trends, census segregation
df_acs <- read.csv('/Users/travis/Documents/gits/Data/ACS/county_age/ACS_14_5YR_DP05_with_ann.csv', skip=1)
df_acs_eth <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_ethnicity/ACS_14_5YR_B02001_with_ann.csv',
                       skip = 1, stringsAsFactors = F)
df_acs_ed <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_education/ACS_14_5YR_S1501_with_ann.csv',
                      skip = 1, stringsAsFactors = F)
df_acs_pov_emp <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_poverty_emp/ACS_14_5YR_DP03_with_ann.csv',
                           skip = 1, stringsAsFactors = F)
df_acs_hous <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_housing/DEC_10_SF1_GCTPH1.US04PR_with_ann.csv',
                        skip=1, stringsAsFactors = F)
df_acs_mob <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_mobility/ACS_14_5YR_S0701_with_ann.csv',
                       skip=1, stringsAsFactors = F)
df_fbi <- read.csv('/Users/travis/Documents/gits/Data/FBI/state_crimes/CrimeTrendsInOneVar.csv')
df_seg <- read.csv('/Users/travis/Documents/gits/Data/ACS/county_segregation/ACS_14_5YR_B02001_with_ann.csv',
                   skip=1, stringsAsFactors = F)
#educators <- c('25-2000', '25-3000', '25-4000', '25-9000')
# per stacey's contact in education, limit to the following:
educators <- c('25-2000', '25-3000')
# '25-1000' - postsecondary teachers
## Get IAT data, limit observations to just those with county information, age,
## and who identify as white
# Each yearly file is reduced to the same seven columns and stacked into
# `subdat` via rbind; raceomb == 6 filters to white respondents (per the
# comment above). Later files rename variables before selecting because the
# codebooks differ across years.
df %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  filter(raceomb==6) -> subdat
df2 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df3 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df4 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df5 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df6 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
# 2005/2004 files use plural thermometer names and `ethnic` instead of raceomb
df7 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  mutate(tblack_0to10=tblacks_0to10,
         twhite_0to10=twhites_0to10) %>%
  mutate(raceomb = ethnic) %>% #race was called something else in this one
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df8 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  mutate(tblack_0to10=tblacks_0to10,
         twhite_0to10=twhites_0to10) %>%
  mutate(raceomb = ethnic) %>% #race was called something else in this one
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df9 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  mutate(raceomb = ethnic) %>% #race was mislabeled
  mutate(tblack_0to10=tblacks_0to10, #change to be consistent with other files
         twhite_0to10=twhites_0to10) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
df10 %>%
  filter(CountyNo!='' &
           !is.na(age)) %>%
  select(CountyNo, STATE, D_biep.White_Good_all,
         tblack_0to10, twhite_0to10, raceomb, age) %>%
  rbind(subdat) %>%
  filter(raceomb==6) -> subdat
# Derived measures: explicit warmth toward Black people, the white-minus-black
# thermometer difference, and the age bins used for poststratification.
subdat %>%
  mutate(explicit_bias=tblack_0to10) %>%
  mutate(explicit_bias_diff = twhite_0to10 - tblack_0to10) %>%
  mutate(age_bin = cut(age, breaks=c(14, 24, 34, 54, 75, 120))) %>%
  filter(!is.na(age_bin)) %>%
  filter(STATE %ni%
           c('AA', 'AE', 'AP', 'AS', 'FM', 'GU', 'MH', 'MP', 'PR', 'VI')) %>% #exclude territories, etc
  mutate(county_id = paste(STATE, CountyNo, sep='-')) -> individual_data
### Combine all covariates into one dataframe ###
#################################################
df_acs <- df_acs[,c(3, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64)] #age population breakdown
df_acs$Geography <- as.character(df_acs$Geography)
# Same "ñ" patch as for df_haley; NOTE(review): hard-coded row index is fragile
df_acs$Geography[1803] <- 'Doña Ana County, New Mexico'
#county age distributions: long format, bucketed into the MRP age bins,
#summed to one population count per county x age_bin
df_acs %>%
  gather(age, num, -Geography) %>%
  mutate(name_from_census=Geography) %>% #make column name match haley's
  select(-Geography) %>%
  left_join(df_haley) %>%
  mutate(age = substr(age, 25, 26)) %>% #pick out the lower bound of the category defined by the ACS
  mutate(age_bin = cut(as.numeric(age),
                       breaks=c(14, 24, 34, 54, 75, 120))) %>% #convert to numeric that matches the MRP scheme
  group_by(county_fips, age_bin) %>%
  summarise(num = sum(num, na.rm=T)) %>% #total number of people for each age group & county
  left_join(df_haley[,c('state_name', 'state_fips',
                        'state_code', 'county_fips')]) -> df_acs_counts
#white and african american state level population
# NOTE(review): the numeric column indices below are tied to the specific ACS
# table layouts downloaded for this project -- confirm if tables are refreshed
covs <- df_acs_eth[,c(3, 4, 6, 8)]
names(covs) <- c('state_name', 'total_pop', 'white_pop', 'black_pop')
covs %>%
  mutate(white_prop = white_pop/total_pop,
         black_prop = black_pop/total_pop) %>%
  mutate(b.w.ratio = black_prop/white_prop) -> covs
#percentage w/ba or higher @ state level
covs_ed <- df_acs_ed[,c(3, 28)]
names(covs_ed) <- c('state_name', 'col_grads')
#state level unemployment, income & poverty level
covs_income <- df_acs_pov_emp[,c(3, 21, 248, 478)]
names(covs_income) <- c('state_name', 'unemp', 'income', 'poverty')
#state level housing density
covs_hous <- df_acs_hous[,c(5, 14)]
names(covs_hous) <- c('state_fips', 'housing_density')
#state-level mobility: movers from another state plus movers from abroad
covs_mob <- df_acs_mob[,c(3, 10, 12)]
names(covs_mob) <- c('state_name', 'moved_states', 'moved_abroad')
covs_mob$mobility <- covs_mob$moved_states + covs_mob$moved_abroad
#state-level crime rate: counts per capita, averaged over the available years
df_fbi %>%
  gather(state_name, crime, -Year) %>%
  mutate(state_name = stringr::str_replace_all(state_name, '\\.+', ' ')) %>% # undo R's dot-mangling of column names
  left_join(covs[,c('state_name', 'total_pop')]) %>%
  mutate(crime_rate = crime/total_pop) %>%
  group_by(state_name) %>%
  summarise(crime_rate = mean(crime_rate, na.rm=T)) -> covs_crime #averaged across 5 years
#state-level segregation index: black/white dissimilarity index over census
#tracts, D = 0.5 * sum_i |b_i/B - w_i/W|
df_seg <- df_seg[,c(1:4, 6,8)]
names(df_seg) <- c('ID', 'ID2', 'Geo', 'Total', 'White', 'Black')
df_seg %>%
  mutate(FIPS = stringr::str_sub(ID, -11, -1)) %>%
  mutate(state_fips = stringr::str_sub(FIPS, 1, 2),
         county_fips = stringr::str_sub(FIPS, 3, 5),
         census_fips = stringr::str_sub(FIPS, 6, 12)) %>%
  select(-ID, -ID2) %>%
  group_by(state_fips) %>%
  mutate(state_total = sum(Total, na.rm=T),
         state_white = sum(White, na.rm=T),
         state_black = sum(Black, na.rm=T)) %>%
  mutate(black_prop = Black/state_black,   # tract share of the state's Black population
         white_prop = White/state_white) %>%
  mutate(bw_diff = abs(black_prop-white_prop)) %>%
  group_by(state_fips) %>%
  mutate(dissim = sum(bw_diff)*.5) %>%
  separate(Geo, c('tract', 'county', 'state_name'), sep=',') %>%
  ungroup() %>%
  select(state_name, dissim) %>%
  mutate(state_name = stringr::str_trim(state_name)) %>%
  distinct() %>%
  arrange(desc(dissim)) -> covs_seg
#put 'em all together: attach every state-level covariate to the county x
#age_bin population table
df_acs_counts %>%
  left_join(covs) %>%
  left_join(covs_ed) %>%
  left_join(covs_income) %>%
  left_join(covs_hous) %>%
  left_join(covs_mob) %>%
  left_join(covs_crime) %>%
  left_join(covs_seg) %>%
  mutate(county_id = paste(
    state_code, stringr::str_sub(county_fips, -3, -1), sep='-') )-> df_acs_counts
#df_acs_counts has state-level covariates & distribution of age within county
# One row per state, all covariates z-scored for the regression
df_acs_counts %>%
  ungroup() %>%
  select(state_name:housing_density, mobility:dissim) %>%
  distinct() %>%
  mutate_at(vars(total_pop:dissim), scale) -> state_covs
#rename to join with state covariates
names(individual_data)[2] <- 'state_code'
individual_data %>%
  left_join(state_covs) -> individual_data_base
# fit MRP models
# tutorial here: http://www.princeton.edu/~jkastell/mrp_primer.html
#no state-level predictors - these have trouble with convergence
#individual.model.bias <- lmer(D_biep.White_Good_all ~ (1|age_bin) + (1|county_id) +
# (1|state_code), data=individual_data_base)
#individual.model.explicit <- lmer(explicit_bias ~ (1|age_bin) + (1|county_id) +
# (1|state_code), data=individual_data_base)
# individual_data_diffmod <- individual_data_base[!is.na(individual_data_base$explicit_bias_diff),]
# individual.model.explicit_diff <- lmer(explicit_bias_diff ~ (1|age_bin) +
# (1|county_id) + (1|state_code),
# data=individual_data_base)
#state-level predictors are the same predictors as in the final model:
# MRP stage-1 multilevel models: fixed effects for the scaled state-level
# covariates, plus random intercepts for age bin, county, and state.
# Outcome 1: implicit bias (IAT D score).
individual.model.bias <- lmer(D_biep.White_Good_all ~ total_pop + white_prop +
                                black_prop + b.w.ratio + col_grads + unemp +
                                income + poverty + housing_density + mobility +
                                crime_rate + dissim + (1|age_bin) +
                                (1|county_id) + (1|state_code),
                              data=individual_data_base)
# Outcome 2: explicit bias (tblack_0to10 thermometer; reverse-scored after
# prediction so that larger values indicate more pro-White bias).
individual.model.explicit <- lmer(explicit_bias ~ total_pop + white_prop +
                                    black_prop + b.w.ratio + col_grads +
                                    unemp + income + poverty + housing_density +
                                    mobility + crime_rate + dissim +
                                    (1|age_bin) + (1|county_id) +
                                    (1|state_code), data=individual_data_base)
# Outcome 3: explicit difference score (twhite_0to10 - tblack_0to10).
individual.model.explicit_diff <- lmer(explicit_bias_diff ~ total_pop +
                                         white_prop + black_prop + b.w.ratio +
                                         col_grads + unemp + income + poverty +
                                         housing_density + mobility +
                                         crime_rate + dissim + (1|age_bin) +
                                         (1|county_id) + (1|state_code),
                                       data=individual_data_base)
#required to get rid of convergence warnings - run the model for more iterations
# Restart the diff-score model from its current estimates (theta = random
# effect covariance parameters, fixef = fixed effects) with a larger
# optimizer budget (maxfun = 20,000) so lmer can converge cleanly.
ss <- getME(individual.model.explicit_diff, c('theta', 'fixef'))
individual.model.explicit_diff <- update(individual.model.explicit_diff,
                                         start=ss,
                                         control=lmerControl(optCtrl=list(maxfun=2e4)))
#create dataframe to make predictions over
# Post-stratification frame: one row per county x age bin carrying the census
# population count (`num`), joined to the scaled state covariates, with the
# same "ST-XXX" county_id key that was used when fitting the models.
df_acs_counts %>%
  select(county_fips:state_code) %>%
  ungroup() %>%
  left_join(state_covs) %>%
  mutate(county_id = paste(state_code,
                           str_sub(county_fips, -3, -1), sep='-')) -> scaled_counts
# Predict county-level outcomes from the fitted MRP models over the full
# post-stratification grid. allow.new.levels = TRUE falls back to the fixed
# effects for county/state levels that had no survey respondents.
#predict implicit
scaled_counts$yhat_bias <- predict(individual.model.bias,
                                   newdata = scaled_counts,
                                   allow.new.levels = TRUE)
#predict explicit
# FIX: the argument was spelled `newdat` and only worked through R's partial
# argument matching; spell `newdata` out explicitly (also T -> TRUE).
scaled_counts$yhat_explicit <- predict(individual.model.explicit,
                                       newdata = scaled_counts,
                                       allow.new.levels = TRUE)
#reverse score explicit bias
# Flip the sign so larger values mean more pro-White bias, consistent with
# the reverse-scored teacher data below.
scaled_counts$yhat_explicit <- scaled_counts$yhat_explicit * -1
#predict explicit diff score
scaled_counts$yhat_explicit_diff <- predict(individual.model.explicit_diff,
                                            newdata = scaled_counts,
                                            allow.new.levels = TRUE)
# Post-stratify: within each county, average the model predictions across
# age bins, weighting each bin by its census population count `num`.
scaled_counts %>%
  ungroup() %>%
  group_by(county_id) %>%
  summarise(weighted_bias = weighted.mean(yhat_bias, num),
            weighted_explicit = weighted.mean(yhat_explicit, num),
            weighted_explicit_diff = weighted.mean(yhat_explicit_diff, num)) -> mrp_ests
#compute naive means as well
# Raw (unmodeled) county means of each outcome, plus the number of
# non-missing observations behind each mean, merged with the MRP estimates
# and written to disk.
individual_data %>%
  group_by(county_id) %>%
  summarise(bias = mean(D_biep.White_Good_all, na.rm=T),
            explicit = mean(explicit_bias, na.rm=T),
            explicit_diff = mean(explicit_bias_diff, na.rm=T),
            n_bias_obs = sum(!is.na(D_biep.White_Good_all)),
            n_explicit_obs = sum(!is.na(explicit_bias))) %>%
  left_join(mrp_ests) -> county_means
# NOTE(review): absolute user-specific path; consider a relative/configurable
# output directory for portability.
write.csv(county_means, '/Users/travis/Documents/gits/educational_disparities/output/county_means.csv')
###### write teacher data
#gather all dataframes who have occupation info
# (df7-df9 are omitted as in the original -- per the comment above, those
# waves apparently lack the occupation variable.)
# The original repeated an identical filter/select/rbind stanza once per
# wave; collapse it into a single loop. The list is ordered so that
# do.call(rbind, ...) reproduces the original row order exactly
# (df10 rows first, then df6, df5, df4, df3, df2, df).
subdat <- do.call(
  rbind,
  lapply(list(df10, df6, df5, df4, df3, df2, df), function(wave) {
    wave %>%
      filter(CountyNo != '') %>%
      select(CountyNo, STATE, D_biep.White_Good_all,
             twhite_0to10, tblack_0to10, raceomb, age, occupation)
  })
)
#compute difference score, reverse explicit score, filter out territories & non-educators
subdat %>%
  # explicit_diff: thermometer gap (white warmth minus black warmth);
  # explicit: black-warmth thermometer reverse-scored (larger = more bias).
  mutate(explicit_diff=twhite_0to10 - tblack_0to10,
         explicit = tblack_0to10*-1) %>%
  # Drop armed-forces postal codes and US territories.
  filter(STATE %ni%
           c('AA', 'AE', 'AP', 'AS', 'FM', 'GU', 'MH', 'MP', 'PR', 'VI')) %>%
  # Keep only occupation codes in `educators` (primary/secondary teaching
  # codes, per the definition near the top of the file).
  filter(occupation %in% educators) %>%
  mutate(county_id = paste(STATE, CountyNo, sep='-')) -> individual_data
names(individual_data)[2] <- 'state_abb'  # column 2 (STATE) -> state_abb
individual_data %>%
  filter(raceomb==6) %>% #whites only
  group_by(county_id) %>%
  # mutate() + distinct() (rather than summarise()) computes county-level
  # teacher means while keeping one output row per county.
  mutate(teacher_bias = mean(D_biep.White_Good_all, na.rm=T),
         teacher_explicit = mean(explicit, na.rm=T),
         teacher_explicit_diff = mean(explicit_diff, na.rm=T),
         num_obs = n()) %>%
  filter(num_obs>49) %>%  # require at least 50 teacher respondents per county
  select(county_id, state_abb, teacher_bias,
         teacher_explicit, teacher_explicit_diff, num_obs) %>%
  distinct() -> county_teacher_estimates
# NOTE(review): absolute user-specific output path.
write.csv(county_teacher_estimates, row.names = F,
          file = '/Users/travis/Documents/gits/educational_disparities/output/county_teacher_means.csv')
| /iat_analysis.R | no_license | riddlet/educational_disparities | R | false | false | 16,865 | r | library(haven)
library(dplyr)
library(ggplot2)
library(forcats)
library(rstanarm)
library(tidyr)
library(stringr)
library(lme4)
"%ni%" <- Negate("%in%")
######load datasets
#IAT
df <- rbind(read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2013.sav'),
read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2012.sav'),
read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2010.sav'))
df2 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2011.sav')
df3 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2009.sav')
df4 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2008.sav')
df5 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2007.sav')
df6 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2006.sav')
df7 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2005.sav')
df8 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2004.sav')
df9 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2002-2003.sav')
df10 <- read_sav('/Users/travis/Documents/gits/Data/iat_race/Race IAT.public.2014.sav')
#County linking information
df_haley <- read.csv('/Users/travis/Documents/gits/Data/Haley_countylinks/_Master Spreadsheet.csv', stringsAsFactors = F)
df_haley[1791,6] <- 'Doña Ana County, New Mexico' #because of the "ñ"
df_states <- data.frame(state=c(state.name, 'District of Columbia'),
state_abb=c(state.abb, 'DC'))
#Covariates
df_acs <- read.csv('/Users/travis/Documents/gits/Data/ACS/county_age/ACS_14_5YR_DP05_with_ann.csv', skip=1)
df_acs_eth <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_ethnicity/ACS_14_5YR_B02001_with_ann.csv',
skip = 1, stringsAsFactors = F)
df_acs_ed <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_education/ACS_14_5YR_S1501_with_ann.csv',
skip = 1, stringsAsFactors = F)
df_acs_pov_emp <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_poverty_emp/ACS_14_5YR_DP03_with_ann.csv',
skip = 1, stringsAsFactors = F)
df_acs_hous <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_housing/DEC_10_SF1_GCTPH1.US04PR_with_ann.csv',
skip=1, stringsAsFactors = F)
df_acs_mob <- read.csv('/Users/travis/Documents/gits/Data/ACS/state_mobility/ACS_14_5YR_S0701_with_ann.csv',
skip=1, stringsAsFactors = F)
df_fbi <- read.csv('/Users/travis/Documents/gits/Data/FBI/state_crimes/CrimeTrendsInOneVar.csv')
df_seg <- read.csv('/Users/travis/Documents/gits/Data/ACS/county_segregation/ACS_14_5YR_B02001_with_ann.csv',
skip=1, stringsAsFactors = F)
#educators <- c('25-2000', '25-3000', '25-4000', '25-9000')
# per stacey's contact in education, limit to the following:
educators <- c('25-2000', '25-3000')
# '25-1000' - postsecondary teachers
## Get IAT data, limit observations to just those with county information, age,
## and who identify as white
df %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
filter(raceomb==6) -> subdat
df2 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df3 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df4 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df5 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df6 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df7 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
mutate(tblack_0to10=tblacks_0to10,
twhite_0to10=twhites_0to10) %>%
mutate(raceomb = ethnic) %>% #race was called something else in this one
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df8 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
mutate(tblack_0to10=tblacks_0to10,
twhite_0to10=twhites_0to10) %>%
mutate(raceomb = ethnic) %>% #race was called something else in this one
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df9 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
mutate(raceomb = ethnic) %>% #race was mislabeled
mutate(tblack_0to10=tblacks_0to10, #change to be consistent with other files
twhite_0to10=twhites_0to10) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
df10 %>%
filter(CountyNo!='' &
!is.na(age)) %>%
select(CountyNo, STATE, D_biep.White_Good_all,
tblack_0to10, twhite_0to10, raceomb, age) %>%
rbind(subdat) %>%
filter(raceomb==6) -> subdat
subdat %>%
mutate(explicit_bias=tblack_0to10) %>%
mutate(explicit_bias_diff = twhite_0to10 - tblack_0to10) %>%
mutate(age_bin = cut(age, breaks=c(14, 24, 34, 54, 75, 120))) %>%
filter(!is.na(age_bin)) %>%
filter(STATE %ni%
c('AA', 'AE', 'AP', 'AS', 'FM', 'GU', 'MH', 'MP', 'PR', 'VI')) %>% #exclude territories, etc
mutate(county_id = paste(STATE, CountyNo, sep='-')) -> individual_data
### Combine all covariates into one dataframe ###
#################################################
df_acs <- df_acs[,c(3, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64)] #age population breakdown
df_acs$Geography <- as.character(df_acs$Geography)
df_acs$Geography[1803] <- 'Doña Ana County, New Mexico'
#county age distributions
df_acs %>%
gather(age, num, -Geography) %>%
mutate(name_from_census=Geography) %>% #make colun name match haley's
select(-Geography) %>%
left_join(df_haley) %>%
mutate(age = substr(age, 25, 26)) %>% #pick out the lower bound of the category defined by the ACS
mutate(age_bin = cut(as.numeric(age),
breaks=c(14, 24, 34, 54, 75, 120))) %>% #convert to numeric that matches the MRP scheme
group_by(county_fips, age_bin) %>%
summarise(num = sum(num, na.rm=T)) %>% #total number of people for each age group & county
left_join(df_haley[,c('state_name', 'state_fips',
'state_code', 'county_fips')]) -> df_acs_counts
#white and african american state level population
covs <- df_acs_eth[,c(3, 4, 6, 8)]
names(covs) <- c('state_name', 'total_pop', 'white_pop', 'black_pop')
covs %>%
mutate(white_prop = white_pop/total_pop,
black_prop = black_pop/total_pop) %>%
mutate(b.w.ratio = black_prop/white_prop) -> covs
#percentage w/ba or higher @ state level
covs_ed <- df_acs_ed[,c(3, 28)]
names(covs_ed) <- c('state_name', 'col_grads')
#state level unemployment, income & poverty level
covs_income <- df_acs_pov_emp[,c(3, 21, 248, 478)]
names(covs_income) <- c('state_name', 'unemp', 'income', 'poverty')
#state level housing density
covs_hous <- df_acs_hous[,c(5, 14)]
names(covs_hous) <- c('state_fips', 'housing_density')
#state-level mobility
covs_mob <- df_acs_mob[,c(3, 10, 12)]
names(covs_mob) <- c('state_name', 'moved_states', 'moved_abroad')
covs_mob$mobility <- covs_mob$moved_states + covs_mob$moved_abroad
#state-level crime rate
df_fbi %>%
gather(state_name, crime, -Year) %>%
mutate(state_name = stringr::str_replace_all(state_name, '\\.+', ' ')) %>%
left_join(covs[,c('state_name', 'total_pop')]) %>%
mutate(crime_rate = crime/total_pop) %>%
group_by(state_name) %>%
summarise(crime_rate = mean(crime_rate, na.rm=T)) -> covs_crime #averaged across 5 years
#state-level segregation index
df_seg <- df_seg[,c(1:4, 6,8)]
names(df_seg) <- c('ID', 'ID2', 'Geo', 'Total', 'White', 'Black')
df_seg %>%
mutate(FIPS = stringr::str_sub(ID, -11, -1)) %>%
mutate(state_fips = stringr::str_sub(FIPS, 1, 2),
county_fips = stringr::str_sub(FIPS, 3, 5),
census_fips = stringr::str_sub(FIPS, 6, 12)) %>%
select(-ID, -ID2) %>%
group_by(state_fips) %>%
mutate(state_total = sum(Total, na.rm=T),
state_white = sum(White, na.rm=T),
state_black = sum(Black, na.rm=T)) %>%
mutate(black_prop = Black/state_black,
white_prop = White/state_white) %>%
mutate(bw_diff = abs(black_prop-white_prop)) %>%
group_by(state_fips) %>%
mutate(dissim = sum(bw_diff)*.5) %>%
separate(Geo, c('tract', 'county', 'state_name'), sep=',') %>%
ungroup() %>%
select(state_name, dissim) %>%
mutate(state_name = stringr::str_trim(state_name)) %>%
distinct() %>%
arrange(desc(dissim)) -> covs_seg
#put 'em all together
df_acs_counts %>%
left_join(covs) %>%
left_join(covs_ed) %>%
left_join(covs_income) %>%
left_join(covs_hous) %>%
left_join(covs_mob) %>%
left_join(covs_crime) %>%
left_join(covs_seg) %>%
mutate(county_id = paste(
state_code, stringr::str_sub(county_fips, -3, -1), sep='-') )-> df_acs_counts
#df_acs_counts has state-level covariates & distribution of age within county
# Collapse to one row per state of standardized (z-scored) state-level
# covariates. mutate_at() is superseded in dplyr; across() (dplyr >= 1.0)
# is the modern equivalent. scale() still returns a one-column matrix per
# column in both forms, so downstream behavior is unchanged.
df_acs_counts %>%
  ungroup() %>%
  select(state_name:housing_density, mobility:dissim) %>%
  distinct() %>%
  mutate(across(total_pop:dissim, scale)) -> state_covs
#rename to join with state covariates
names(individual_data)[2] <- 'state_code'
individual_data %>%
left_join(state_covs) -> individual_data_base
# fit MRP models
# tutorial here: http://www.princeton.edu/~jkastell/mrp_primer.html
#no state-level predictors - these have trouble with convergence
#individual.model.bias <- lmer(D_biep.White_Good_all ~ (1|age_bin) + (1|county_id) +
# (1|state_code), data=individual_data_base)
#individual.model.explicit <- lmer(explicit_bias ~ (1|age_bin) + (1|county_id) +
# (1|state_code), data=individual_data_base)
# individual_data_diffmod <- individual_data_base[!is.na(individual_data_base$explicit_bias_diff),]
# individual.model.explicit_diff <- lmer(explicit_bias_diff ~ (1|age_bin) +
# (1|county_id) + (1|state_code),
# data=individual_data_base)
#state-level predictors are the same predictors as in the final model:
individual.model.bias <- lmer(D_biep.White_Good_all ~ total_pop + white_prop +
black_prop + b.w.ratio + col_grads + unemp +
income + poverty + housing_density + mobility +
crime_rate + dissim + (1|age_bin) +
(1|county_id) + (1|state_code),
data=individual_data_base)
individual.model.explicit <- lmer(explicit_bias ~ total_pop + white_prop +
black_prop + b.w.ratio + col_grads +
unemp + income + poverty + housing_density +
mobility + crime_rate + dissim +
(1|age_bin) + (1|county_id) +
(1|state_code), data=individual_data_base)
individual.model.explicit_diff <- lmer(explicit_bias_diff ~ total_pop +
white_prop + black_prop + b.w.ratio +
col_grads + unemp + income + poverty +
housing_density + mobility +
crime_rate + dissim + (1|age_bin) +
(1|county_id) + (1|state_code),
data=individual_data_base)
#required to get rid of convergence warnings - run the model for more iterations
ss <- getME(individual.model.explicit_diff, c('theta', 'fixef'))
individual.model.explicit_diff <- update(individual.model.explicit_diff,
start=ss,
control=lmerControl(optCtrl=list(maxfun=2e4)))
#create dataframe to make predictions over
df_acs_counts %>%
select(county_fips:state_code) %>%
ungroup() %>%
left_join(state_covs) %>%
mutate(county_id = paste(state_code,
str_sub(county_fips, -3, -1), sep='-')) -> scaled_counts
# Predict county-level outcomes from the fitted MRP models over the full
# post-stratification grid. allow.new.levels = TRUE falls back to the fixed
# effects for county/state levels that had no survey respondents.
#predict implicit
scaled_counts$yhat_bias <- predict(individual.model.bias,
                                   newdata = scaled_counts,
                                   allow.new.levels = TRUE)
#predict explicit
# FIX: the argument was spelled `newdat` and only worked through R's partial
# argument matching; spell `newdata` out explicitly (also T -> TRUE).
scaled_counts$yhat_explicit <- predict(individual.model.explicit,
                                       newdata = scaled_counts,
                                       allow.new.levels = TRUE)
#reverse score explicit bias
# Flip the sign so larger values mean more pro-White bias, consistent with
# the reverse-scored teacher data below.
scaled_counts$yhat_explicit <- scaled_counts$yhat_explicit * -1
#predict explicit diff score
scaled_counts$yhat_explicit_diff <- predict(individual.model.explicit_diff,
                                            newdata = scaled_counts,
                                            allow.new.levels = TRUE)
scaled_counts %>%
ungroup() %>%
group_by(county_id) %>%
summarise(weighted_bias = weighted.mean(yhat_bias, num),
weighted_explicit = weighted.mean(yhat_explicit, num),
weighted_explicit_diff = weighted.mean(yhat_explicit_diff, num)) -> mrp_ests
#compute naive (raw, unpooled) county means as well
individual_data %>%
group_by(county_id) %>%
summarise(bias = mean(D_biep.White_Good_all, na.rm=T),
explicit = mean(explicit_bias, na.rm=T),
explicit_diff = mean(explicit_bias_diff, na.rm=T),
n_bias_obs = sum(!is.na(D_biep.White_Good_all)),
n_explicit_obs = sum(!is.na(explicit_bias))) %>%
left_join(mrp_ests) -> county_means
write.csv(county_means, '/Users/travis/Documents/gits/educational_disparities/output/county_means.csv')
###### write teacher data
#gather all dataframes who have occupation info
# (df7-df9 are omitted as in the original -- per the comment above, those
# waves apparently lack the occupation variable.)
# The original repeated an identical filter/select/rbind stanza once per
# wave; collapse it into a single loop. The list is ordered so that
# do.call(rbind, ...) reproduces the original row order exactly
# (df10 rows first, then df6, df5, df4, df3, df2, df).
subdat <- do.call(
  rbind,
  lapply(list(df10, df6, df5, df4, df3, df2, df), function(wave) {
    wave %>%
      filter(CountyNo != '') %>%
      select(CountyNo, STATE, D_biep.White_Good_all,
             twhite_0to10, tblack_0to10, raceomb, age, occupation)
  })
)
#compute difference score, reverse explicit score, filter out territories & non-educators
subdat %>%
mutate(explicit_diff=twhite_0to10 - tblack_0to10,
explicit = tblack_0to10*-1) %>%
filter(STATE %ni%
c('AA', 'AE', 'AP', 'AS', 'FM', 'GU', 'MH', 'MP', 'PR', 'VI')) %>%
filter(occupation %in% educators) %>%
mutate(county_id = paste(STATE, CountyNo, sep='-')) -> individual_data
names(individual_data)[2] <- 'state_abb'
individual_data %>%
filter(raceomb==6) %>% #whites only
group_by(county_id) %>%
mutate(teacher_bias = mean(D_biep.White_Good_all, na.rm=T),
teacher_explicit = mean(explicit, na.rm=T),
teacher_explicit_diff = mean(explicit_diff, na.rm=T),
num_obs = n()) %>%
filter(num_obs>49) %>%
select(county_id, state_abb, teacher_bias,
teacher_explicit, teacher_explicit_diff, num_obs) %>%
distinct() -> county_teacher_estimates
write.csv(county_teacher_estimates, row.names = F,
file = '/Users/travis/Documents/gits/educational_disparities/output/county_teacher_means.csv')
|
#######################
## NHS Analysis ##
#######################
## 11/21/2018
## Fit models to NHS data
## prepare dataframes for JMMICS pack
## then fit:
## Conditional:
## joint conditional (no 0s)
## joint conditional
## outcome-only glmm
## Marginal:
## GEE-Exch
## IEE
## WGEE
## JMM
## marginal size model (for comparing joint models)
## load packages
source("../Functions/JMMICS.R")
## load and clean data
source("../Data Analysis/analysis_clean_data.R")
## prelim tables
source("../Data Analysis/analysis_EDA.R")
## other
## Use library() instead of require(): require() merely returns FALSE when a
## package is missing and lets the script fail later in a confusing place;
## library() errors immediately at the point of the problem.
library(tidyr)
library(dplyr)
library(glmmML)
library(lme4)
library(geepack)
library(ggplot2)
library(xtable)
library(JMMICSpack)
################################
### prepare data for JMMICSpack
# datG2: child-level rows; datG1: one row per mother/cluster -- per naming;
# verify against analysis_clean_data.R.
YY <- datG2$adhd     # child-level outcome vector
# Outcome-model design: intercept, DES exposure, maternal smoking, and
# year-of-birth category dummies (commented-out race/education terms kept
# for reference).
XX <- cbind(1,datG2$desqx1,datG2$msmk2,datG2$yob89_5155,datG2$yob89_5660,datG2$yob89_61plus)#,datG2$raceWhite,datG2$momed2,datG2$momed3,datG2$momed4)
NN <- datG1$totalkids   # cluster (family) sizes
# Size-model design: same covariates at the cluster level.
ZZ <- cbind(1,datG1$desqx1,datG1$msmk2,datG1$yob89_5155,datG1$yob89_5660,datG1$yob89_61plus)
ZZ0 <- ZZ[,1:2]         # intercept + first covariate; passed as Z0k in ZIP fits
IDD <- datG2$id2        # cluster id for each child row
### data for JMMICSpack (no 0s)
# Same structures restricted to the no-zero-size-cluster subset (*_no0).
YY_no0 <- datG2_no0$adhd
XX_no0 <- cbind(1,datG2_no0$desqx1,datG2_no0$msmk2,datG2_no0$yob89_5155,datG2_no0$yob89_5660,datG2_no0$yob89_61plus)
NN_no0 <- datG1_no0$totalkids
ZZ_no0 <- cbind(1,datG1_no0$desqx1,datG1_no0$msmk2,datG1_no0$yob89_5155,datG1_no0$yob89_5660,datG1_no0$yob89_61plus)
IDD_no0 <- datG2_no0$id2
########################################
## Model Fits ##
########################################
########################################
## joint (marginal) models
##
# GEE with exchangeable working correlation.
gee <- geeglm(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,id=id2,data=datG2,family=binomial,corstr="exchangeable")
gee_est <- gee$coef
gee_SE <- summary(gee)$coef[,2]   # column 2 of the coefficient table = SEs
print("gee")                      # progress marker for long-running fits
##
# IEE: GEE with independence working correlation.
iee <- geeglm(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,id=id2,data=datG2,family=binomial,corstr="independence")
iee_est <- iee$coef
iee_SE <- summary(iee)$coef[,2]
print("iee")
##
# WGEE: independence working correlation with each child weighted by the
# inverse of its family size (1/totalkids).
wgee <- geeglm(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,id=id2,data=datG2,family=binomial,corstr="independence",weights=(1/totalkids))
wgee_est <- wgee$coef
wgee_SE <- summary(wgee)$coef[,2]
print("wgee")
##
# Joint marginalized models via JMMICS_fit (sourced from
# ../Functions/JMMICS.R / JMMICSpack). condSize=FALSE & condOut=FALSE with
# joint=TRUE gives the marginal parameterization; nquad = quadrature points.
# Variants: *_no0s restricts to the no-zero subset (minNk=1), *_slopes adds
# a random slope on design column `slope_col`, *_ZIP uses a zero-inflated
# Poisson size model with zero-part design Z0k.
jmm_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_no0s_est <- jmm_no0s[[1]]   # element 1: point estimates
jmm_no0s_SE <- jmm_no0s[[2]]    # element 2: standard errors
print("jmm_no0s")
##
jmm <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_est <- jmm[[1]]
jmm_SE <- jmm[[2]]
print("jmm")
##
jmm_slopes_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_slopes_no0s_est <- jmm_slopes_no0s[[1]]
jmm_slopes_no0s_SE <- jmm_slopes_no0s[[2]]
print("jmm_slopes_no0s")
##
jmm_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_slopes_est <- jmm_slopes[[1]]
jmm_slopes_SE <- jmm_slopes[[2]]
print("jmm_slopes")
##
jmm_ZIP <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=FALSE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_ZIP_est <- jmm_ZIP[[1]]
jmm_ZIP_SE <- jmm_ZIP[[2]]
print("jmm_ZIP")
##
jmm_ZIP_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=TRUE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_ZIP_slopes_est <- jmm_ZIP_slopes[[1]]
jmm_ZIP_slopes_SE <- jmm_ZIP_slopes[[2]]
print("jmm_ZIP_slopes")
########################################
## joint (conditional) models
##
# Outcome-only random-intercept logistic GLMM via glmmML with Gauss-Hermite
# quadrature (30 points); ignores the cluster-size process.
naive <- glmmML(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,cluster=id2,family=binomial,data=datG2,method="ghq",n.points=30)
naive_est <- c(naive$coefficients,naive$sigma)   # fixed effects + RE SD
naive_SE <- c(naive$coef.sd,naive$sigma.sd)
print("naive")
##
# Outcome-only model with a random slope, fit via JMMICS_fit with joint=FALSE.
naive_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=FALSE,nquad=50)
naive_slopes_est <- naive_slopes[[1]]
naive_slopes_SE <- naive_slopes[[2]]
print("naive_slopes")
##
# Joint conditional models: condSize=TRUE & condOut=TRUE with joint=TRUE.
# Variant naming mirrors the marginal fits above (*_no0s, *_slopes, *_ZIP).
joint_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_no0s_est <- joint_no0s[[1]]
joint_no0s_SE <- joint_no0s[[2]]
print("joint_no0s")
##
joint <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_est <- joint[[1]]
joint_SE <- joint[[2]]
print("joint")
##
joint_slopes_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_slopes_no0s_est <- joint_slopes_no0s[[1]]
joint_slopes_no0s_SE <- joint_slopes_no0s[[2]]
print("joint_slopes_no0s")
##
joint_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_slopes_est <- joint_slopes[[1]]
joint_slopes_SE <- joint_slopes[[2]]
print("joint_slopes")
##
joint_ZIP <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=FALSE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_ZIP_est <- joint_ZIP[[1]]
joint_ZIP_SE <- joint_ZIP[[2]]
print("joint_ZIP")
##
joint_ZIP_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_ZIP_slopes_est <- joint_ZIP_slopes[[1]]
joint_ZIP_slopes_SE <- joint_ZIP_slopes[[2]]
print("joint_ZIP_slopes")
########################################
## Collect Results ##
########################################
# Parameter-block sizes used to slice each model's estimate vector below.
n_a <- ncol(ZZ) ## no. of alphas (size-model coefficients)
n_b <- ncol(XX) ## no. of betas (outcome-model coefficients)
n_e <- ncol(ZZ0) ## no. of epsilons (ZIP zero-part coefficients)
## order of parameters: epsilon, alphas, betas, sigma0, sigma1, gamma0, gamma1
## c(rep(NA,n_e), rep(NA,n_a), rep(NA,n_b), rep(NA,2), rep(NA,2))
## where sigma0=sigma for random intercepts
## order that parameters are output from JMMICSpack: alpha gamma0 gamma1 beta sigma0 sigma1 epsilon
#c(rep(NA,n_e),rep(NA,n_a),rep(NA,n_b),rep(NA,2),rep(NA,2)),
ests <- cbind(c(rep(NA,n_e),rep(NA,n_a),gee_est,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),iee_est,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),wgee_est,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),jmm_no0s_est[1:n_a],jmm_no0s_est[(n_a+1)+(1:n_b)],jmm_no0s_est[n_a+1+n_b+1],NA,jmm_no0s_est[(n_a+1)],NA),
c(rep(NA,n_e),jmm_est[1:n_a],jmm_est[(n_a+1)+(1:n_b)],jmm_est[(n_a+1+n_b)+1],NA,jmm_est[n_a+1],NA),
c(rep(NA,n_e),jmm_slopes_no0s_est[1:n_a],jmm_slopes_no0s_est[(n_a+2)+(1:n_b)],jmm_slopes_no0s_est[(n_a+2+n_b)+(1:2)],jmm_slopes_no0s_est[(n_a)+(1:2)]),
c(rep(NA,n_e),jmm_slopes_est[1:n_a],jmm_slopes_est[(n_a+2)+(1:n_b)],jmm_slopes_est[(n_a+2+n_b)+(1:2)],jmm_slopes_est[(n_a)+(1:2)]),
c(jmm_ZIP_est[(n_a+1+n_b+1)+1:n_e],jmm_ZIP_est[1:n_a],jmm_ZIP_est[(n_a+1)+(1:n_b)],jmm_ZIP_est[(n_a+1+n_b)+1],NA,jmm_ZIP_est[n_a+1],NA),
c(jmm_ZIP_slopes_est[(n_a+2+n_b+2)+1:n_e],jmm_ZIP_slopes_est[1:n_a],jmm_ZIP_slopes_est[(n_a+2)+(1:n_b)],jmm_ZIP_slopes_est[(n_a+2+n_b)+(1:2)],jmm_ZIP_slopes_est[(n_a)+(1:2)]),
c(rep(NA,n_e),rep(NA,n_a),naive_est[1:n_b],naive_est[(n_b)+1],NA,rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),naive_slopes_est[1:n_b],naive_slopes_est[(n_b)+(1:2)],rep(NA,2)),
c(rep(NA,n_e),joint_no0s_est[1:n_a],joint_no0s_est[(n_a+1)+(1:n_b)],joint_no0s_est[n_a+1+n_b+1],NA,joint_no0s_est[(n_a+1)],NA),
c(rep(NA,n_e),joint_est[1:n_a],joint_est[(n_a+1)+(1:n_b)],joint_est[(n_a+1+n_b)+1],NA,joint_est[n_a+1],NA),
c(rep(NA,n_e),joint_slopes_no0s_est[1:n_a],joint_slopes_no0s_est[(n_a+2)+(1:n_b)],joint_slopes_no0s_est[(n_a+2+n_b)+(1:2)],joint_slopes_no0s_est[(n_a)+(1:2)]),
c(rep(NA,n_e),joint_slopes_est[1:n_a],joint_slopes_est[(n_a+2)+(1:n_b)],joint_slopes_est[(n_a+2+n_b)+(1:2)],joint_slopes_est[(n_a)+(1:2)]),
c(joint_ZIP_est[(n_a+1+n_b+1)+1:n_e],joint_ZIP_est[1:n_a],joint_ZIP_est[(n_a+1)+(1:n_b)],joint_ZIP_est[(n_a+1+n_b)+1],NA,joint_ZIP_est[n_a+1],NA),
c(joint_ZIP_slopes_est[(n_a+2+n_b+2)+1:n_e],joint_ZIP_slopes_est[1:n_a],joint_ZIP_slopes_est[(n_a+2)+(1:n_b)],joint_ZIP_slopes_est[(n_a+2+n_b)+(1:2)],joint_ZIP_slopes_est[(n_a)+(1:2)])
)
SEs <- cbind(c(rep(NA,n_e),rep(NA,n_a),gee_SE,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),iee_SE,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),wgee_SE,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),jmm_no0s_SE[1:n_a],jmm_no0s_SE[(n_a+1)+(1:n_b)],jmm_no0s_SE[n_a+1+n_b+1],NA,jmm_no0s_SE[(n_a+1)],NA),
c(rep(NA,n_e),jmm_SE[1:n_a],jmm_SE[(n_a+1)+(1:n_b)],jmm_SE[(n_a+1+n_b)+1],NA,jmm_SE[n_a+1],NA),
c(rep(NA,n_e),jmm_slopes_no0s_SE[1:n_a],jmm_slopes_no0s_SE[(n_a+2)+(1:n_b)],jmm_slopes_no0s_SE[(n_a+2+n_b)+(1:2)],jmm_slopes_no0s_SE[(n_a)+(1:2)]),
c(rep(NA,n_e),jmm_slopes_SE[1:n_a],jmm_slopes_SE[(n_a+2)+(1:n_b)],jmm_slopes_SE[(n_a+2+n_b)+(1:2)],jmm_slopes_SE[(n_a)+(1:2)]),
c(jmm_ZIP_SE[(n_a+1+n_b+1)+1:n_e],jmm_ZIP_SE[1:n_a],jmm_ZIP_SE[(n_a+1)+(1:n_b)],jmm_ZIP_SE[(n_a+1+n_b)+1],NA,jmm_ZIP_SE[n_a+1],NA),
c(jmm_ZIP_slopes_SE[(n_a+2+n_b+2)+1:n_e],jmm_ZIP_slopes_SE[1:n_a],jmm_ZIP_slopes_SE[(n_a+2)+(1:n_b)],jmm_ZIP_slopes_SE[(n_a+2+n_b)+(1:2)],jmm_ZIP_slopes_SE[(n_a)+(1:2)]),
c(rep(NA,n_e),rep(NA,n_a),naive_SE[1:n_b],naive_SE[(n_b)+1],NA,rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),naive_slopes_SE[1:n_b],naive_slopes_SE[(n_b)+(1:2)],rep(NA,2)),
c(rep(NA,n_e),joint_no0s_SE[1:n_a],joint_no0s_SE[(n_a+1)+(1:n_b)],joint_no0s_SE[n_a+1+n_b+1],NA,joint_no0s_SE[(n_a+1)],NA),
c(rep(NA,n_e),joint_SE[1:n_a],joint_SE[(n_a+1)+(1:n_b)],joint_SE[(n_a+1+n_b)+1],NA,joint_SE[n_a+1],NA),
c(rep(NA,n_e),joint_slopes_no0s_SE[1:n_a],joint_slopes_no0s_SE[(n_a+2)+(1:n_b)],joint_slopes_no0s_SE[(n_a+2+n_b)+(1:2)],joint_slopes_no0s_SE[(n_a)+(1:2)]),
c(rep(NA,n_e),joint_slopes_SE[1:n_a],joint_slopes_SE[(n_a+2)+(1:n_b)],joint_slopes_SE[(n_a+2+n_b)+(1:2)],joint_slopes_SE[(n_a)+(1:2)]),
c(joint_ZIP_SE[(n_a+1+n_b+1)+1:n_e],joint_ZIP_SE[1:n_a],joint_ZIP_SE[(n_a+1)+(1:n_b)],joint_ZIP_SE[(n_a+1+n_b)+1],NA,joint_ZIP_SE[n_a+1],NA),
c(joint_ZIP_slopes_SE[(n_a+2+n_b+2)+1:n_e],joint_ZIP_slopes_SE[1:n_a],joint_ZIP_slopes_SE[(n_a+2)+(1:n_b)],joint_ZIP_slopes_SE[(n_a+2+n_b)+(1:2)],joint_ZIP_slopes_SE[(n_a)+(1:2)])
)
rownames(ests) <- rownames(SEs) <- c("e0","e1 DES",
"a0","a1 DES","a2 msmk","a3 yob5155","a4 yob5660","a5 yob61plus",
"b0","b1 DES","b2 msmk","b3 yob5155","b4 yob5660","b5 yob61plus",
"Sigma0","Sigma1",
"Gamma0","Gamma1"
)
colnames(ests) <- colnames(SEs) <- c("GEE","IEE","WEE",
"JMM No0s","JMM",
"JMMSlopes No0s","JMMSlopes",
"JMM ZIP","JMMSlopes ZIP",
"Out-Only","Out-Only Slopes",
"Joint No0s","Joint",
"JointSlopes No0s","JointSlopes",
"Joint ZIP","JointSlopes ZIP"
)
write.table(ests,file="../Data Analysis/ests.txt")
write.table(SEs,file="../Data Analysis/SEs.txt")
xtable(ests)
xtable(SEs)
| /Data Analysis/analysis_run.R | no_license | glenmcgee/InformativeEmptiness | R | false | false | 12,461 | r | #######################
## NHS Analysis ##
#######################
## 11/21/2018
## Fit models to NHS data
## prepare dataframes for JMMICS pack
## then fit:
## Conditional:
## joint conditional (no 0s)
## joint conditional
## outcome-only glmm
## Marginal:
## GEE-Exch
## IEE
## WGEE
## JMM
## marginal size model (for comparing joint models)
## load packages
source("../Functions/JMMICS.R")
## load and clean data
source("../Data Analysis/analysis_clean_data.R")
## prelim tables
source("../Data Analysis/analysis_EDA.R")
## other
require(tidyr)
require(dplyr)
require(glmmML)
require(lme4)
require(geepack)
require(ggplot2)
require(xtable)
library(JMMICSpack)
################################
### prepare data for JMMICSpack
YY <- datG2$adhd
XX <- cbind(1,datG2$desqx1,datG2$msmk2,datG2$yob89_5155,datG2$yob89_5660,datG2$yob89_61plus)#,datG2$raceWhite,datG2$momed2,datG2$momed3,datG2$momed4)
NN <- datG1$totalkids
ZZ <- cbind(1,datG1$desqx1,datG1$msmk2,datG1$yob89_5155,datG1$yob89_5660,datG1$yob89_61plus)
ZZ0 <- ZZ[,1:2]
IDD <- datG2$id2
### data for JMMICSpack (no 0s)
YY_no0 <- datG2_no0$adhd
XX_no0 <- cbind(1,datG2_no0$desqx1,datG2_no0$msmk2,datG2_no0$yob89_5155,datG2_no0$yob89_5660,datG2_no0$yob89_61plus)
NN_no0 <- datG1_no0$totalkids
ZZ_no0 <- cbind(1,datG1_no0$desqx1,datG1_no0$msmk2,datG1_no0$yob89_5155,datG1_no0$yob89_5660,datG1_no0$yob89_61plus)
IDD_no0 <- datG2_no0$id2
########################################
## Model Fits ##
########################################
########################################
## joint (marginal) models
##
gee <- geeglm(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,id=id2,data=datG2,family=binomial,corstr="exchangeable")
gee_est <- gee$coef
gee_SE <- summary(gee)$coef[,2]
print("gee")
##
iee <- geeglm(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,id=id2,data=datG2,family=binomial,corstr="independence")
iee_est <- iee$coef
iee_SE <- summary(iee)$coef[,2]
print("iee")
##
wgee <- geeglm(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,id=id2,data=datG2,family=binomial,corstr="independence",weights=(1/totalkids))
wgee_est <- wgee$coef
wgee_SE <- summary(wgee)$coef[,2]
print("wgee")
##
jmm_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_no0s_est <- jmm_no0s[[1]]
jmm_no0s_SE <- jmm_no0s[[2]]
print("jmm_no0s")
##
jmm <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_est <- jmm[[1]]
jmm_SE <- jmm[[2]]
print("jmm")
##
jmm_slopes_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_slopes_no0s_est <- jmm_slopes_no0s[[1]]
jmm_slopes_no0s_SE <- jmm_slopes_no0s[[2]]
print("jmm_slopes_no0s")
##
jmm_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_slopes_est <- jmm_slopes[[1]]
jmm_slopes_SE <- jmm_slopes[[2]]
print("jmm_slopes")
##
jmm_ZIP <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=FALSE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_ZIP_est <- jmm_ZIP[[1]]
jmm_ZIP_SE <- jmm_ZIP[[2]]
print("jmm_ZIP")
##
jmm_ZIP_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=TRUE,slope_col=2,condSize=FALSE,condOut=FALSE,joint=TRUE,nquad=50)
jmm_ZIP_slopes_est <- jmm_ZIP_slopes[[1]]
jmm_ZIP_slopes_SE <- jmm_ZIP_slopes[[2]]
print("jmm_ZIP_slopes")
########################################
## joint (conditional) models
##
naive <- glmmML(adhd~desqx1+msmk2+yob89_5155+yob89_5660+yob89_61plus,cluster=id2,family=binomial,data=datG2,method="ghq",n.points=30)
naive_est <- c(naive$coefficients,naive$sigma)
naive_SE <- c(naive$coef.sd,naive$sigma.sd)
print("naive")
##
naive_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=FALSE,nquad=50)
naive_slopes_est <- naive_slopes[[1]]
naive_slopes_SE <- naive_slopes[[2]]
print("naive_slopes")
##
joint_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_no0s_est <- joint_no0s[[1]]
joint_no0s_SE <- joint_no0s[[2]]
print("joint_no0s")
##
joint <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=FALSE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_est <- joint[[1]]
joint_SE <- joint[[2]]
print("joint")
##
joint_slopes_no0s <- JMMICS_fit(Nk=NN_no0,Zk=ZZ_no0,Yki=YY_no0,Xki=XX_no0,IDk=IDD_no0,Z0k=ZZ_no0,weights=NA,minNk=1,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_slopes_no0s_est <- joint_slopes_no0s[[1]]
joint_slopes_no0s_SE <- joint_slopes_no0s[[2]]
print("joint_slopes_no0s")
##
joint_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=FALSE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_slopes_est <- joint_slopes[[1]]
joint_slopes_SE <- joint_slopes[[2]]
print("joint_slopes")
##
joint_ZIP <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=FALSE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_ZIP_est <- joint_ZIP[[1]]
joint_ZIP_SE <- joint_ZIP[[2]]
print("joint_ZIP")
##
joint_ZIP_slopes <- JMMICS_fit(Nk=NN,Zk=ZZ,Yki=YY,Xki=XX,IDk=IDD,Z0k=ZZ0,weights=NA,minNk=0,NegBin=FALSE,ZIP=TRUE,slopes=TRUE,slope_col=2,condSize=TRUE,condOut=TRUE,joint=TRUE,nquad=50)
joint_ZIP_slopes_est <- joint_ZIP_slopes[[1]]
joint_ZIP_slopes_SE <- joint_ZIP_slopes[[2]]
print("joint_ZIP_slopes")
########################################
## Collect Results ##
########################################
n_a <- ncol(ZZ) ## no. of alphas
n_b <- ncol(XX) ## no. of betas
n_e <- ncol(ZZ0) ## no. of epsilons
## order of parameters: epsilon, alphas, betas, sigma0, sigma1, gamma0, gamma1
## c(rep(NA,n_e), rep(NA,n_a), rep(NA,n_b), rep(NA,2), rep(NA,2))
## where sigma0=sigma for random intercepts
## order that parameters are output from JMMICSpack: alpha gamma0 gamma1 beta sigma0 sigma1 epsilon
#c(rep(NA,n_e),rep(NA,n_a),rep(NA,n_b),rep(NA,2),rep(NA,2)),
ests <- cbind(c(rep(NA,n_e),rep(NA,n_a),gee_est,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),iee_est,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),wgee_est,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),jmm_no0s_est[1:n_a],jmm_no0s_est[(n_a+1)+(1:n_b)],jmm_no0s_est[n_a+1+n_b+1],NA,jmm_no0s_est[(n_a+1)],NA),
c(rep(NA,n_e),jmm_est[1:n_a],jmm_est[(n_a+1)+(1:n_b)],jmm_est[(n_a+1+n_b)+1],NA,jmm_est[n_a+1],NA),
c(rep(NA,n_e),jmm_slopes_no0s_est[1:n_a],jmm_slopes_no0s_est[(n_a+2)+(1:n_b)],jmm_slopes_no0s_est[(n_a+2+n_b)+(1:2)],jmm_slopes_no0s_est[(n_a)+(1:2)]),
c(rep(NA,n_e),jmm_slopes_est[1:n_a],jmm_slopes_est[(n_a+2)+(1:n_b)],jmm_slopes_est[(n_a+2+n_b)+(1:2)],jmm_slopes_est[(n_a)+(1:2)]),
c(jmm_ZIP_est[(n_a+1+n_b+1)+1:n_e],jmm_ZIP_est[1:n_a],jmm_ZIP_est[(n_a+1)+(1:n_b)],jmm_ZIP_est[(n_a+1+n_b)+1],NA,jmm_ZIP_est[n_a+1],NA),
c(jmm_ZIP_slopes_est[(n_a+2+n_b+2)+1:n_e],jmm_ZIP_slopes_est[1:n_a],jmm_ZIP_slopes_est[(n_a+2)+(1:n_b)],jmm_ZIP_slopes_est[(n_a+2+n_b)+(1:2)],jmm_ZIP_slopes_est[(n_a)+(1:2)]),
c(rep(NA,n_e),rep(NA,n_a),naive_est[1:n_b],naive_est[(n_b)+1],NA,rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),naive_slopes_est[1:n_b],naive_slopes_est[(n_b)+(1:2)],rep(NA,2)),
c(rep(NA,n_e),joint_no0s_est[1:n_a],joint_no0s_est[(n_a+1)+(1:n_b)],joint_no0s_est[n_a+1+n_b+1],NA,joint_no0s_est[(n_a+1)],NA),
c(rep(NA,n_e),joint_est[1:n_a],joint_est[(n_a+1)+(1:n_b)],joint_est[(n_a+1+n_b)+1],NA,joint_est[n_a+1],NA),
c(rep(NA,n_e),joint_slopes_no0s_est[1:n_a],joint_slopes_no0s_est[(n_a+2)+(1:n_b)],joint_slopes_no0s_est[(n_a+2+n_b)+(1:2)],joint_slopes_no0s_est[(n_a)+(1:2)]),
c(rep(NA,n_e),joint_slopes_est[1:n_a],joint_slopes_est[(n_a+2)+(1:n_b)],joint_slopes_est[(n_a+2+n_b)+(1:2)],joint_slopes_est[(n_a)+(1:2)]),
c(joint_ZIP_est[(n_a+1+n_b+1)+1:n_e],joint_ZIP_est[1:n_a],joint_ZIP_est[(n_a+1)+(1:n_b)],joint_ZIP_est[(n_a+1+n_b)+1],NA,joint_ZIP_est[n_a+1],NA),
c(joint_ZIP_slopes_est[(n_a+2+n_b+2)+1:n_e],joint_ZIP_slopes_est[1:n_a],joint_ZIP_slopes_est[(n_a+2)+(1:n_b)],joint_ZIP_slopes_est[(n_a+2+n_b)+(1:2)],joint_ZIP_slopes_est[(n_a)+(1:2)])
)
SEs <- cbind(c(rep(NA,n_e),rep(NA,n_a),gee_SE,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),iee_SE,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),wgee_SE,rep(NA,2),rep(NA,2)),
c(rep(NA,n_e),jmm_no0s_SE[1:n_a],jmm_no0s_SE[(n_a+1)+(1:n_b)],jmm_no0s_SE[n_a+1+n_b+1],NA,jmm_no0s_SE[(n_a+1)],NA),
c(rep(NA,n_e),jmm_SE[1:n_a],jmm_SE[(n_a+1)+(1:n_b)],jmm_SE[(n_a+1+n_b)+1],NA,jmm_SE[n_a+1],NA),
c(rep(NA,n_e),jmm_slopes_no0s_SE[1:n_a],jmm_slopes_no0s_SE[(n_a+2)+(1:n_b)],jmm_slopes_no0s_SE[(n_a+2+n_b)+(1:2)],jmm_slopes_no0s_SE[(n_a)+(1:2)]),
c(rep(NA,n_e),jmm_slopes_SE[1:n_a],jmm_slopes_SE[(n_a+2)+(1:n_b)],jmm_slopes_SE[(n_a+2+n_b)+(1:2)],jmm_slopes_SE[(n_a)+(1:2)]),
c(jmm_ZIP_SE[(n_a+1+n_b+1)+1:n_e],jmm_ZIP_SE[1:n_a],jmm_ZIP_SE[(n_a+1)+(1:n_b)],jmm_ZIP_SE[(n_a+1+n_b)+1],NA,jmm_ZIP_SE[n_a+1],NA),
c(jmm_ZIP_slopes_SE[(n_a+2+n_b+2)+1:n_e],jmm_ZIP_slopes_SE[1:n_a],jmm_ZIP_slopes_SE[(n_a+2)+(1:n_b)],jmm_ZIP_slopes_SE[(n_a+2+n_b)+(1:2)],jmm_ZIP_slopes_SE[(n_a)+(1:2)]),
c(rep(NA,n_e),rep(NA,n_a),naive_SE[1:n_b],naive_SE[(n_b)+1],NA,rep(NA,2)),
c(rep(NA,n_e),rep(NA,n_a),naive_slopes_SE[1:n_b],naive_slopes_SE[(n_b)+(1:2)],rep(NA,2)),
c(rep(NA,n_e),joint_no0s_SE[1:n_a],joint_no0s_SE[(n_a+1)+(1:n_b)],joint_no0s_SE[n_a+1+n_b+1],NA,joint_no0s_SE[(n_a+1)],NA),
c(rep(NA,n_e),joint_SE[1:n_a],joint_SE[(n_a+1)+(1:n_b)],joint_SE[(n_a+1+n_b)+1],NA,joint_SE[n_a+1],NA),
c(rep(NA,n_e),joint_slopes_no0s_SE[1:n_a],joint_slopes_no0s_SE[(n_a+2)+(1:n_b)],joint_slopes_no0s_SE[(n_a+2+n_b)+(1:2)],joint_slopes_no0s_SE[(n_a)+(1:2)]),
c(rep(NA,n_e),joint_slopes_SE[1:n_a],joint_slopes_SE[(n_a+2)+(1:n_b)],joint_slopes_SE[(n_a+2+n_b)+(1:2)],joint_slopes_SE[(n_a)+(1:2)]),
c(joint_ZIP_SE[(n_a+1+n_b+1)+1:n_e],joint_ZIP_SE[1:n_a],joint_ZIP_SE[(n_a+1)+(1:n_b)],joint_ZIP_SE[(n_a+1+n_b)+1],NA,joint_ZIP_SE[n_a+1],NA),
c(joint_ZIP_slopes_SE[(n_a+2+n_b+2)+1:n_e],joint_ZIP_slopes_SE[1:n_a],joint_ZIP_slopes_SE[(n_a+2)+(1:n_b)],joint_ZIP_slopes_SE[(n_a+2+n_b)+(1:2)],joint_ZIP_slopes_SE[(n_a)+(1:2)])
)
rownames(ests) <- rownames(SEs) <- c("e0","e1 DES",
"a0","a1 DES","a2 msmk","a3 yob5155","a4 yob5660","a5 yob61plus",
"b0","b1 DES","b2 msmk","b3 yob5155","b4 yob5660","b5 yob61plus",
"Sigma0","Sigma1",
"Gamma0","Gamma1"
)
colnames(ests) <- colnames(SEs) <- c("GEE","IEE","WEE",
"JMM No0s","JMM",
"JMMSlopes No0s","JMMSlopes",
"JMM ZIP","JMMSlopes ZIP",
"Out-Only","Out-Only Slopes",
"Joint No0s","Joint",
"JointSlopes No0s","JointSlopes",
"Joint ZIP","JointSlopes ZIP"
)
write.table(ests,file="../Data Analysis/ests.txt")
write.table(SEs,file="../Data Analysis/SEs.txt")
xtable(ests)
xtable(SEs)
|
\name{prepareForPredictBC}
\alias{prepareForPredictBC}
\title{
Convert node predictions into probabilities for binary classification models.
}
\description{
This method can only be applied to a binary classification model. Its primary purpose is to process a \code{\link[randomForest]{randomForest}} object as required for \code{predictBC()}.
This method converts node predictions in the \code{\link[randomForest]{randomForest}} object.
The current class label in terminal nodes is replaced by the probability of belonging to a "selected" class - where the probability is calculated as the proportion of local training set instances assigned to the terminal node in question which belong to the "selected" class.
The class of the first instance in the complete training dataset is chosen as the "selected" class.
} %RMR: Anna - is this correct? Also, is the "selected" class, as explained here, the class assigned the number 1? If so, this should be explained in getChanges.Rd, featureContributions.Rd, getLocalIncrements.rd and predictBC.rd! #<TO DO>: Check this is the case.
\usage{
prepareForPredictBC(object, dataT, mcls=NULL)
}
\arguments{
\item{object}{an object of the class \code{randomForest}}
\item{dataT}{a data frame containing the variables in the model for all instances in the training set} %RMR: Anna, I'm pretty sure this is correct?
\item{mcls}{main class that will be set to "1" for binary classification. If \code{NULL}, the class name from the first record in \code{dataT} will be set as "1"}
}
\value{
an object of class \code{randomForest} with a new \code{type="binary"}.
}
\author{
Anna Palczewska \email{annawojak@gmail.com}
}
\seealso{
\code{\link[randomForest]{randomForest}}
}
\examples{
\dontrun{
library(randomForest)
data(ames)
ames_train<-ames[ames$Type=="Train",-c(1,3, ncol(ames))]
rF_Model <- randomForest(x=ames_train[,-1],y=as.factor(as.character(ames_train[,1])),
ntree=500,importance=TRUE, keep.inbag=TRUE,replace=FALSE)
new_Model<-prepareForPredictBC(rF_Model, ames_train[,-1])
}
}
\keyword{binary}
\keyword{ contribution }
| /eyeBot/pkg/man/prepareForPredictBC.rd | no_license | wildoctopus/eyeBot | R | false | false | 2,084 | rd | \name{prepareForPredictBC}
\alias{prepareForPredictBC}
\title{
Convert node predictions into probabilities for binary classification models.
}
\description{
This method can only be applied to a binary classification model. Its primary purpose is to process a \code{\link[randomForest]{randomForest}} object as required for \code{predictBC()}.
This method converts node predictions in the \code{\link[randomForest]{randomForest}} object.
The current class label in terminal nodes is replaced by the probability of belonging to a "selected" class - where the probability is calculated as the proportion of local training set instances assigned to the terminal node in question which belong to the "selected" class.
The class of the first instance in the complete training dataset is chosen as the "selected" class.
} %RMR: Anna - is this correct? Also, is the "selected" class, as explained here, the class assigned the number 1? If so, this should be explained in getChanges.Rd, featureContributions.Rd, getLocalIncrements.rd and predictBC.rd! #<TO DO>: Check this is the case.
\usage{
prepareForPredictBC(object, dataT, mcls=NULL)
}
\arguments{
\item{object}{an object of the class \code{randomForest}}
\item{dataT}{a data frame containing the variables in the model for all instances in the training set} %RMR: Anna, I'm pretty sure this is correct?
\item{mcls}{main class that will be set to "1" for binary classification. If \code{NULL}, the class name from the first record in \code{dataT} will be set as "1"}
}
\value{
an object of class \code{randomForest} with a new \code{type="binary"}.
}
\author{
Anna Palczewska \email{annawojak@gmail.com}
}
\seealso{
\code{\link[randomForest]{randomForest}}
}
\examples{
\dontrun{
library(randomForest)
data(ames)
ames_train<-ames[ames$Type=="Train",-c(1,3, ncol(ames))]
rF_Model <- randomForest(x=ames_train[,-1],y=as.factor(as.character(ames_train[,1])),
ntree=500,importance=TRUE, keep.inbag=TRUE,replace=FALSE)
new_Model<-prepareForPredictBC(rF_Model, ames_train[,-1])
}
}
\keyword{binary}
\keyword{ contribution }
|
#Necesita para correr en Google Cloud
#40 GB de memoria RAM
#256 GB de espacio en el disco local
#8 vCPU
#clase_binaria2 1={BAJA+2,BAJA+1} 0={CONTINUA}
#Entrena en a union de ONCE meses de [202001, 202011]
#No usa variables historicas
#Optimizacion Bayesiana de hiperparametros de lightgbm
#usa el interminable 5-fold cross validation
#funciona automaticamente con EXPERIMENTOS
#va generando incrementalmente salidas para kaggle
# WARNING usted debe cambiar este script si lo corre en su propio Linux
#limpio la memoria
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("rlist")
require("yaml")
require("lightgbm")
#paquetes necesarios para la Bayesian Optimization
require("DiceKriging")
require("mlrMBO")
#para poder usarlo en la PC y en la nube sin tener que cambiar la ruta
#cambiar aqui las rutas en su maquina
switch ( Sys.info()[['sysname']],
Windows = { directory.root <- "M:\\" }, #Windows
Darwin = { directory.root <- "~/dm/" }, #Apple MAC
Linux = { directory.root <- "~/buckets/b1/" } #Google Cloud
)
#defino la carpeta donde trabajo
setwd( directory.root )
kexperimento <- NA #NA si se corre la primera vez, un valor concreto si es para continuar procesando
kscript <- "721_lgb_bin2_hist"
karch_dataset <- "./datasetsOri/paquete_premium.csv.gz"
kmes_apply <- 202101 #El mes donde debo aplicar el modelo
kmes_train_hasta <- 202011 #Obvimente, solo puedo entrenar hasta 202011
kmes_train_desde <- 202001 #Entreno desde Enero-2020
kcanaritos <- 30
kBO_iter <- 100 #cantidad de iteraciones de la Optimizacion Bayesiana
#Aqui se cargan los hiperparametros
hs <- makeParamSet(
makeNumericParam("learning_rate", lower= 0.02 , upper= 0.06),
makeNumericParam("feature_fraction", lower= 0.1 , upper= 0.4),
makeIntegerParam("min_data_in_leaf", lower= 1000L , upper= 8000L),
makeIntegerParam("num_leaves", lower= 100L , upper= 1024L),
makeNumericParam("prob_corte", lower= 0.040, upper= 0.055)
)
campos_malos <- c("mpasivos_margen") #aqui se deben cargar todos los campos culpables del Data Drifting
ksemilla_azar <- 102191 #Aqui poner la propia semilla
#------------------------------------------------------------------------------
#Funcion que lleva el registro de los experimentos
# Registry of experiment numbers, persisted in ./maestro.yaml.
# Returns the current experiment id and leaves the file holding the next one.
get_experimento <- function()
{
  maestro_path <- "./maestro.yaml"

  # first ever run: seed the counter at 1000
  if (!file.exists(maestro_path)) cat(file = maestro_path, "experimento: 1000")

  registro <- read_yaml(maestro_path)
  experimento_actual <- registro$experimento

  # persist the incremented counter for the next run
  registro$experimento <- as.integer(experimento_actual + 1)
  Sys.chmod(maestro_path, mode = "0644", use_umask = TRUE)  # make it writable again
  write_yaml(registro, maestro_path)
  Sys.chmod(maestro_path, mode = "0444", use_umask = TRUE)  # leave the file read-only

  experimento_actual
}
#------------------------------------------------------------------------------
#graba a un archivo los componentes de lista
#para el primer registro, escribe antes los titulos
# Append one record (a named list `reg`) as a tab-separated line to a log file.
# On first use the file is created with a header row built from the list names.
# arch: explicit log path; when NA the path is derived from the argument's name.
# verbose: also echo the written line to the console.
loguear <- function( reg, arch=NA, folder="./work/", ext=".txt", verbose=TRUE )
{
  # resolve the destination file (substitute() captures the caller's expression)
  archivo <- arch
  if (is.na(arch)) archivo <- paste0(folder, substitute(reg), ext)

  # first write: emit the header (timestamp column + one column per field)
  if (!file.exists(archivo)) {
    encabezado <- paste0("fecha\t",
                         paste(list.names(reg), collapse = "\t"), "\n")
    cat(encabezado, file = archivo)
  }

  # data row: timestamp followed by the tab-separated field values
  registro <- paste0(format(Sys.time(), "%Y%m%d %H%M%S"), "\t",
                     gsub(", ", "\t", toString(reg)), "\n")
  cat(registro, file = archivo, append = TRUE)  # append to the log file

  if (verbose) cat(registro)  # echo to the console
}
#------------------------------------------------------------------------------
PROB_CORTE <- 0.025
# Custom lightgbm evaluation metric: business gain of the classifier.
# Reads the global cutoff PROB_CORTE set by the Bayesian optimization.
# The weight column is the (inmoral) trick that marks true BAJA+2 records
# (they were loaded with weight 1.0000001 > 1), so the gain can be computed
# on the real class: +48750 per captured BAJA+2, -1250 per other stimulus.
fganancia_logistic_lightgbm <- function(probs, datos)
{
  etiquetas <- getinfo(datos, "label")
  pesos     <- getinfo(datos, "weight")

  estimulado <- probs > PROB_CORTE
  premio     <- ifelse(etiquetas == 1 & pesos > 1, 48750, -1250)
  gan        <- sum(estimulado * premio)

  list("name"          = "ganancia",
       "value"         = gan,
       "higher_better" = TRUE)
}
#------------------------------------------------------------------------------
#esta funcion solo puede recibir los parametros que se estan optimizando
#el resto de los parametros se pasan como variables globales, la semilla del mal ...
# Objective function maximized by the Bayesian optimization (mlrMBO).
# x holds one hyperparameter combination: learning_rate, feature_fraction,
# min_data_in_leaf, num_leaves, prob_corte.
# Runs a 5-fold lightgbm cross validation with the custom gain metric; when the
# CV gain beats the best seen so far, refits on the full training data and
# writes a feature-importance file plus a Kaggle submission file.
# Relies on globals: dtrain, dapply, campos_buenos, GLOBAL_iteracion,
# GLOBAL_ganancia_max, PROB_CORTE, ksemilla_azar, kimp, kkaggle, klog.
EstimarGanancia_lightgbm <- function( x )
{
  GLOBAL_iteracion <<- GLOBAL_iteracion + 1
  gc()
  PROB_CORTE <<- x$prob_corte   # set the global cutoff read by fganancia_logistic_lightgbm
  kfolds <- 5   # number of folds for cross validation

  param_basicos <- list( objective= "binary",
                         metric= "custom",
                         first_metric_only= TRUE,
                         boost_from_average= TRUE,
                         feature_pre_filter= FALSE,
                         verbosity= -100,
                         seed= 999983,
                         max_depth= -1,           # -1 means unlimited; kept fixed for now
                         min_gain_to_split= 0.0,  # kept fixed for now
                         lambda_l1= 0.0,          # kept fixed for now
                         lambda_l2= 0.0,          # kept fixed for now
                         max_bin= 31,             # kept fixed for now
                         num_iterations= 9999,    # very large on purpose; early_stopping_rounds limits it
                         force_row_wise= TRUE     # avoids scaring students with many warnings
                       )

  # the tricky parameter: it depends on another hyperparameter
  param_variable <- list( early_stopping_rounds= as.integer(50 + 1/x$learning_rate) )

  param_completo <- c( param_basicos, param_variable, x )

  set.seed( 999983 )
  modelocv <- lgb.cv( data= dtrain,
                      eval= fganancia_logistic_lightgbm,
                      stratified= TRUE,   # stratify the cross validation
                      nfold= kfolds,      # folds of the cross validation
                      param= param_completo,
                      verbose= -100
                    )

  # gain at the best boosting round, scaled up to the whole training set
  ganancia <- unlist(modelocv$record_evals$valid$ganancia$eval)[ modelocv$best_iter ]

  ganancia_normalizada <- ganancia* kfolds
  attr(ganancia_normalizada ,"extras" ) <- list("num_iterations"= modelocv$best_iter)   # this is how an extra parameter is returned to mlrMBO

  param_completo$num_iterations <- modelocv$best_iter   # adopt the best num_iterations
  param_completo["early_stopping_rounds"] <- NULL

  # if this gain beats the best so far, generate the Kaggle submission
  if( ganancia > GLOBAL_ganancia_max )
  {
    GLOBAL_ganancia_max <<- ganancia   # record the new maximum gain in the GLOBAL, hence the <<-

    set.seed(ksemilla_azar)
    modelo <- lightgbm( data= dtrain,
                        param= param_completo,
                        verbose= -100
                      )

    # compute and persist the variable importance for this iteration
    tb_importancia <- lgb.importance( model= modelo )
    fwrite( tb_importancia,
            file= paste0(kimp, "imp_", GLOBAL_iteracion, ".txt"),
            sep="\t" )

    # score the application month and binarize with the optimized cutoff
    prediccion <- predict( modelo, data.matrix( dapply[ , campos_buenos, with=FALSE]) )
    Predicted <- as.integer( prediccion > x$prob_corte )

    entrega <- as.data.table( list( "numero_de_cliente"= dapply$numero_de_cliente,
                                    "Predicted"= Predicted) )

    # write the Kaggle submission file
    fwrite( entrega,
            file= paste0(kkaggle, GLOBAL_iteracion, ".csv" ),
            sep= "," )
  }

  # log this iteration's hyperparameters and gain
  xx <- param_completo
  xx$iteracion_bayesiana <- GLOBAL_iteracion
  xx$ganancia <- ganancia_normalizada   # attach the gain
  loguear( xx, arch= klog )

  return( ganancia )
}
#------------------------------------------------------------------------------
#Aqui empieza el programa
if( is.na(kexperimento ) ) kexperimento <- get_experimento() #creo el experimento
#en estos archivos quedan los resultados
dir.create( paste0( "./work/E", kexperimento, "/" ) )
kbayesiana <- paste0("./work/E", kexperimento, "/E", kexperimento, "_", kscript, ".RDATA" )
klog <- paste0("./work/E", kexperimento, "/E", kexperimento, "_", kscript, "_BOlog.txt" )
kimp <- paste0("./work/E", kexperimento, "/E", kexperimento, "_", kscript, "_" )
kkaggle <- paste0("./kaggle/E",kexperimento, "_", kscript, "_" )
GLOBAL_ganancia_max <- -Inf
GLOBAL_iteracion <- 0
#si ya existe el archivo log, traigo hasta donde llegue
if( file.exists(klog) )
{
tabla_log <- fread( klog)
GLOBAL_iteracion <- nrow( tabla_log ) -1
GLOBAL_ganancia_max <- tabla_log[ , max(ganancia) ]
}
#cargo el dataset que tiene los 36 meses
dataset <- fread(karch_dataset)
#agrego canaritos
if( kcanaritos > 0 )
{
for( i in 1:kcanaritos) dataset[ , paste0("canarito", i ) := runif( nrow(dataset))]
}
#cargo los datos donde voy a aplicar el modelo
dapply <- copy( dataset[ foto_mes==kmes_apply ] )
#creo la clase_binaria2 1={ BAJA+2,BAJA+1} 0={CONTINUA}
dataset[ , clase01:= ifelse( clase_ternaria=="CONTINUA", 0, 1 ) ]
#los campos que se van a utilizar
campos_buenos <- setdiff( colnames(dataset), c("clase_ternaria","clase01", campos_malos) )
#dejo los datos en el formato que necesita LightGBM
#uso el weight como un truco ESPANTOSO para saber la clase real
dtrain <- lgb.Dataset( data= data.matrix( dataset[ foto_mes>=kmes_train_desde & foto_mes<=kmes_train_hasta , campos_buenos, with=FALSE]),
label= dataset[ foto_mes>=kmes_train_desde & foto_mes<=kmes_train_hasta, clase01],
weight= dataset[ foto_mes>=kmes_train_desde & foto_mes<=kmes_train_hasta , ifelse(clase_ternaria=="BAJA+2", 1.0000001, 1.0)] ,
free_raw_data= TRUE
)
#elimino el dataset para liberar memoria RAM
rm( dataset )
gc()
#Aqui comienza la configuracion de la Bayesian Optimization
funcion_optimizar <- EstimarGanancia_lightgbm #la funcion que voy a maximizar
configureMlr( show.learner.output= FALSE)
#configuro la busqueda bayesiana, los hiperparametros que se van a optimizar
#por favor, no desesperarse por lo complejo
obj.fun <- makeSingleObjectiveFunction(
fn= funcion_optimizar, #la funcion que voy a maximizar
minimize= FALSE, #estoy Maximizando la ganancia
noisy= TRUE,
par.set= hs, #definido al comienzo del programa
has.simple.signature = FALSE #paso los parametros en una lista
)
ctrl <- makeMBOControl( save.on.disk.at.time= 600, save.file.path= kbayesiana) #se graba cada 600 segundos
ctrl <- setMBOControlTermination(ctrl, iters= kBO_iter ) #cantidad de iteraciones
ctrl <- setMBOControlInfill(ctrl, crit= makeMBOInfillCritEI() )
#establezco la funcion que busca el maximo
surr.km <- makeLearner("regr.km", predict.type= "se", covtype= "matern3_2", control= list(trace= TRUE))
#inicio la optimizacion bayesiana
if(!file.exists(kbayesiana)) {
run <- mbo(obj.fun, learner= surr.km, control= ctrl)
} else {
run <- mboContinue( kbayesiana ) #retomo en caso que ya exista
}
#apagado de la maquina virtual, pero NO se borra
system( "sleep 10 && sudo shutdown -h now", wait=FALSE)
#suicidio, elimina la maquina virtual directamente
#system( "sleep 10 &&
# export NAME=$(curl -X GET http://metadata.google.internal/computeMetadata/v1/instance/name -H 'Metadata-Flavor: Google') &&
# export ZONE=$(curl -X GET http://metadata.google.internal/computeMetadata/v1/instance/zone -H 'Metadata-Flavor: Google') &&
# gcloud --quiet compute instances delete $NAME --zone=$ZONE",
# wait=FALSE )
quit( save="no" )
| /clasesGustavo/TareasHogar/Tarea20210924/721_lgb_bin2_hist.r | no_license | gerbeldo/labo2021 | R | false | false | 11,955 | r | #Necesita para correr en Google Cloud
#40 GB de memoria RAM
#256 GB de espacio en el disco local
#8 vCPU
#clase_binaria2 1={BAJA+2,BAJA+1} 0={CONTINUA}
#Entrena en a union de ONCE meses de [202001, 202011]
#No usa variables historicas
#Optimizacion Bayesiana de hiperparametros de lightgbm
#usa el interminable 5-fold cross validation
#funciona automaticamente con EXPERIMENTOS
#va generando incrementalmente salidas para kaggle
# WARNING usted debe cambiar este script si lo corre en su propio Linux
#limpio la memoria
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("rlist")
require("yaml")
require("lightgbm")
#paquetes necesarios para la Bayesian Optimization
require("DiceKriging")
require("mlrMBO")
#para poder usarlo en la PC y en la nube sin tener que cambiar la ruta
#cambiar aqui las rutas en su maquina
switch ( Sys.info()[['sysname']],
Windows = { directory.root <- "M:\\" }, #Windows
Darwin = { directory.root <- "~/dm/" }, #Apple MAC
Linux = { directory.root <- "~/buckets/b1/" } #Google Cloud
)
#defino la carpeta donde trabajo
setwd( directory.root )
kexperimento <- NA #NA si se corre la primera vez, un valor concreto si es para continuar procesando
kscript <- "721_lgb_bin2_hist"
karch_dataset <- "./datasetsOri/paquete_premium.csv.gz"
kmes_apply <- 202101 #El mes donde debo aplicar el modelo
kmes_train_hasta <- 202011 #Obvimente, solo puedo entrenar hasta 202011
kmes_train_desde <- 202001 #Entreno desde Enero-2020
kcanaritos <- 30
kBO_iter <- 100 #cantidad de iteraciones de la Optimizacion Bayesiana
#Aqui se cargan los hiperparametros
hs <- makeParamSet(
makeNumericParam("learning_rate", lower= 0.02 , upper= 0.06),
makeNumericParam("feature_fraction", lower= 0.1 , upper= 0.4),
makeIntegerParam("min_data_in_leaf", lower= 1000L , upper= 8000L),
makeIntegerParam("num_leaves", lower= 100L , upper= 1024L),
makeNumericParam("prob_corte", lower= 0.040, upper= 0.055)
)
campos_malos <- c("mpasivos_margen") #aqui se deben cargar todos los campos culpables del Data Drifting
ksemilla_azar <- 102191 #Aqui poner la propia semilla
#------------------------------------------------------------------------------
#Funcion que lleva el registro de los experimentos
get_experimento <- function()
{
if( !file.exists( "./maestro.yaml" ) ) cat( file="./maestro.yaml", "experimento: 1000" )
exp <- read_yaml( "./maestro.yaml" )
experimento_actual <- exp$experimento
exp$experimento <- as.integer(exp$experimento + 1)
Sys.chmod( "./maestro.yaml", mode = "0644", use_umask = TRUE)
write_yaml( exp, "./maestro.yaml" )
Sys.chmod( "./maestro.yaml", mode = "0444", use_umask = TRUE) #dejo el archivo readonly
return( experimento_actual )
}
#------------------------------------------------------------------------------
#graba a un archivo los componentes de lista
#para el primer registro, escribe antes los titulos
loguear <- function( reg, arch=NA, folder="./work/", ext=".txt", verbose=TRUE )
{
archivo <- arch
if( is.na(arch) ) archivo <- paste0( folder, substitute( reg), ext )
if( !file.exists( archivo ) ) #Escribo los titulos
{
linea <- paste0( "fecha\t",
paste( list.names(reg), collapse="\t" ), "\n" )
cat( linea, file=archivo )
}
linea <- paste0( format(Sys.time(), "%Y%m%d %H%M%S"), "\t", #la fecha y hora
gsub( ", ", "\t", toString( reg ) ), "\n" )
cat( linea, file=archivo, append=TRUE ) #grabo al archivo
if( verbose ) cat( linea ) #imprimo por pantalla
}
#------------------------------------------------------------------------------
PROB_CORTE <- 0.025
fganancia_logistic_lightgbm <- function(probs, datos)
{
vlabels <- getinfo(datos, "label")
vpesos <- getinfo(datos, "weight")
#aqui esta el inmoral uso de los pesos para calcular la ganancia correcta
gan <- sum( (probs > PROB_CORTE ) *
ifelse( vlabels== 1 & vpesos > 1, 48750, -1250 ) )
return( list( "name"= "ganancia",
"value"= gan,
"higher_better"= TRUE ) )
}
#------------------------------------------------------------------------------
#esta funcion solo puede recibir los parametros que se estan optimizando
#el resto de los parametros se pasan como variables globales, la semilla del mal ...
EstimarGanancia_lightgbm <- function( x )
{
GLOBAL_iteracion <<- GLOBAL_iteracion + 1
gc()
PROB_CORTE <<- x$prob_corte #asigno la variable global
kfolds <- 5 # cantidad de folds para cross validation
param_basicos <- list( objective= "binary",
metric= "custom",
first_metric_only= TRUE,
boost_from_average= TRUE,
feature_pre_filter= FALSE,
verbosity= -100,
seed= 999983,
max_depth= -1, # -1 significa no limitar, por ahora lo dejo fijo
min_gain_to_split= 0.0, #por ahora, lo dejo fijo
lambda_l1= 0.0, #por ahora, lo dejo fijo
lambda_l2= 0.0, #por ahora, lo dejo fijo
max_bin= 31, #por ahora, lo dejo fijo
num_iterations= 9999, #un numero muy grande, lo limita early_stopping_rounds
force_row_wise= TRUE #para que los alumnos no se atemoricen con tantos warning
)
#el parametro discolo, que depende de otro
param_variable <- list( early_stopping_rounds= as.integer(50 + 1/x$learning_rate) )
param_completo <- c( param_basicos, param_variable, x )
set.seed( 999983 )
modelocv <- lgb.cv( data= dtrain,
eval= fganancia_logistic_lightgbm,
stratified= TRUE, #sobre el cross validation
nfold= kfolds, #folds del cross validation
param= param_completo,
verbose= -100
)
ganancia <- unlist(modelocv$record_evals$valid$ganancia$eval)[ modelocv$best_iter ]
ganancia_normalizada <- ganancia* kfolds
attr(ganancia_normalizada ,"extras" ) <- list("num_iterations"= modelocv$best_iter) #esta es la forma de devolver un parametro extra
param_completo$num_iterations <- modelocv$best_iter #asigno el mejor num_iterations
param_completo["early_stopping_rounds"] <- NULL
#si tengo una ganancia superadora, genero el archivo para Kaggle
if( ganancia > GLOBAL_ganancia_max )
{
GLOBAL_ganancia_max <<- ganancia #asigno la nueva maxima ganancia a una variable GLOBAL, por eso el <<-
set.seed(ksemilla_azar)
modelo <- lightgbm( data= dtrain,
param= param_completo,
verbose= -100
)
#calculo la importancia de variables
tb_importancia <- lgb.importance( model= modelo )
fwrite( tb_importancia,
file= paste0(kimp, "imp_", GLOBAL_iteracion, ".txt"),
sep="\t" )
prediccion <- predict( modelo, data.matrix( dapply[ , campos_buenos, with=FALSE]) )
Predicted <- as.integer( prediccion > x$prob_corte )
entrega <- as.data.table( list( "numero_de_cliente"= dapply$numero_de_cliente,
"Predicted"= Predicted) )
#genero el archivo para Kaggle
fwrite( entrega,
file= paste0(kkaggle, GLOBAL_iteracion, ".csv" ),
sep= "," )
}
#logueo
xx <- param_completo
xx$iteracion_bayesiana <- GLOBAL_iteracion
xx$ganancia <- ganancia_normalizada #le agrego la ganancia
loguear( xx, arch= klog )
return( ganancia )
}
#------------------------------------------------------------------------------
#Aqui empieza el programa
if( is.na(kexperimento ) ) kexperimento <- get_experimento() #creo el experimento
#en estos archivos quedan los resultados
dir.create( paste0( "./work/E", kexperimento, "/" ) )
kbayesiana <- paste0("./work/E", kexperimento, "/E", kexperimento, "_", kscript, ".RDATA" )
klog <- paste0("./work/E", kexperimento, "/E", kexperimento, "_", kscript, "_BOlog.txt" )
kimp <- paste0("./work/E", kexperimento, "/E", kexperimento, "_", kscript, "_" )
kkaggle <- paste0("./kaggle/E",kexperimento, "_", kscript, "_" )
GLOBAL_ganancia_max <- -Inf
GLOBAL_iteracion <- 0
#si ya existe el archivo log, traigo hasta donde llegue
if( file.exists(klog) )
{
tabla_log <- fread( klog)
GLOBAL_iteracion <- nrow( tabla_log ) -1
GLOBAL_ganancia_max <- tabla_log[ , max(ganancia) ]
}
#cargo el dataset que tiene los 36 meses
dataset <- fread(karch_dataset)
#agrego canaritos
if( kcanaritos > 0 )
{
for( i in 1:kcanaritos) dataset[ , paste0("canarito", i ) := runif( nrow(dataset))]
}
#cargo los datos donde voy a aplicar el modelo
dapply <- copy( dataset[ foto_mes==kmes_apply ] )
#creo la clase_binaria2 1={ BAJA+2,BAJA+1} 0={CONTINUA}
dataset[ , clase01:= ifelse( clase_ternaria=="CONTINUA", 0, 1 ) ]
#los campos que se van a utilizar
campos_buenos <- setdiff( colnames(dataset), c("clase_ternaria","clase01", campos_malos) )
#dejo los datos en el formato que necesita LightGBM
#uso el weight como un truco ESPANTOSO para saber la clase real
dtrain <- lgb.Dataset( data= data.matrix( dataset[ foto_mes>=kmes_train_desde & foto_mes<=kmes_train_hasta , campos_buenos, with=FALSE]),
label= dataset[ foto_mes>=kmes_train_desde & foto_mes<=kmes_train_hasta, clase01],
weight= dataset[ foto_mes>=kmes_train_desde & foto_mes<=kmes_train_hasta , ifelse(clase_ternaria=="BAJA+2", 1.0000001, 1.0)] ,
free_raw_data= TRUE
)
#elimino el dataset para liberar memoria RAM
rm( dataset )
gc()
#Aqui comienza la configuracion de la Bayesian Optimization
funcion_optimizar <- EstimarGanancia_lightgbm #la funcion que voy a maximizar
configureMlr( show.learner.output= FALSE)
#configuro la busqueda bayesiana, los hiperparametros que se van a optimizar
#por favor, no desesperarse por lo complejo
obj.fun <- makeSingleObjectiveFunction(
fn= funcion_optimizar, #la funcion que voy a maximizar
minimize= FALSE, #estoy Maximizando la ganancia
noisy= TRUE,
par.set= hs, #definido al comienzo del programa
has.simple.signature = FALSE #paso los parametros en una lista
)
ctrl <- makeMBOControl( save.on.disk.at.time= 600, save.file.path= kbayesiana) #se graba cada 600 segundos
ctrl <- setMBOControlTermination(ctrl, iters= kBO_iter ) #cantidad de iteraciones
ctrl <- setMBOControlInfill(ctrl, crit= makeMBOInfillCritEI() )
#establezco la funcion que busca el maximo
surr.km <- makeLearner("regr.km", predict.type= "se", covtype= "matern3_2", control= list(trace= TRUE))
#inicio la optimizacion bayesiana
if(!file.exists(kbayesiana)) {
run <- mbo(obj.fun, learner= surr.km, control= ctrl)
} else {
run <- mboContinue( kbayesiana ) #retomo en caso que ya exista
}
#apagado de la maquina virtual, pero NO se borra
system( "sleep 10 && sudo shutdown -h now", wait=FALSE)
#suicidio, elimina la maquina virtual directamente
#system( "sleep 10 &&
# export NAME=$(curl -X GET http://metadata.google.internal/computeMetadata/v1/instance/name -H 'Metadata-Flavor: Google') &&
# export ZONE=$(curl -X GET http://metadata.google.internal/computeMetadata/v1/instance/zone -H 'Metadata-Flavor: Google') &&
# gcloud --quiet compute instances delete $NAME --zone=$ZONE",
# wait=FALSE )
quit( save="no" )
|
###################
#
# read_dist.R
#
# reads triangle matrices into R
#
###################
library(tidyverse)
read_dist <- function(dist_file_name){
# read in the first row to determine the matrix dimensions
matrix_dim <- as.numeric(read.table(dist_file_name, nrow = 1, as.is = TRUE))
# read in all the data from the lower triangle (exlcude the first which is the matrix dim)
distance_matrix <- read.table(dist_file_name, fill = TRUE, skip = 1,
col.names = c(as.character(1:matrix_dim)),
stringsAsFactor = F)
# add column names based on row names
colnames(distance_matrix) <- c('rows', distance_matrix$X1[-matrix_dim])
# convert to long form and eliminate NAs (upper right of triangle)
distance_matrix %>%
pivot_longer(col = -rows, values_to = 'distances', names_to = 'columns') %>%
filter(!is.na(distances))
}
| /code/read_dist.R | permissive | SchlossLab/Lesniak_Clearance_mSphere_2021 | R | false | false | 855 | r | ###################
#
# read_dist.R
#
# reads triangle matrices into R
#
###################
library(tidyverse)
read_dist <- function(dist_file_name){
# read in the first row to determine the matrix dimensions
matrix_dim <- as.numeric(read.table(dist_file_name, nrow = 1, as.is = TRUE))
# read in all the data from the lower triangle (exlcude the first which is the matrix dim)
distance_matrix <- read.table(dist_file_name, fill = TRUE, skip = 1,
col.names = c(as.character(1:matrix_dim)),
stringsAsFactor = F)
# add column names based on row names
colnames(distance_matrix) <- c('rows', distance_matrix$X1[-matrix_dim])
# convert to long form and eliminate NAs (upper right of triangle)
distance_matrix %>%
pivot_longer(col = -rows, values_to = 'distances', names_to = 'columns') %>%
filter(!is.na(distances))
}
|
# This is the R script for the Plot 3.png
library(data.table)
library(dplyr)
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# download and unzip the data
fileName <- "power_consumption.zip"
if (!file.exists(fileName)) {
download.file(fileUrl, destfile = fileName)
}
if(!file.exists("household_power_consumption.txt")) {
unzip(fileName)
}
# load the data and filter out rows with the date we need
# note that ? represents missing value
consumption <- fread("./household_power_consumption.txt", na.strings = "?")
consumption <- consumption %>% filter(Date == "1/2/2007" | Date == "2/2/2007")
# paste the date and time column to be a new DateTime column
# convert into POSIXlt format
# timezone: CET for paris (where the data was collected)
consumption <- consumption %>% mutate(DateTime=paste(Date, Time)) %>%
select(DateTime, everything()) %>%
select(-Date, -Time)
consumption$DateTime <- strptime(consumption$DateTime, format = "%d/%m/%Y %H:%M:%S",
tz = "CET")
# make the plot and save it into a png file
png(filename = "Plot 3.png", width = 480, height = 480)
with(consumption, plot(DateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering",
type = "n"))
with(consumption, lines(DateTime, Sub_metering_1, type = "l"))
with(consumption, lines(DateTime, Sub_metering_2, type = "l", col = "red"))
with(consumption, lines(DateTime, Sub_metering_3, type = "l", col = "blue"))
legend("topright", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1,1), text.font = 0.5)
dev.off() | /plot3.R | no_license | fanzhaom/ExData_Plotting1 | R | false | false | 1,681 | r | # This is the R script for the Plot 3.png
library(data.table)
library(dplyr)
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# download and unzip the data
fileName <- "power_consumption.zip"
if (!file.exists(fileName)) {
download.file(fileUrl, destfile = fileName)
}
if(!file.exists("household_power_consumption.txt")) {
unzip(fileName)
}
# load the data and filter out rows with the date we need
# note that ? represents missing value
consumption <- fread("./household_power_consumption.txt", na.strings = "?")
consumption <- consumption %>% filter(Date == "1/2/2007" | Date == "2/2/2007")
# paste the date and time column to be a new DateTime column
# convert into POSIXlt format
# timezone: CET for paris (where the data was collected)
consumption <- consumption %>% mutate(DateTime=paste(Date, Time)) %>%
select(DateTime, everything()) %>%
select(-Date, -Time)
consumption$DateTime <- strptime(consumption$DateTime, format = "%d/%m/%Y %H:%M:%S",
tz = "CET")
# make the plot and save it into a png file
png(filename = "Plot 3.png", width = 480, height = 480)
with(consumption, plot(DateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering",
type = "n"))
with(consumption, lines(DateTime, Sub_metering_1, type = "l"))
with(consumption, lines(DateTime, Sub_metering_2, type = "l", col = "red"))
with(consumption, lines(DateTime, Sub_metering_3, type = "l", col = "blue"))
legend("topright", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1,1), text.font = 0.5)
dev.off() |
helpers.installPackages("quantmod") | /init.R | permissive | bridgecrew-perf7/deployR4 | R | false | false | 35 | r | helpers.installPackages("quantmod") |
#' @title Split samples into groups given metadata.
#' @description The function returns two groups situated in the low and high quantile of the given metadata item.
#' When two metadata items are provided, three splitting modi are available: congruent, complementary and inverse.
#' For groups with low and high values of a metadata item and two metadata items, congruent means that the low group
#' is low for both metadata items and the high group is high for both metadata items, inverse means that the low group is
#' low for the first metadata item and high for the second metadata item and vice versa for the high group whereas congruent means
#' that the first group is high for the first metadata item and the second high for the second metadata item.
#'
#' @param abundances a matrix with taxa as rows and samples as columns
#' @param metadata a dataframe with metadata items as columns
#' @param metadata.name the name of a numeric metadata item to be used for sample splitting
#' @param metadata2.name the name of a second numeric metadata item to be used for sample splitting
#' @param mode the splitting mode; can be inverse, complementary or congruent; only relevant if a second metadata item is provided
#' @param quantile.def the thresholds on the lower and upper quantile which define the two sample groups (group 1 from 0 to first quantile, group 2 from second quantile to 1)
#' @return The function returns the abundances of group 1 and 2 (named group1 and group2) as well as the group-specific metadata values (named metadata1 and metadata2),
#' where second metadata item values can be empty.
#' @export
selectSamplesGivenMetadata<-function(abundances, metadata, metadata.name="", metadata2.name="", mode="congruent", quantile.def=c(0.1,0.9)){
thresholds=quantile(metadata[[metadata.name]], quantile.def)
metadata.values=metadata[[metadata.name]]
metadata2.values=c()
if(metadata2.name!=""){
metadata2.values=metadata[[metadata2.name]]
}
indices.group1=which(metadata.values<thresholds[1]) # low quantile group
indices.group2=which(metadata.values>thresholds[2]) # high quantile group
if(metadata2.name!=""){
thresholds2=quantile(metadata[[metadata2.name]], quantile.def)
if(mode=="inverse"){
indices.metadata2.group1=which(metadata2.values>thresholds2[2]) # group 1: high in metadata 2
indices.metadata2.group2=which(metadata2.values<thresholds2[1]) # group 2: low in metadata 2
}else if(mode=="complementary"){
indices.metadata2.group2=which(metadata2.values>thresholds2[2])
}else if(mode=="congruent"){
indices.metadata2.group1=which(metadata2.values<thresholds2[1])
indices.metadata2.group2=which(metadata2.values>thresholds2[2])
}else{
stop(paste("Mode",mode,"is not supported."))
}
if(mode=="complementary"){
# metadata 1 should be high in first group
indices.group1=indices.group2
# metadata 2 should be high in second group
indices.group2=indices.metadata2.group2
}else{
indices.group1=intersect(indices.group1,indices.metadata2.group1)
indices.group2=intersect(indices.group2,indices.metadata2.group2)
}
if(length(indices.group1)==0){
stop("No samples found in intersection of selected metadata for group 1.")
}
if(length(indices.group2)==0){
stop("No samples found in intersection of selected metadata for group 2.")
}
}
group1=abundances[,indices.group1]
group2=abundances[,indices.group2]
metadata.group1=metadata[indices.group1,]
metadata.group2=metadata[indices.group2,]
res=list(group1,group2, metadata.group1, metadata.group2)
names(res)=c("group1","group2","metadata1","metadata2")
return(res)
}
# Helper function to match age and gender for two data sets.
# Age is matched first, and gender is matched in case there is
# more than one sample that matches age within given range.
# If range is smaller than 1, gender is not matched.
# age1: age vector for query data set
# gender1: optional gender vector for query data set
# age2: age vector for target data set
# gender2: optional gender vector for target data set; needed if gender1 is given
# range: allowed deviation for age in years
# The method returns the indices of the selected target samples.
matchAgeAndGender<-function(age1=c(), gender1=c(), age2=c(), gender2=c(), range=1){
selected.target.indices=c()
if(length(gender1)>1 && length(gender2)==0){
stop("If you provide a query gender vector, please provide a target gender vector.")
}
if(range<=0 && length(gender1)>0){
gender1=c()
warning("In order to match age and gender, please provide a range larger 0.")
}
# loop query age vector
for(query.index in 1:length(age1)){
queryage=age1[query.index]
# try exact match first
okindices=age2[age2==queryage]
newIndexFound=FALSE
if(length(okindices)>0){
for(okindex in okindices){
# select only one match
if(!(okindex %in% selected.target.indices) && !newIndexFound){
newIndexFound=TRUE
# no gender match required
if(length(gender1)==0){
selected.target.indices=c(selected.target.indices,okindex)
}
}
} # end loop indices found
} # end test indices found
# if exact age match fails or if gender is provided or if all target indices were already selected, check for samples with age within the allowed range
if(length(gender1)>0 || !newIndexFound){
# check within range
if(range>0){
okindices=c()
# collect target samples with age within range
for(target.index in 1:length(age2)){
if(age2[target.index] <= (queryage+range) && age2[target.index] >= (queryage-range)){
okindices=c(okindices,target.index)
}
}
print(paste("Found",length(okindices),"samples with age within range"))
# no matching age found within range
if(length(okindices)<1){
warning(paste("No matching age found for sample",query.index," and range ",range,". Consider expanding the range."))
}
# find matching gender
else if(length(gender1)>0){
newIndexFoundWithGender=FALSE
for(okindex in okindices){
if(gender1[query.index]==gender2[okindex]){
if(!(okindex %in% selected.target.indices) && !newIndexFoundWithGender){
selected.target.indices=c(selected.target.indices,okindex)
newIndexFoundWithGender=TRUE
}
}
} # end loop indices
if(!newIndexFoundWithGender){
warning(paste("Did not find target sample with matching gender not found before for sample",query.index))
for(okindex in okindices){
if(!(okindex %in% selected.target.indices) && !newIndexFound){
selected.target.indices=c(selected.target.indices,okindex)
newIndexFound=TRUE
}
} # end loop indices
}
if(!newIndexFound){
warning(paste("Did not find target sample for query sample",query.index, "with matching age not found before"))
}
# no need to match gender
}else if(length(gender1)==0){
for(okindex in okindices){
if(!(okindex %in% selected.target.indices) && !newIndexFound){
selected.target.indices=c(selected.target.indices,okindex)
newIndexFound=TRUE
}
} # end loop indices
if(!newIndexFound){
warning(paste("Did not find target sample for query sample",query.index, "with matching age not found before"))
}
}
} # end range larger 0; with 0 range, nothing else can be done
} # end no new index found or gender matching enabled
if(!newIndexFound){
warning(paste("No matching age or no new matchig age found for sample",query.index," and range ",range,". Consider expanding the range."))
}
} # end loop over query indices
return(selected.target.indices)
}
| /R/selectSamplesGivenMetadata.R | permissive | hallucigenia-sparsa/seqgroup | R | false | false | 8,056 | r | #' @title Split samples into groups given metadata.
#' @description The function returns two groups situated in the low and high quantile of the given metadata item.
#' When two metadata items are provided, three splitting modi are available: congruent, complementary and inverse.
#' For groups with low and high values of a metadata item and two metadata items, congruent means that the low group
#' is low for both metadata items and the high group is high for both metadata items, inverse means that the low group is
#' low for the first metadata item and high for the second metadata item and vice versa for the high group whereas congruent means
#' that the first group is high for the first metadata item and the second high for the second metadata item.
#'
#' @param abundances a matrix with taxa as rows and samples as columns
#' @param metadata a dataframe with metadata items as columns
#' @param metadata.name the name of a numeric metadata item to be used for sample splitting
#' @param metadata2.name the name of a second numeric metadata item to be used for sample splitting
#' @param mode the splitting mode; can be inverse, complementary or congruent; only relevant if a second metadata item is provided
#' @param quantile.def the thresholds on the lower and upper quantile which define the two sample groups (group 1 from 0 to first quantile, group 2 from second quantile to 1)
#' @return The function returns the abundances of group 1 and 2 (named group1 and group2) as well as the group-specific metadata values (named metadata1 and metadata2),
#' where second metadata item values can be empty.
#' @export
selectSamplesGivenMetadata<-function(abundances, metadata, metadata.name="", metadata2.name="", mode="congruent", quantile.def=c(0.1,0.9)){
thresholds=quantile(metadata[[metadata.name]], quantile.def)
metadata.values=metadata[[metadata.name]]
metadata2.values=c()
if(metadata2.name!=""){
metadata2.values=metadata[[metadata2.name]]
}
indices.group1=which(metadata.values<thresholds[1]) # low quantile group
indices.group2=which(metadata.values>thresholds[2]) # high quantile group
if(metadata2.name!=""){
thresholds2=quantile(metadata[[metadata2.name]], quantile.def)
if(mode=="inverse"){
indices.metadata2.group1=which(metadata2.values>thresholds2[2]) # group 1: high in metadata 2
indices.metadata2.group2=which(metadata2.values<thresholds2[1]) # group 2: low in metadata 2
}else if(mode=="complementary"){
indices.metadata2.group2=which(metadata2.values>thresholds2[2])
}else if(mode=="congruent"){
indices.metadata2.group1=which(metadata2.values<thresholds2[1])
indices.metadata2.group2=which(metadata2.values>thresholds2[2])
}else{
stop(paste("Mode",mode,"is not supported."))
}
if(mode=="complementary"){
# metadata 1 should be high in first group
indices.group1=indices.group2
# metadata 2 should be high in second group
indices.group2=indices.metadata2.group2
}else{
indices.group1=intersect(indices.group1,indices.metadata2.group1)
indices.group2=intersect(indices.group2,indices.metadata2.group2)
}
if(length(indices.group1)==0){
stop("No samples found in intersection of selected metadata for group 1.")
}
if(length(indices.group2)==0){
stop("No samples found in intersection of selected metadata for group 2.")
}
}
group1=abundances[,indices.group1]
group2=abundances[,indices.group2]
metadata.group1=metadata[indices.group1,]
metadata.group2=metadata[indices.group2,]
res=list(group1,group2, metadata.group1, metadata.group2)
names(res)=c("group1","group2","metadata1","metadata2")
return(res)
}
# Helper function to match age and gender for two data sets.
# Age is matched first, and gender is matched in case there is
# more than one sample that matches age within given range.
# If range is smaller than 1, gender is not matched.
# age1: age vector for query data set
# gender1: optional gender vector for query data set
# age2: age vector for target data set
# gender2: optional gender vector for target data set; needed if gender1 is given
# range: allowed deviation for age in years
# The method returns the indices of the selected target samples.
matchAgeAndGender<-function(age1=c(), gender1=c(), age2=c(), gender2=c(), range=1){
selected.target.indices=c()
if(length(gender1)>1 && length(gender2)==0){
stop("If you provide a query gender vector, please provide a target gender vector.")
}
if(range<=0 && length(gender1)>0){
gender1=c()
warning("In order to match age and gender, please provide a range larger 0.")
}
# loop query age vector
for(query.index in 1:length(age1)){
queryage=age1[query.index]
# try exact match first
okindices=age2[age2==queryage]
newIndexFound=FALSE
if(length(okindices)>0){
for(okindex in okindices){
# select only one match
if(!(okindex %in% selected.target.indices) && !newIndexFound){
newIndexFound=TRUE
# no gender match required
if(length(gender1)==0){
selected.target.indices=c(selected.target.indices,okindex)
}
}
} # end loop indices found
} # end test indices found
# if exact age match fails or if gender is provided or if all target indices were already selected, check for samples with age within the allowed range
if(length(gender1)>0 || !newIndexFound){
# check within range
if(range>0){
okindices=c()
# collect target samples with age within range
for(target.index in 1:length(age2)){
if(age2[target.index] <= (queryage+range) && age2[target.index] >= (queryage-range)){
okindices=c(okindices,target.index)
}
}
print(paste("Found",length(okindices),"samples with age within range"))
# no matching age found within range
if(length(okindices)<1){
warning(paste("No matching age found for sample",query.index," and range ",range,". Consider expanding the range."))
}
# find matching gender
else if(length(gender1)>0){
newIndexFoundWithGender=FALSE
for(okindex in okindices){
if(gender1[query.index]==gender2[okindex]){
if(!(okindex %in% selected.target.indices) && !newIndexFoundWithGender){
selected.target.indices=c(selected.target.indices,okindex)
newIndexFoundWithGender=TRUE
}
}
} # end loop indices
if(!newIndexFoundWithGender){
warning(paste("Did not find target sample with matching gender not found before for sample",query.index))
for(okindex in okindices){
if(!(okindex %in% selected.target.indices) && !newIndexFound){
selected.target.indices=c(selected.target.indices,okindex)
newIndexFound=TRUE
}
} # end loop indices
}
if(!newIndexFound){
warning(paste("Did not find target sample for query sample",query.index, "with matching age not found before"))
}
# no need to match gender
}else if(length(gender1)==0){
for(okindex in okindices){
if(!(okindex %in% selected.target.indices) && !newIndexFound){
selected.target.indices=c(selected.target.indices,okindex)
newIndexFound=TRUE
}
} # end loop indices
if(!newIndexFound){
warning(paste("Did not find target sample for query sample",query.index, "with matching age not found before"))
}
}
} # end range larger 0; with 0 range, nothing else can be done
} # end no new index found or gender matching enabled
if(!newIndexFound){
warning(paste("No matching age or no new matchig age found for sample",query.index," and range ",range,". Consider expanding the range."))
}
} # end loop over query indices
return(selected.target.indices)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairt.R
\name{pairt}
\alias{pairt}
\title{Compute power for a Paired t-test
Takes means, sd, and sample sizes. Alpha is .05 by default, alternative values may be entered by user.
correlation (r) defaults to .50.}
\usage{
pairt(m1 = NULL, m2 = NULL, s = NULL, n = NULL, r = NULL, alpha = 0.05)
}
\arguments{
\item{m1}{Mean for Pre Test}
\item{m2}{Mean for Post Test}
\item{s}{Standard deviation}
\item{n}{Sample size}
\item{r}{Correlation pre-post measures (default is .50)}
\item{alpha}{Type I error (default is .05)}
}
\value{
Power for the Paired t-test
}
\description{
Compute power for a Paired t-test
Takes means, sd, and sample sizes. Alpha is .05 by default, alternative values may be entered by user.
correlation (r) defaults to .50.
}
\examples{
pairt(m1=25,m2=20, s = 5, n = 25, r = .5)
}
| /man/pairt.Rd | permissive | chrisaberson/pwr2ppl | R | false | true | 916 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairt.R
\name{pairt}
\alias{pairt}
\title{Compute power for a Paired t-test
Takes means, sd, and sample sizes. Alpha is .05 by default, alternative values may be entered by user.
correlation (r) defaults to .50.}
\usage{
pairt(m1 = NULL, m2 = NULL, s = NULL, n = NULL, r = NULL, alpha = 0.05)
}
\arguments{
\item{m1}{Mean for Pre Test}
\item{m2}{Mean for Post Test}
\item{s}{Standard deviation}
\item{n}{Sample size}
\item{r}{Correlation pre-post measures (default is .50)}
\item{alpha}{Type I error (default is .05)}
}
\value{
Power for the Paired t-test
}
\description{
Compute power for a Paired t-test
Takes means, sd, and sample sizes. Alpha is .05 by default, alternative values may be entered by user.
correlation (r) defaults to .50.
}
\examples{
pairt(m1=25,m2=20, s = 5, n = 25, r = .5)
}
|
#' @title fun_name
#'
#' @description kolejna funkcja podmieniona
#'
#' @param param fun_name
#'
#'
#'
#' @export
format.packageInfo<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
if(runif(1,0,1) < 0.5){
rapek
}else{base::format.packageInfo(params)
}
}
| /R/format.packageInfo.R | no_license | granatb/RapeR | R | false | false | 691 | r |
#' @title fun_name
#'
#' @description kolejna funkcja podmieniona
#'
#' @param param fun_name
#'
#'
#'
#' @export
format.packageInfo<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
if(runif(1,0,1) < 0.5){
rapek
}else{base::format.packageInfo(params)
}
}
|
\name{Main.Rainfed.Growing.Season.Daily.ET.Calc}
\alias{Main.Rainfed.Growing.Season.Daily.ET.Calc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
~~function to do ... ~~
}
\description{
~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Main.Rainfed.Growing.Season.Daily.ET.Calc(Croplayer, Auto = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Croplayer}{
~~Describe \code{Croplayer} here~~
}
\item{Auto}{
~~Describe \code{Auto} here~~
}
}
\details{
~~ If necessary, more details than the description above ~~
}
\value{
~Describe the value returned
If it is a LIST, use
\item{comp1 }{Description of 'comp1'}
\item{comp2 }{Description of 'comp2'}
...
}
\references{
~put references to the literature/web site here ~
}
\author{
~~who you are~~
}
\note{
~~further notes~~
}
~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (Croplayer, Auto = TRUE)
{
load("Vars.Rdata")
Irr.Vars <- Vars[-c(3, 6, 8, 14, 15)]
if (!(Croplayer \%in\% Irr.Vars))
stop("This function is for irrigated varieties only!")
load(paste0(Intermediates, paste("Growing.Season", Croplayer,
"ETo_", "Rdata", sep = ".")))
ETo <- Growing.Season
rm(Growing.Season)
load(paste0(Intermediates, paste("Growing.Season", Croplayer,
"Precip_", "Rdata", sep = ".")))
Precip <- Growing.Season
rm(Growing.Season)
CROP <- Croplayer
load(paste0("../Intermediates/Daily.Crop.Profile.", CROP,
".Rdata"))
Root.depth <- lapply(DailyKcb, function(x) x$daily_root.depth)
Qfc.minus.Qwp <- lapply(Precip, function(x) x$Qfc.minus.Qwp)
TEW <- lapply(Precip, function(x) x$ave_TEW)
Dei <- TEW
REW <- lapply(Precip, function(x) x$ave_REW)
Precip <- lapply(Precip, function(x) x[, (grep("layer", names(x)))])
load(paste0(Intermediates, paste("Few", Croplayer, "Rdata",
sep = ".")))
load(paste0(Intermediates, paste("KcMax", Croplayer, "Rdata",
sep = ".")))
KcMax <- lapply(KcMax, function(x) x[, (grep("layer", names(x)))])
load(paste0(Intermediates, paste("Kcb.corrected", Croplayer,
"Rdata", sep = ".")))
ETo <- lapply(ETo, function(x) x[, (grep("layer", names(x)))])
sapply(ETo, function(x) length(x[x < 0]))
if (file.exists(paste0(Intermediates, paste("Growing.Saved",
Croplayer, "Rdata", sep = "."))) == FALSE) {
for (i in 1:length(ETo)) {
ETo[[i]][ETo[[i]] < 0] <- 0
ETo[[i]] <- round(ETo[[i]], 3)
ETo[[i]][ETo[[i]] > 28] <- 1.655
print("ETo high vals warning:")
print(length(ETo[[i]][ETo[[i]] > 18]))
}
print("ETo data cleaned")
ROi <- Precip
for (i in 1:length(ROi)) {
ROi[[i]] <- ROi[[i]] - TEW[[i]]
ROi[[i]][ROi[[i]] < 0] <- 0
}
print("Growing season runoff estimated")
Irr <- Precip
for (i in 1:length(Irr)) {
Irr[[i]][Irr[[i]] > 0] <- 0
}
Fw.table <- read.csv("Fw.table.csv")
Irr.Eff <- Fw.table$fw[1]
Fw <- Irr
for (i in 1:length(Fw)) {
Fw[[i]][Fw[[i]] == 0] <- Irr.Eff
}
Growing.Files <- list(ETo, Precip, ROi, Irr, Fw)
save(Growing.Files, file = paste0(Intermediates, paste("Growing.Saved",
Croplayer, "Rdata", sep = ".")))
}
if (file.exists(paste0(Intermediates, paste("Growing.Saved",
Croplayer, "Rdata", sep = "."))) == TRUE) {
load(paste0(Intermediates, paste("Growing.Saved", Croplayer,
"Rdata", sep = ".")))
ETo <- Growing.Files[[1]]
Precip <- Growing.Files[[2]]
ROi <- Growing.Files[[3]]
Irr <- Growing.Files[[4]]
Fw <- Growing.Files[[5]]
}
Zr <- read.csv("crop.roots.csv")
Zr <- Zr[Zr$crop == Croplayer, ]
TAW.base <- lapply(Qfc.minus.Qwp, function(x) 1000 * (x[] *
Zr$root_depth))
Kr <- Irr
ETc <- Irr
De <- Irr
DPe <- Irr
Transp <- Irr
Ke <- Irr
E <- Irr
Transp <- Irr
Pval <- Irr
RAW <- Irr
Ks <- Irr
Transp.final <- Irr
Dr <- Irr
DP <- Irr
TAW <- Irr
setwd(paste0(Path, "/CropWatR/Intermediates/"))
load(paste("Preseason_Root.Zone.Depletion", Croplayer, "Rdata",
sep = "."))
load(paste("Preseason_Soil.Top.Layer.Depletion", Croplayer,
"Rdata", sep = "."))
load(paste("Preseason_Deep.Percolation", Croplayer, "Rdata",
sep = "."))
load(paste("Preseason_Soil.Evaporation", Croplayer, "Rdata",
sep = "."))
load(paste("Preseason_Runoff", Croplayer, "Rdata", sep = "."))
load(paste("Preseason_Weed.Transpiration", Croplayer, "Rdata",
sep = "."))
load(paste("Fallow.Saved", Croplayer, "Rdata", sep = "."))
Pre.Few <- Fallow.File[[5]]
setwd(paste0(Path, "/CropWatR/Data"))
if (file.exists(paste0(Intermediates, paste("Growing.Season.Rainfed_Transpiration",
Croplayer, "Rdata", sep = "."))) == TRUE & Auto == TRUE) {
print(paste("Growing Season has been previously calculated for",
Croplayer))
}
if (file.exists(paste0(Intermediates, paste("Growing.Season.Rainfed_Transpiration",
Croplayer, "Rdata", sep = "."))) == FALSE) {
Fw.table <- read.csv("Fw.table.csv")
Irr.Eff <- Fw.table$fw[1]
for (i in 1:length(Precip)) {
for (j in 1:length(Precip[[i]])) {
if (j == 1) {
Few[[i]][, j] <- pmin.int(Few[[i]][, j], Fw[[i]][,
j])
Kr[[i]][, j][Pre.Dei[[i]][, length(Pre.Dei[[i]])] >
REW[[i]]] <- (TEW[[i]][Pre.Dei[[i]][, length(Pre.Dei[[i]])] >
REW[[i]]] - Pre.Dei[[i]][, length(Pre.Dei[[i]])][Pre.Dei[[i]][,
length(Pre.Dei[[i]])] > REW[[i]]])/(TEW[[i]][Pre.Dei[[i]][,
length(Pre.Dei[[i]])] > REW[[i]]] - REW[[i]][Pre.Dei[[i]][,
length(Pre.Dei[[i]])] > REW[[i]]])
Kr[[i]][, j][Pre.Dei[[i]][, length(Pre.Dei[[i]])] <=
REW[[i]]] <- 1
Kr[[i]][, j][Kr[[i]][, j] < 0] <- 0
Ke[[i]][, j] <- pmin.int(Kr[[i]][, j] * (KcMax[[i]][,
j] - Kcb.corrected[[i]][, j]), Few[[i]][,
j] * KcMax[[i]][, j])
Ke[[i]][, j][Ke[[i]][, j] < 0] <- 0
E[[i]][, j] <- Ke[[i]][, j] * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - Pre.Dei[[i]][,
length(Pre.Dei[[i]])]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- Pre.Dei[[i]][, length(Pre.Dei[[i]])] -
(Precip[[i]][, j] - ROi[[i]][, j]) + Irr[[i]][,
j]/Fw[[i]][, j] + (E[[i]][, j]/Few[[i]][,
j]) + DPe[[i]][, j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
ETc[[i]][, j] <- (Kcb.corrected[[i]][, j] +
Ke[[i]][, j]) * ETo[[i]][, j]
Pval[[i]][, j] <- Zr$p.value + 0.04 * (5 -
(ETc[[i]][, j]))
Pval[[i]][, j][Pval[[i]][, j] < 0.1] <- 0.1
Pval[[i]][, j][Pval[[i]][, j] > 0.8] <- 0.8
if (is.na(Root.depth[[i]][j]/Zr$root_depth)) {
Frac <- Root.depth[[i]][length(Root.depth[[i]])]/Zr$root_depth
}
else Frac <- Root.depth[[i]][j]/Zr$root_depth
TAW[[i]][, j] <- TAW.base[[i]] * Frac
RAW[[i]][, j] <- Pval[[i]][, j] * TAW[[i]][,
j]
Dr[[i]][, j] <- Pre.Dr[[i]][, length(Pre.Dr[[i]])] -
(Precip[[i]][, j] - ROi[[i]][, j]) - Irr[[i]][,
j] + ETc[[i]][, j] + Pre.DP[[i]][, length(Pre.DP[[i]])]
Dr[[i]][, j][Dr[[i]][, j] < 0] <- 0
Dr[[i]][, j][Dr[[i]][, j] > TAW[[i]][, j]] <- TAW[[i]][,
j][Dr[[i]][, j] > TAW[[i]][, j]]
Ks[[i]][, j][Dr[[i]][, j] > RAW[[i]][, j]] <- ((TAW[[i]][,
j] - Dr[[i]][, j])[Dr[[i]][, j] > RAW[[i]][,
j]])/((1 - Pval[[i]][, j][Dr[[i]][, j] >
RAW[[i]][, j]]) * TAW[[i]][, j][Dr[[i]][,
j] > RAW[[i]][, j]])
Ks[[i]][, j][Dr[[i]][, j] <= RAW[[i]][, j]] <- 1
DP[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + Irr[[i]][, j] - ETc[[i]][, j] - Pre.Dr[[i]][,
length(Pre.Dr[[i]])]
DP[[i]][, j][Dr[[i]][, j] > 0] <- 0
DP[[i]][, j][DP[[i]][, j] < 0] <- 0
Transp[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j] + Ke[[i]][, j]) * ETo[[i]][, j]
Transp.final[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j]) * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - Pre.Dei[[i]][,
length(Pre.Dei[[i]])]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- Pre.Dei[[i]][, length(Pre.Dei[[i]])] -
(Precip[[i]][, j] - ROi[[i]][, j]) + Irr[[i]][,
j]/Fw[[i]][, j] + (E[[i]][, j]/Few[[i]][,
j]) + DPe[[i]][, j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
}
else {
Fw[[i]][, j] <- Fw[[i]][, (j - 1)]
Few[[i]][, j] <- pmin.int(Few[[i]][, j], Fw[[i]][,
j])
Kr[[i]][, j][De[[i]][, (j - 1)] > REW[[i]]] <- (TEW[[i]][De[[i]][,
(j - 1)] > REW[[i]]] - De[[i]][, (j - 1)][De[[i]][,
(j - 1)] > REW[[i]]])/(TEW[[i]][De[[i]][,
(j - 1)] > REW[[i]]] - REW[[i]][De[[i]][,
(j - 1)] > REW[[i]]])
Kr[[i]][, j][De[[i]][, (j - 1)] <= REW[[i]]] <- 1
Kr[[i]][, j][Kr[[i]][, j] < 0] <- 0
Ke[[i]][, j] <- pmin.int(Kr[[i]][, j] * (KcMax[[i]][,
j] - Kcb.corrected[[i]][, j]), Few[[i]][,
j] * KcMax[[i]][, j])
Ke[[i]][, j][Ke[[i]][, j] < 0] <- 0
ETo[[i]]
E[[i]][, j] <- Ke[[i]][, j] * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - De[[i]][,
j - 1]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- De[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) + Irr[[i]][, j]/Fw[[i]][,
j] + (E[[i]][, j]/Few[[i]][, j]) + DPe[[i]][,
j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
ETc[[i]][, j] <- (Kcb.corrected[[i]][, j] +
Ke[[i]][, j]) * ETo[[i]][, j]
Pval[[i]][, j] <- Zr$p.value + 0.04 * (5 -
(ETc[[i]][, j]))
Pval[[i]][, j][Pval[[i]][, j] < 0.1] <- 0.1
Pval[[i]][, j][Pval[[i]][, j] > 0.8] <- 0.8
if (is.na(Root.depth[[i]][j]/Zr$root_depth)) {
Frac <- Root.depth[[i]][length(Root.depth[[i]])]/Zr$root_depth
}
else Frac <- Root.depth[[i]][j]/Zr$root_depth
TAW[[i]][, j] <- TAW.base[[i]] * Frac
RAW[[i]][, j] <- Pval[[i]][, j] * TAW[[i]][,
j]
Dr[[i]][, j] <- Dr[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) - Irr[[i]][, j] + ETc[[i]][,
j] + DP[[i]][, j - 1]
Dr[[i]][, j][Dr[[i]][, j] < 0] <- 0
Dr[[i]][, j][Dr[[i]][, j] > TAW[[i]][, j]] <- TAW[[i]][,
j][Dr[[i]][, j] > TAW[[i]][, j]]
Dr[[i]][, j] <- Dr[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) - Irr[[i]][, j] + ETc[[i]][,
j] + DP[[i]][, j - 1]
Dr[[i]][, j][Dr[[i]][, j] < 0] <- 0
Dr[[i]][, j][Dr[[i]][, j] > TAW[[i]][, j]] <- TAW[[i]][,
j][Dr[[i]][, j] > TAW[[i]][, j]]
Ks[[i]][, j][Dr[[i]][, j] > RAW[[i]][, j]] <- ((TAW[[i]][,
j] - Dr[[i]][, j])[Dr[[i]][, j] > RAW[[i]][,
j]])/((1 - Pval[[i]][, j][Dr[[i]][, j] >
RAW[[i]][, j]]) * TAW[[i]][, j][Dr[[i]][,
j] > RAW[[i]][, j]])
Ks[[i]][, j][Dr[[i]][, j] <= RAW[[i]][, j]] <- 1
DP[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + Irr[[i]][, j] - ETc[[i]][, j] - Dr[[i]][,
j - 1]
DP[[i]][, j][Dr[[i]][, j] > 0] <- 0
DP[[i]][, j][DP[[i]][, j] < 0] <- 0
Transp[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j] + Ke[[i]][, j]) * ETo[[i]][, j]
Transp.final[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j]) * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - De[[i]][,
j - 1]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- De[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) + Irr[[i]][, j]/Fw[[i]][,
j] + (E[[i]][, j]/Few[[i]][, j]) + DPe[[i]][,
j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
}
}
Few[[i]][, 1] <- Few[[i]][, 2]
Kr[[i]][, 1] <- Kr[[i]][, 2]
Ke[[i]][, 1] <- Ke[[i]][, 2]
E[[i]][, 1] <- E[[i]][, 2]
DPe[[i]][, 1] <- DPe[[i]][, 2]
De[[i]][, 1] <- De[[i]][, 2]
ETc[[i]][, 1] <- ETc[[i]][, 2]
Pval[[i]][, 1] <- Pval[[i]][, 2]
TAW[[i]][, 1] <- TAW[[i]][, 2]
RAW[[i]][, 1] <- RAW[[i]][, 2]
Dr[[i]][, 1] <- Dr[[i]][, 2]
Dr[[i]][, 1] <- Dr[[i]][, 2]
Ks[[i]][, 1] <- Ks[[i]][, 2]
DP[[i]][, 1] <- DP[[i]][, 2]
Transp[[i]][, 1] <- Transp[[i]][, 2]
Transp.final[[i]][, 1] <- Transp.final[[i]][, 2]
}
}
print("Saving rainfed growing season SB files")
setwd(paste0(Path, "/CropWatR/Intermediates/"))
save(Few, file = paste("Growing.Season.Rainfed_Root.Zone.Depletion",
Croplayer, "Rdata", sep = "."))
save(Kr, file = paste("Growing.Season.Rainfed_Kr", Croplayer,
"Rdata", sep = "."))
save(Ks, file = paste("Growing.Season.Rainfed_Ks", Croplayer,
"Rdata", sep = "."))
save(Pval, file = paste("Growing.Season.Rainfed_Pval", Croplayer,
"Rdata", sep = "."))
save(Dr, file = paste("Growing.Season.Rainfed_Root.Zone.Depletion",
Croplayer, "Rdata", sep = "."))
save(De, file = paste("Growing.Season.Rainfed_Soil.Water.Balance",
Croplayer, "Rdata", sep = "."))
save(DP, file = paste("Growing.Season.Rainfed_Deep.Percolation",
Croplayer, "Rdata", sep = "."))
save(ROi, file = paste("Growing.Season.Rainfed_Runoff", Croplayer,
"Rdata", sep = "."))
save(E, file = paste("Growing.Season.Rainfed_Soil.Evaporation",
Croplayer, "Rdata", sep = "."))
save(Transp.final, file = paste("Growing.Season.Rainfed_Transpiration",
Croplayer, "Rdata", sep = "."))
save(DPe, file = paste("Growing.Season.Rainfed.Root.Zone.Percolation.Loss",
Croplayer, "Rdata", sep = "."))
save(Few, file = paste("Growing.Season.Rainfed.Evaporation.Fractions",
Croplayer, "Rdata", sep = "."))
setwd(paste0(Path, "/CropWatR/Data"))
print("Calculation of Growing Season daily soil water balance, deep percolation, and evaporation complete")
print("Growing Season initial run complete, on to post season")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/Main.Rainfed.Growing.Season.Daily.ET.Calc.Rd | no_license | DDorch/CropWatR | R | false | false | 17,081 | rd | \name{Main.Rainfed.Growing.Season.Daily.ET.Calc}
\alias{Main.Rainfed.Growing.Season.Daily.ET.Calc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
~~function to do ... ~~
}
\description{
~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Main.Rainfed.Growing.Season.Daily.ET.Calc(Croplayer, Auto = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Croplayer}{
~~Describe \code{Croplayer} here~~
}
\item{Auto}{
~~Describe \code{Auto} here~~
}
}
\details{
~~ If necessary, more details than the description above ~~
}
\value{
~Describe the value returned
If it is a LIST, use
\item{comp1 }{Description of 'comp1'}
\item{comp2 }{Description of 'comp2'}
...
}
\references{
~put references to the literature/web site here ~
}
\author{
~~who you are~~
}
\note{
~~further notes~~
}
~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (Croplayer, Auto = TRUE)
{
load("Vars.Rdata")
Irr.Vars <- Vars[-c(3, 6, 8, 14, 15)]
if (!(Croplayer \%in\% Irr.Vars))
stop("This function is for irrigated varieties only!")
load(paste0(Intermediates, paste("Growing.Season", Croplayer,
"ETo_", "Rdata", sep = ".")))
ETo <- Growing.Season
rm(Growing.Season)
load(paste0(Intermediates, paste("Growing.Season", Croplayer,
"Precip_", "Rdata", sep = ".")))
Precip <- Growing.Season
rm(Growing.Season)
CROP <- Croplayer
load(paste0("../Intermediates/Daily.Crop.Profile.", CROP,
".Rdata"))
Root.depth <- lapply(DailyKcb, function(x) x$daily_root.depth)
Qfc.minus.Qwp <- lapply(Precip, function(x) x$Qfc.minus.Qwp)
TEW <- lapply(Precip, function(x) x$ave_TEW)
Dei <- TEW
REW <- lapply(Precip, function(x) x$ave_REW)
Precip <- lapply(Precip, function(x) x[, (grep("layer", names(x)))])
load(paste0(Intermediates, paste("Few", Croplayer, "Rdata",
sep = ".")))
load(paste0(Intermediates, paste("KcMax", Croplayer, "Rdata",
sep = ".")))
KcMax <- lapply(KcMax, function(x) x[, (grep("layer", names(x)))])
load(paste0(Intermediates, paste("Kcb.corrected", Croplayer,
"Rdata", sep = ".")))
ETo <- lapply(ETo, function(x) x[, (grep("layer", names(x)))])
sapply(ETo, function(x) length(x[x < 0]))
if (file.exists(paste0(Intermediates, paste("Growing.Saved",
Croplayer, "Rdata", sep = "."))) == FALSE) {
for (i in 1:length(ETo)) {
ETo[[i]][ETo[[i]] < 0] <- 0
ETo[[i]] <- round(ETo[[i]], 3)
ETo[[i]][ETo[[i]] > 28] <- 1.655
print("ETo high vals warning:")
print(length(ETo[[i]][ETo[[i]] > 18]))
}
print("ETo data cleaned")
ROi <- Precip
for (i in 1:length(ROi)) {
ROi[[i]] <- ROi[[i]] - TEW[[i]]
ROi[[i]][ROi[[i]] < 0] <- 0
}
print("Growing season runoff estimated")
Irr <- Precip
for (i in 1:length(Irr)) {
Irr[[i]][Irr[[i]] > 0] <- 0
}
Fw.table <- read.csv("Fw.table.csv")
Irr.Eff <- Fw.table$fw[1]
Fw <- Irr
for (i in 1:length(Fw)) {
Fw[[i]][Fw[[i]] == 0] <- Irr.Eff
}
Growing.Files <- list(ETo, Precip, ROi, Irr, Fw)
save(Growing.Files, file = paste0(Intermediates, paste("Growing.Saved",
Croplayer, "Rdata", sep = ".")))
}
if (file.exists(paste0(Intermediates, paste("Growing.Saved",
Croplayer, "Rdata", sep = "."))) == TRUE) {
load(paste0(Intermediates, paste("Growing.Saved", Croplayer,
"Rdata", sep = ".")))
ETo <- Growing.Files[[1]]
Precip <- Growing.Files[[2]]
ROi <- Growing.Files[[3]]
Irr <- Growing.Files[[4]]
Fw <- Growing.Files[[5]]
}
Zr <- read.csv("crop.roots.csv")
Zr <- Zr[Zr$crop == Croplayer, ]
TAW.base <- lapply(Qfc.minus.Qwp, function(x) 1000 * (x[] *
Zr$root_depth))
Kr <- Irr
ETc <- Irr
De <- Irr
DPe <- Irr
Transp <- Irr
Ke <- Irr
E <- Irr
Transp <- Irr
Pval <- Irr
RAW <- Irr
Ks <- Irr
Transp.final <- Irr
Dr <- Irr
DP <- Irr
TAW <- Irr
setwd(paste0(Path, "/CropWatR/Intermediates/"))
load(paste("Preseason_Root.Zone.Depletion", Croplayer, "Rdata",
sep = "."))
load(paste("Preseason_Soil.Top.Layer.Depletion", Croplayer,
"Rdata", sep = "."))
load(paste("Preseason_Deep.Percolation", Croplayer, "Rdata",
sep = "."))
load(paste("Preseason_Soil.Evaporation", Croplayer, "Rdata",
sep = "."))
load(paste("Preseason_Runoff", Croplayer, "Rdata", sep = "."))
load(paste("Preseason_Weed.Transpiration", Croplayer, "Rdata",
sep = "."))
load(paste("Fallow.Saved", Croplayer, "Rdata", sep = "."))
Pre.Few <- Fallow.File[[5]]
setwd(paste0(Path, "/CropWatR/Data"))
if (file.exists(paste0(Intermediates, paste("Growing.Season.Rainfed_Transpiration",
Croplayer, "Rdata", sep = "."))) == TRUE & Auto == TRUE) {
print(paste("Growing Season has been previously calculated for",
Croplayer))
}
if (file.exists(paste0(Intermediates, paste("Growing.Season.Rainfed_Transpiration",
Croplayer, "Rdata", sep = "."))) == FALSE) {
Fw.table <- read.csv("Fw.table.csv")
Irr.Eff <- Fw.table$fw[1]
for (i in 1:length(Precip)) {
for (j in 1:length(Precip[[i]])) {
if (j == 1) {
Few[[i]][, j] <- pmin.int(Few[[i]][, j], Fw[[i]][,
j])
Kr[[i]][, j][Pre.Dei[[i]][, length(Pre.Dei[[i]])] >
REW[[i]]] <- (TEW[[i]][Pre.Dei[[i]][, length(Pre.Dei[[i]])] >
REW[[i]]] - Pre.Dei[[i]][, length(Pre.Dei[[i]])][Pre.Dei[[i]][,
length(Pre.Dei[[i]])] > REW[[i]]])/(TEW[[i]][Pre.Dei[[i]][,
length(Pre.Dei[[i]])] > REW[[i]]] - REW[[i]][Pre.Dei[[i]][,
length(Pre.Dei[[i]])] > REW[[i]]])
Kr[[i]][, j][Pre.Dei[[i]][, length(Pre.Dei[[i]])] <=
REW[[i]]] <- 1
Kr[[i]][, j][Kr[[i]][, j] < 0] <- 0
Ke[[i]][, j] <- pmin.int(Kr[[i]][, j] * (KcMax[[i]][,
j] - Kcb.corrected[[i]][, j]), Few[[i]][,
j] * KcMax[[i]][, j])
Ke[[i]][, j][Ke[[i]][, j] < 0] <- 0
E[[i]][, j] <- Ke[[i]][, j] * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - Pre.Dei[[i]][,
length(Pre.Dei[[i]])]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- Pre.Dei[[i]][, length(Pre.Dei[[i]])] -
(Precip[[i]][, j] - ROi[[i]][, j]) + Irr[[i]][,
j]/Fw[[i]][, j] + (E[[i]][, j]/Few[[i]][,
j]) + DPe[[i]][, j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
ETc[[i]][, j] <- (Kcb.corrected[[i]][, j] +
Ke[[i]][, j]) * ETo[[i]][, j]
Pval[[i]][, j] <- Zr$p.value + 0.04 * (5 -
(ETc[[i]][, j]))
Pval[[i]][, j][Pval[[i]][, j] < 0.1] <- 0.1
Pval[[i]][, j][Pval[[i]][, j] > 0.8] <- 0.8
if (is.na(Root.depth[[i]][j]/Zr$root_depth)) {
Frac <- Root.depth[[i]][length(Root.depth[[i]])]/Zr$root_depth
}
else Frac <- Root.depth[[i]][j]/Zr$root_depth
TAW[[i]][, j] <- TAW.base[[i]] * Frac
RAW[[i]][, j] <- Pval[[i]][, j] * TAW[[i]][,
j]
Dr[[i]][, j] <- Pre.Dr[[i]][, length(Pre.Dr[[i]])] -
(Precip[[i]][, j] - ROi[[i]][, j]) - Irr[[i]][,
j] + ETc[[i]][, j] + Pre.DP[[i]][, length(Pre.DP[[i]])]
Dr[[i]][, j][Dr[[i]][, j] < 0] <- 0
Dr[[i]][, j][Dr[[i]][, j] > TAW[[i]][, j]] <- TAW[[i]][,
j][Dr[[i]][, j] > TAW[[i]][, j]]
Ks[[i]][, j][Dr[[i]][, j] > RAW[[i]][, j]] <- ((TAW[[i]][,
j] - Dr[[i]][, j])[Dr[[i]][, j] > RAW[[i]][,
j]])/((1 - Pval[[i]][, j][Dr[[i]][, j] >
RAW[[i]][, j]]) * TAW[[i]][, j][Dr[[i]][,
j] > RAW[[i]][, j]])
Ks[[i]][, j][Dr[[i]][, j] <= RAW[[i]][, j]] <- 1
DP[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + Irr[[i]][, j] - ETc[[i]][, j] - Pre.Dr[[i]][,
length(Pre.Dr[[i]])]
DP[[i]][, j][Dr[[i]][, j] > 0] <- 0
DP[[i]][, j][DP[[i]][, j] < 0] <- 0
Transp[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j] + Ke[[i]][, j]) * ETo[[i]][, j]
Transp.final[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j]) * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - Pre.Dei[[i]][,
length(Pre.Dei[[i]])]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- Pre.Dei[[i]][, length(Pre.Dei[[i]])] -
(Precip[[i]][, j] - ROi[[i]][, j]) + Irr[[i]][,
j]/Fw[[i]][, j] + (E[[i]][, j]/Few[[i]][,
j]) + DPe[[i]][, j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
}
else {
Fw[[i]][, j] <- Fw[[i]][, (j - 1)]
Few[[i]][, j] <- pmin.int(Few[[i]][, j], Fw[[i]][,
j])
Kr[[i]][, j][De[[i]][, (j - 1)] > REW[[i]]] <- (TEW[[i]][De[[i]][,
(j - 1)] > REW[[i]]] - De[[i]][, (j - 1)][De[[i]][,
(j - 1)] > REW[[i]]])/(TEW[[i]][De[[i]][,
(j - 1)] > REW[[i]]] - REW[[i]][De[[i]][,
(j - 1)] > REW[[i]]])
Kr[[i]][, j][De[[i]][, (j - 1)] <= REW[[i]]] <- 1
Kr[[i]][, j][Kr[[i]][, j] < 0] <- 0
Ke[[i]][, j] <- pmin.int(Kr[[i]][, j] * (KcMax[[i]][,
j] - Kcb.corrected[[i]][, j]), Few[[i]][,
j] * KcMax[[i]][, j])
Ke[[i]][, j][Ke[[i]][, j] < 0] <- 0
ETo[[i]]
E[[i]][, j] <- Ke[[i]][, j] * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - De[[i]][,
j - 1]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- De[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) + Irr[[i]][, j]/Fw[[i]][,
j] + (E[[i]][, j]/Few[[i]][, j]) + DPe[[i]][,
j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
ETc[[i]][, j] <- (Kcb.corrected[[i]][, j] +
Ke[[i]][, j]) * ETo[[i]][, j]
Pval[[i]][, j] <- Zr$p.value + 0.04 * (5 -
(ETc[[i]][, j]))
Pval[[i]][, j][Pval[[i]][, j] < 0.1] <- 0.1
Pval[[i]][, j][Pval[[i]][, j] > 0.8] <- 0.8
if (is.na(Root.depth[[i]][j]/Zr$root_depth)) {
Frac <- Root.depth[[i]][length(Root.depth[[i]])]/Zr$root_depth
}
else Frac <- Root.depth[[i]][j]/Zr$root_depth
TAW[[i]][, j] <- TAW.base[[i]] * Frac
RAW[[i]][, j] <- Pval[[i]][, j] * TAW[[i]][,
j]
Dr[[i]][, j] <- Dr[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) - Irr[[i]][, j] + ETc[[i]][,
j] + DP[[i]][, j - 1]
Dr[[i]][, j][Dr[[i]][, j] < 0] <- 0
Dr[[i]][, j][Dr[[i]][, j] > TAW[[i]][, j]] <- TAW[[i]][,
j][Dr[[i]][, j] > TAW[[i]][, j]]
Dr[[i]][, j] <- Dr[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) - Irr[[i]][, j] + ETc[[i]][,
j] + DP[[i]][, j - 1]
Dr[[i]][, j][Dr[[i]][, j] < 0] <- 0
Dr[[i]][, j][Dr[[i]][, j] > TAW[[i]][, j]] <- TAW[[i]][,
j][Dr[[i]][, j] > TAW[[i]][, j]]
Ks[[i]][, j][Dr[[i]][, j] > RAW[[i]][, j]] <- ((TAW[[i]][,
j] - Dr[[i]][, j])[Dr[[i]][, j] > RAW[[i]][,
j]])/((1 - Pval[[i]][, j][Dr[[i]][, j] >
RAW[[i]][, j]]) * TAW[[i]][, j][Dr[[i]][,
j] > RAW[[i]][, j]])
Ks[[i]][, j][Dr[[i]][, j] <= RAW[[i]][, j]] <- 1
DP[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + Irr[[i]][, j] - ETc[[i]][, j] - Dr[[i]][,
j - 1]
DP[[i]][, j][Dr[[i]][, j] > 0] <- 0
DP[[i]][, j][DP[[i]][, j] < 0] <- 0
Transp[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j] + Ke[[i]][, j]) * ETo[[i]][, j]
Transp.final[[i]][, j] <- (Ks[[i]][, j] * Kcb.corrected[[i]][,
j]) * ETo[[i]][, j]
DPe[[i]][, j] <- (Precip[[i]][, j] - ROi[[i]][,
j]) + (Irr[[i]][, j]/Fw[[i]][, j]) - De[[i]][,
j - 1]
DPe[[i]][, j][DPe[[i]][, j] < 0] <- 0
De[[i]][, j] <- De[[i]][, j - 1] - (Precip[[i]][,
j] - ROi[[i]][, j]) + Irr[[i]][, j]/Fw[[i]][,
j] + (E[[i]][, j]/Few[[i]][, j]) + DPe[[i]][,
j]
De[[i]][, j][De[[i]][, j] < 0] <- 0
De[[i]][, j][De[[i]][, j] > TEW[[i]]] <- TEW[[i]][De[[i]][,
j] > TEW[[i]]]
}
}
Few[[i]][, 1] <- Few[[i]][, 2]
Kr[[i]][, 1] <- Kr[[i]][, 2]
Ke[[i]][, 1] <- Ke[[i]][, 2]
E[[i]][, 1] <- E[[i]][, 2]
DPe[[i]][, 1] <- DPe[[i]][, 2]
De[[i]][, 1] <- De[[i]][, 2]
ETc[[i]][, 1] <- ETc[[i]][, 2]
Pval[[i]][, 1] <- Pval[[i]][, 2]
TAW[[i]][, 1] <- TAW[[i]][, 2]
RAW[[i]][, 1] <- RAW[[i]][, 2]
Dr[[i]][, 1] <- Dr[[i]][, 2]
Dr[[i]][, 1] <- Dr[[i]][, 2]
Ks[[i]][, 1] <- Ks[[i]][, 2]
DP[[i]][, 1] <- DP[[i]][, 2]
Transp[[i]][, 1] <- Transp[[i]][, 2]
Transp.final[[i]][, 1] <- Transp.final[[i]][, 2]
}
}
print("Saving rainfed growing season SB files")
setwd(paste0(Path, "/CropWatR/Intermediates/"))
save(Few, file = paste("Growing.Season.Rainfed_Root.Zone.Depletion",
Croplayer, "Rdata", sep = "."))
save(Kr, file = paste("Growing.Season.Rainfed_Kr", Croplayer,
"Rdata", sep = "."))
save(Ks, file = paste("Growing.Season.Rainfed_Ks", Croplayer,
"Rdata", sep = "."))
save(Pval, file = paste("Growing.Season.Rainfed_Pval", Croplayer,
"Rdata", sep = "."))
save(Dr, file = paste("Growing.Season.Rainfed_Root.Zone.Depletion",
Croplayer, "Rdata", sep = "."))
save(De, file = paste("Growing.Season.Rainfed_Soil.Water.Balance",
Croplayer, "Rdata", sep = "."))
save(DP, file = paste("Growing.Season.Rainfed_Deep.Percolation",
Croplayer, "Rdata", sep = "."))
save(ROi, file = paste("Growing.Season.Rainfed_Runoff", Croplayer,
"Rdata", sep = "."))
save(E, file = paste("Growing.Season.Rainfed_Soil.Evaporation",
Croplayer, "Rdata", sep = "."))
save(Transp.final, file = paste("Growing.Season.Rainfed_Transpiration",
Croplayer, "Rdata", sep = "."))
save(DPe, file = paste("Growing.Season.Rainfed.Root.Zone.Percolation.Loss",
Croplayer, "Rdata", sep = "."))
save(Few, file = paste("Growing.Season.Rainfed.Evaporation.Fractions",
Croplayer, "Rdata", sep = "."))
setwd(paste0(Path, "/CropWatR/Data"))
print("Calculation of Growing Season daily soil water balance, deep percolation, and evaporation complete")
print("Growing Season initial run complete, on to post season")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{pal_23b}
\alias{pal_23b}
\title{Generate a color palette (n=23)}
\usage{
pal_23b()
}
\description{
Generate a color palette (n=23)
}
| /man/pal_23b.Rd | permissive | orionzhou/rmaize | R | false | true | 224 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{pal_23b}
\alias{pal_23b}
\title{Generate a color palette (n=23)}
\usage{
pal_23b()
}
\description{
Generate a color palette (n=23)
}
|
# Exploratory Data Analysis plot3.R
# Overlay the three sub-metering series for 2007-02-01/02 and save as plot3.png.

# Read the full household power-consumption file; "?" marks missing values.
power <- read.csv("household_power_consumption.txt", header = T, sep=";", na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')

# Convert Date, then build a POSIXct timestamp from Date plus the original Time.
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
power$Time <- as.POSIXct(paste(power$Date, power$Time))

# Restrict to the two target days.
feb <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")

# Base plot for Sub_metering_1, then overlay the other two series.
plot(feb$Time, feb$Sub_metering_1, type = "l", col = "black",
     ylab = "Energy sub metering", xlab = "")
lines(feb$Time, feb$Sub_metering_2, col = "red")
lines(feb$Time, feb$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the screen device to a PNG file (closed by dev.off() below).
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off() | /Explore-Data-Plotting/plot3.R | no_license | rosida/ProgrammingAssignment2 | R | false | false | 809 | r | # Exploratory Data Analysis plot3.R
read_power <- read.csv("household_power_consumption.txt", header = T, sep=";", na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
read_power$Date <- as.Date(read_power$Date, format="%d/%m/%Y")
datetime <- paste(read_power$Date, read_power$Time)
read_power$Time <- as.POSIXct(datetime)
Data <- subset(read_power,Date >= "2007-02-01" & Date <= "2007-02-02")
plot(Data$Time, Data$Sub_metering_1,type="l",col="black",ylab ="Energy sub metering",xlab="")
lines(Data$Time, Data$Sub_metering_2,col="red")
lines(Data$Time, Data$Sub_metering_3,col="blue")
legend("topright",lty=1,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.copy(png,file="plot3.png",height=480,width=480)
dev.off() |
## R-Programming Iteration 031 T.Debus / Aug 2015
##
## Helper functions to determine whether a matrix has been inverted
## before: If so it will retun the cached values from the environment
## if NOT, then it will inverse the matrix and store the results in the
## new enviroment variable using <<-
## Create a special "matrix" wrapper that caches its inverse in the
## enclosing environment. Returns a list of accessors: set/get for the
## matrix itself, setinv/getinv for the cached inverse.
makeCacheMatrix <- function(mat = matrix()) {
  inv_c_mat <- NULL
  # Bug fix: the original assigned `m <<- mat` (backwards), so set() never
  # updated the stored matrix. It must also drop any stale cached inverse
  # whenever the matrix changes.
  set <- function(m) {
    mat <<- m
    inv_c_mat <<- NULL
  }
  get <- function() { mat }
  setinv <- function(inv) { inv_c_mat <<- inv }
  getinv <- function() { inv_c_mat }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Compute the inverse of a special matrix created by `makeCacheMatrix`,
## returning the cached value when one is available.
## Bug fixes vs. the original: operate on the `mat` argument rather than the
## global `mmm`, invert the stored matrix (mat$get()) rather than the accessor
## list itself, and forward `...` to solve().
cacheSolve <- function(mat, ...) {
  inv_c_mat <- mat$getinv()
  if (!is.null(inv_c_mat)) {
    message("retrieving cache")
    return(inv_c_mat)
  }
  inv <- solve(mat$get(), ...)
  mat$setinv(inv)
  message("storing cache")
  inv
}
## Initialize list function
mmm <- makeCacheMatrix()
| /PA2_MtrxInvCch_Func.R | no_license | tomthebuzz/ProgrammingAssignment2 | R | false | false | 1,018 | r | ## R-Programming Iteration 031 T.Debus / Aug 2015
##
## Helper functions to determine whether a matrix has been inverted
## before: If so it will retun the cached values from the environment
## if NOT, then it will inverse the matrix and store the results in the
## new enviroment variable using <<-
## Create special matrix with cache to environment
makeCacheMatrix <- function(mat = matrix()) {
inv_c_mat <- NULL
set <- function(m) { m <<- mat }
get <- function(){ mat }
setinv <- function(inv) { inv_c_mat <<- inv }
getinv <- function() { inv_c_mat }
list(set=set, get=get, setinv=setinv, getinv=getinv)
}
## Compute inverse of special matrix from `makeCacheMatrix`
cacheSolve <- function(mat, ...) {
inv_c_mat <- mmm$getinv()
if (!is.null(inv_c_mat)) {
message("retrieving cache")
return(inv_c_mat)
}
inv <- mmm$setinv(solve(mat))
mmm$set(inv)
message("storing cache")
inv
}
## Initialize list function
mmm <- makeCacheMatrix()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indicies.R
\name{msci_indicies}
\alias{msci_indicies}
\title{MSCI indicies}
\usage{
msci_indicies()
}
\value{
\code{tibble}
}
\description{
Returns all MSCI indicies. This function
can be used to find indicies to search with
\code{\link{msci_indicies_constituents}}
to specify indicies to extract constituents.
}
\examples{
msci_indicies()
}
\seealso{
Other MSCI:
\code{\link{msci_indicies_constituents}()},
\code{\link{msci_realtime_index_values}()}
}
\concept{MSCI}
| /man/msci_indicies.Rd | permissive | CerebralMastication/fundManageR | R | false | true | 548 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indicies.R
\name{msci_indicies}
\alias{msci_indicies}
\title{MSCI indicies}
\usage{
msci_indicies()
}
\value{
\code{tibble}
}
\description{
Returns all MSCI indicies. This function
can be used to find indicies to search with
\code{\link{msci_indicies_constituents}}
to specify indicies to extract constituents.
}
\examples{
msci_indicies()
}
\seealso{
Other MSCI:
\code{\link{msci_indicies_constituents}()},
\code{\link{msci_realtime_index_values}()}
}
\concept{MSCI}
|
# Unroot a phylogenetic tree using the `ape` package.
library(ape)
# Read the tree in Newick format from disk.
testtree <- read.tree("4440_0.txt")
# Strip the root node; the unrooted tree is written back out below.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4440_0_unrooted.txt") | /codeml_files/newick_trees_processed/4440_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("4440_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4440_0_unrooted.txt") |
#' Geometric mean
#'
#' Computes exp(sum(log(x[x > 0]), na.rm = na.rm) / length(x)). Note that
#' non-positive entries (and, with na.rm = TRUE, missing ones) are excluded
#' from the log-sum but still counted in the denominator length(x), so they
#' effectively contribute a factor of 1 to the product.
geomean <- function(x, na.rm=TRUE){
  positives <- x[x > 0]
  log_total <- sum(log(positives), na.rm = na.rm)
  exp(log_total / length(x))
}
| /utility_functions/geomean.R | no_license | Pennycuick-Lab/cis_immunology | R | false | false | 107 | r | #' Geometric mean
geomean <- function(x, na.rm=TRUE){
exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
|
# Column conventions for the `queries` metadata below:
# VAR_PREFIX, e.g., SMQ01, CQ12
# QUERY_NAME, non NULL
# QUERY_ID, could be NULL
# QUERY_SCOPE, "BROAD", "NARROW", or NULL
# TERM_LEVEL, e.g., AEDECOD, AELLT, ...
# TERM_NAME, non NULL
# Query definitions: standardised MedDRA queries (SMQxx prefixes) and
# customised queries (CQxx prefixes); each record spans two physical lines.
queries <- tibble::tribble(
  ~VAR_PREFIX, ~QUERY_NAME, ~QUERY_ID, ~QUERY_SCOPE,
  ~QUERY_SCOPE_NUM, ~TERM_LEVEL, ~TERM_NAME, ~TERM_ID,
  "CQ01", "Dermatologic events", NA_integer_, NA_character_,
  NA_integer_, "AELLT", "APPLICATION SITE ERYTHEMA", NA_integer_,
  "CQ01", "Dermatologic events", NA_integer_, NA_character_,
  NA_integer_, "AELLT", "APPLICATION SITE PRURITUS", NA_integer_,
  "CQ01", "Dermatologic events", NA_integer_, NA_character_,
  NA_integer_, "AELLT", "ERYTHEMA", NA_integer_,
  "CQ01", "Dermatologic events", NA_integer_, NA_character_,
  NA_integer_, "AELLT", "LOCALIZED ERYTHEMA", NA_integer_,
  "CQ01", "Dermatologic events", NA_integer_, NA_character_,
  NA_integer_, "AELLT", "GENERALIZED PRURITUS", NA_integer_,
  "SMQ02", "Immune-Mediated Hypothyroidism", 20000160L, "BROAD",
  1L, "AEDECOD", "BIOPSY THYROID GLAND ABNORMAL", NA_integer_,
  "SMQ02", "Immune-Mediated Hypothyroidism", 20000160L, "BROAD",
  1L, "AEDECOD", "BLOOD THYROID STIMULATING HORMONE ABNORMAL", NA_integer_,
  "SMQ02", "Immune-Mediated Hypothyroidism", 20000160L, "NARROW",
  1L, "AEDECOD", "BIOPSY THYROID GLAND INCREASED", NA_integer_,
  "SMQ03", "Immune-Mediated Guillain-Barre Syndrome", 20000131L, "NARROW",
  2L, "AEDECOD", "GUILLAIN-BARRE SYNDROME", NA_integer_,
  "SMQ03", "Immune-Mediated Guillain-Barre Syndrome", 20000131L, "NARROW",
  2L, "AEDECOD", "MILLER FISHER SYNDROME", NA_integer_,
  "CQ04", "Immune-Mediated Adrenal Insufficiency",
  12150L, NA_character_, NA_integer_, "AEDECOD", "ADDISON'S DISEASE", NA_integer_,
  "CQ04", "Immune-Mediated Adrenal Insufficiency",
  12150L, NA_character_, NA_integer_, "AEDECOD", "ADRENAL ATROPHY", NA_integer_,
  "SMQ05", "Immune-Mediated Pneumonitis", 20000042L, "NARROW",
  2L, "AEDECOD", "ALVEOLAR PROTEINOSIS", NA_integer_,
  "SMQ05", "Immune-Mediated Pneumonitis", 20000042L, "NARROW",
  2L, "AEDECOD", "ALVEOLITIS", NA_integer_,
  "CQ06", "Immune-Mediated Colitis", 10009888L, NA_character_,
  NA_integer_, "AELLTCD", NA_character_, 1L
)
# Example adverse-event records that the queries above are matched against.
adae <- tibble::tribble(
  ~USUBJID, ~ASTDTM, ~AETERM, ~AESEQ, ~AEDECOD, ~AELLT,
  "01", "2020-06-02 23:59:59", "ERYTHEMA", 3,
  "Erythema", "Localized erythema",
  "02", "2020-06-05 23:59:59", "BASEDOW'S DISEASE", 5,
  "Basedow's disease", NA_character_,
  "02", "2020-06-05 23:59:59", "ALVEOLAR PROTEINOSIS", 1,
  "Alveolar proteinosis", NA_character_,
  "03", "2020-06-07 23:59:59", "SOME TERM", 2,
  "Some query", "Some term",
  "04", "2020-06-10 23:59:59", "APPLICATION SITE ERYTHEMA", 7,
  "APPLICATION SITE ERYTHEMA", "Application site erythema",
)
# try below: attach the CQxx/SMQxx query variables to the AE data.
derive_vars_query(adae, queries)
| /inst/example_scripts/example_query_source.R | no_license | rajkboddu/admiral | R | false | false | 2,801 | r | # VAR_PREFIX, e.g., SMQ01, CQ12
# QUERY_NAME, non NULL
# QUERY_ID, could be NULL
# QUERY_SCOPE, ‘BROAD’, ‘NARROW’, or NULL
# TERM_LEVEL, e.g., AEDECOD, AELLT, ...
# TERM_NAME, non NULL
queries <- tibble::tribble(
~VAR_PREFIX, ~QUERY_NAME, ~QUERY_ID, ~QUERY_SCOPE,
~QUERY_SCOPE_NUM, ~TERM_LEVEL, ~TERM_NAME, ~TERM_ID,
"CQ01", "Dermatologic events", NA_integer_, NA_character_,
NA_integer_, "AELLT", "APPLICATION SITE ERYTHEMA", NA_integer_,
"CQ01", "Dermatologic events", NA_integer_, NA_character_,
NA_integer_, "AELLT", "APPLICATION SITE PRURITUS", NA_integer_,
"CQ01", "Dermatologic events", NA_integer_, NA_character_,
NA_integer_, "AELLT", "ERYTHEMA", NA_integer_,
"CQ01", "Dermatologic events", NA_integer_, NA_character_,
NA_integer_, "AELLT", "LOCALIZED ERYTHEMA", NA_integer_,
"CQ01", "Dermatologic events", NA_integer_, NA_character_,
NA_integer_, "AELLT", "GENERALIZED PRURITUS", NA_integer_,
"SMQ02", "Immune-Mediated Hypothyroidism", 20000160L, "BROAD",
1L, "AEDECOD", "BIOPSY THYROID GLAND ABNORMAL", NA_integer_,
"SMQ02", "Immune-Mediated Hypothyroidism", 20000160L, "BROAD",
1L, "AEDECOD", "BLOOD THYROID STIMULATING HORMONE ABNORMAL", NA_integer_,
"SMQ02", "Immune-Mediated Hypothyroidism", 20000160L, "NARROW",
1L, "AEDECOD", "BIOPSY THYROID GLAND INCREASED", NA_integer_,
"SMQ03", "Immune-Mediated Guillain-Barre Syndrome", 20000131L, "NARROW",
2L, "AEDECOD", "GUILLAIN-BARRE SYNDROME", NA_integer_,
"SMQ03", "Immune-Mediated Guillain-Barre Syndrome", 20000131L, "NARROW",
2L, "AEDECOD", "MILLER FISHER SYNDROME", NA_integer_,
"CQ04", "Immune-Mediated Adrenal Insufficiency",
12150L, NA_character_, NA_integer_, "AEDECOD", "ADDISON'S DISEASE", NA_integer_,
"CQ04", "Immune-Mediated Adrenal Insufficiency",
12150L, NA_character_, NA_integer_, "AEDECOD", "ADRENAL ATROPHY", NA_integer_,
"SMQ05", "Immune-Mediated Pneumonitis", 20000042L, "NARROW",
2L, "AEDECOD", "ALVEOLAR PROTEINOSIS", NA_integer_,
"SMQ05", "Immune-Mediated Pneumonitis", 20000042L, "NARROW",
2L, "AEDECOD", "ALVEOLITIS", NA_integer_,
"CQ06", "Immune-Mediated Colitis", 10009888L, NA_character_,
NA_integer_, "AELLTCD", NA_character_, 1L
)
adae <- tibble::tribble(
~USUBJID, ~ASTDTM, ~AETERM, ~AESEQ, ~AEDECOD, ~AELLT,
"01", "2020-06-02 23:59:59", "ERYTHEMA", 3,
"Erythema", "Localized erythema",
"02", "2020-06-05 23:59:59", "BASEDOW'S DISEASE", 5,
"Basedow's disease", NA_character_,
"02", "2020-06-05 23:59:59", "ALVEOLAR PROTEINOSIS", 1,
"Alveolar proteinosis", NA_character_,
"03", "2020-06-07 23:59:59", "SOME TERM", 2,
"Some query", "Some term",
"04", "2020-06-10 23:59:59", "APPLICATION SITE ERYTHEMA", 7,
"APPLICATION SITE ERYTHEMA", "Application site erythema",
)
# try below:
derive_vars_query(adae, queries)
|
# Launch a Shiny app inside a CDSW session and embed it in the notebook.
# NOTE(review): installing packages at run time is fragile; consider moving
# install.packages() to a one-time setup step.
install.packages('shiny')
library('cdsw')
library('shiny')
library('parallel')
# Run the app in a forked background process so this session stays usable.
mcparallel(runApp(host="0.0.0.0", port=8080, launch.browser=FALSE,
appDir="/home/cdsw/app", display.mode="auto"))
# Build the engine's public URL from the CDSW environment variables.
service.url <- paste("http://", Sys.getenv("CDSW_ENGINE_ID"), ".",
Sys.getenv("CDSW_DOMAIN"), sep="")
# Give the app a moment to start before embedding it.
Sys.sleep(5)
# Show the running app in an iframe in the session output.
iframe(src=service.url, width="640px", height="480px")
| /Example14-Shiny-Demo/shiny_test.R | permissive | joyer7/CDSW-Demos-KOLON | R | false | false | 369 | r | install.packages('shiny')
library('cdsw')
library('shiny')
library('parallel')
mcparallel(runApp(host="0.0.0.0", port=8080, launch.browser=FALSE,
appDir="/home/cdsw/app", display.mode="auto"))
service.url <- paste("http://", Sys.getenv("CDSW_ENGINE_ID"), ".",
Sys.getenv("CDSW_DOMAIN"), sep="")
Sys.sleep(5)
iframe(src=service.url, width="640px", height="480px")
|
#' tidy_functional_beta_multi
#'
#' Multiple-site functional beta diversity (via \pkg{betapart}) returned as a
#' one-row tibble.
#'
#' @param x abundance matrix, or a precomputed \code{"functional.betapart"}
#'   core object (in which case \code{traits} is ignored).
#' @param traits traits info data.frame.
#' @param index.family "jaccard" or "sorensen" (partial matching allowed).
#' @param warning.time passed to \code{betapart::functional.betapart.core()}.
#' @return \code{tibble} with the turnover, nestedness and total components
#'   of the chosen index family.
#' @export
#'
#' @examples
tidy_functional_beta_multi <- function (x, traits, index.family = "sorensen", warning.time = TRUE)
{
    ## Fail early with a clear message if a required package is missing;
    ## requireNamespace() returns FALSE instead of erroring, so its result
    ## must be checked (previously it was silently discarded).
    if (!requireNamespace("betapart", quietly = TRUE))
        stop("package 'betapart' is required")
    if (!requireNamespace("dplyr", quietly = TRUE))
        stop("package 'dplyr' is required")
    index.family <- match.arg(index.family, c("jaccard", "sorensen"))
    fbc <- x
    if (!inherits(x, "functional.betapart")) {
        fbc <- betapart::functional.betapart.core(x, traits, multi = TRUE,
                                                  warning.time = warning.time, return.details = FALSE)
    }
    ## Sums of the pairwise max/min "not shared" components (lower triangle
    ## only, i.e. each unordered pair counted once).
    maxbibj <- sum(fbc$max.not.shared[lower.tri(fbc$max.not.shared)])
    minbibj <- sum(fbc$min.not.shared[lower.tri(fbc$min.not.shared)])
    switch(index.family, sorensen = {
        funct.beta.sim <- minbibj/(minbibj + fbc$a)
        funct.beta.sne <- (fbc$a/(minbibj + fbc$a)) * ((maxbibj -
            minbibj)/((2 * fbc$a) + maxbibj + minbibj))
        funct.beta.sor <- (minbibj + maxbibj)/(minbibj + maxbibj +
            (2 * fbc$a))
        ## dplyr::data_frame() is deprecated; tibble() is its drop-in
        ## replacement and returns the same structure.
        functional.multi <- dplyr::tibble(
            funct.beta.SIM = funct.beta.sim,
            funct.beta.SNE = funct.beta.sne,
            funct.beta.SOR = funct.beta.sor)
    }, jaccard = {
        funct.beta.jtu <- (2 * minbibj)/((2 * minbibj) + fbc$a)
        funct.beta.jne <- (fbc$a/((2 * minbibj) + fbc$a)) *
            ((maxbibj - minbibj)/((fbc$a) + maxbibj + minbibj))
        funct.beta.jac <- (minbibj + maxbibj)/(minbibj + maxbibj +
            fbc$a)
        functional.multi <- dplyr::tibble(
            funct.beta.JTU = funct.beta.jtu,
            funct.beta.JNE = funct.beta.jne,
            funct.beta.JAC = funct.beta.jac)
    })
    return(functional.multi)
}
| /R/tidy_functional_beta_multi.R | no_license | PaulESantos/betapart.tidy | R | false | false | 1,853 | r | #' tidy_functional_beta_multi
#'
#' @param x matris de abundancia.
#' @param traits traits info data.frame
#' @param index.family "jaccard" o "sorensen".
#' @param warning.time progres bar.
#' @return data_frame
#' @export
#'
#' @examples
tidy_functional_beta_multi <- function (x, traits, index.family = "sorensen", warning.time = TRUE)
{
requireNamespace("tidyverse")
requireNamespace("betapart")
index.family <- match.arg(index.family, c("jaccard", "sorensen"))
fbc <- x
if (!inherits(x, "functional.betapart")) {
fbc <- betapart::functional.betapart.core(x, traits, multi = TRUE,
warning.time = warning.time, return.details = FALSE)
}
maxbibj <- sum(fbc$max.not.shared[lower.tri(fbc$max.not.shared)])
minbibj <- sum(fbc$min.not.shared[lower.tri(fbc$min.not.shared)])
switch(index.family, sorensen = {
funct.beta.sim <- minbibj/(minbibj + fbc$a)
funct.beta.sne <- (fbc$a/(minbibj + fbc$a)) * ((maxbibj -
minbibj)/((2 * fbc$a) + maxbibj + minbibj))
funct.beta.sor <- (minbibj + maxbibj)/(minbibj + maxbibj +
(2 * fbc$a))
functional.multi <- dplyr::data_frame(
funct.beta.SIM = funct.beta.sim,
funct.beta.SNE = funct.beta.sne,
funct.beta.SOR = funct.beta.sor)
}, jaccard = {
funct.beta.jtu <- (2 * minbibj)/((2 * minbibj) + fbc$a)
funct.beta.jne <- (fbc$a/((2 * minbibj) + fbc$a)) *
((maxbibj - minbibj)/((fbc$a) + maxbibj + minbibj))
funct.beta.jac <- (minbibj + maxbibj)/(minbibj + maxbibj +
fbc$a)
functional.multi <- dplyr::data_frame(
funct.beta.JTU = funct.beta.jtu,
funct.beta.JNE = funct.beta.jne,
funct.beta.JAC = funct.beta.jac)
})
return(functional.multi)
}
|
# NOTE(review): setwd() with an absolute, user-specific path makes this script
# non-portable; prefer project-relative paths.
setwd("/Users/JHY/Documents/2018SpringCourse/Applied Data Science/Spring2018-Project3-Group1")
img_dir <- "./data/train/images/"
# EBImage comes from Bioconductor; uncomment on first run to install it:
#source("http://bioconductor.org/biocLite.R")
#biocLite("EBImage")
feature_HOG <- function(img_dir){
  ### HOG: calculate the Histogram of Oriented Gradients for each image
  ### Input: a directory that contains images ready for processing
  ### Output: an .RData file ("./output/features/HOG.RData") with one row of
  ###         HOG features per image; the feature matrix is also returned.
  ### load libraries
  library("EBImage")
  library("OpenImageR")
  dir_names <- list.files(img_dir)
  ### calculate HOG of images: build a list first and bind once at the end
  ### (growing a matrix with rbind() inside the loop is O(n^2))
  hog_list <- lapply(dir_names, function(fname) {
    img <- readImage(paste0(img_dir, fname))
    img <- rgb_2gray(img)
    HOG(img)
  })
  dat <- do.call(rbind, hog_list)
  ### output constructed features
  save(dat, file="./output/features/HOG.RData")
  return(dat)
}
dat_HOG <- feature_HOG(img_dir)
| /lib/feature_HOG.R | no_license | TZstatsADS/Spring2018-Project3-Group1 | R | false | false | 883 | r | setwd("/Users/JHY/Documents/2018SpringCourse/Applied Data Science/Spring2018-Project3-Group1")
img_dir <- "./data/train/images/"
#source("http://bioconductor.org/biocLite.R")
#biocLite("EBImage")
feature_HOG<-function(img_dir){
### HOG: calculate the Histogram of Oriented Gradient for an image
### Input: a directory that contains images ready for processing
### Output: an .RData file contains features for the images
### load libraries
library("EBImage")
library("OpenImageR")
dir_names <- list.files(img_dir)
n_files <- length(dir_names)
### calculate HOG of images
dat <- vector()
for(i in 1:n_files){
img <- readImage(paste0(img_dir,dir_names[i]))
img<-rgb_2gray(img)
dat<- rbind(dat,HOG(img))
}
### output constructed features
save(dat, file="./output/features/HOG.RData")
return(dat)
}
dat_HOG<-feature_HOG(img_dir)
|
## Copyright (C) 2012 Marius Hofert, Ivan Kojadinovic, Martin Maechler, and Jun Yan
##
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation; either version 3 of the License, or (at your option) any later
## version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
## FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License along with
## this program; if not, see <http://www.gnu.org/licenses/>.
### Estimation for nested Archimedean copulas
### initial interval/value for optimization procedures #########################
##' Compute an initial interval or value for optimization/estimation routines
##' (only a heuristic; if this fails, choose your own interval or value)
##'
##' @title Compute an initial interval or value for estimation procedures
##' @param family Archimedean family
##' @param tau.range vector containing lower and upper admissible Kendall's tau
##' @param interval logical determining if an initial interval (the default) or
##' an initial value should be returned
##' @param u matrix of realizations following a copula
##' @param method method for obtaining initial values
##' @param warn logical indicating whether a warning message is printed (the
##' default) if the DMLE for Gumbel is < 1 or not
##' @param ... further arguments to cor() for method="tau.mean"
##' @return initial interval or value which can be used for optimization
##' @author Marius Hofert
##' Heuristic initial interval or initial value for the estimation routines.
##' If the heuristic fails, supply your own interval/value.
##'
##' @title Compute an initial interval or value for estimation procedures
##' @param family Archimedean family
##' @param tau.range lower and upper admissible Kendall's tau (length 2)
##' @param interval logical; if TRUE (default) return an initial interval,
##'        otherwise an initial value
##' @param u matrix of realizations following a copula (used only when
##'        interval=FALSE)
##' @param method method for obtaining the initial value from u
##' @param warn logical; warn if the Gumbel DMLE falls below 1
##' @param ... further arguments to cor() for method="tau.mean"
##' @return initial interval or value usable by optimization routines
##' @author Marius Hofert
initOpt <- function(family, tau.range=NULL, interval=TRUE, u,
                    method=c("tau.Gumbel", "tau.mean"), warn=TRUE, ...)
{
    cop <- getAcop(family)
    if(is.null(tau.range))
        ## family-specific limiting (attainable) taus that the estimation/
        ## optimization/root-finding routines can handle
        tau.range <- switch(cop@name,
                            "AMH"     = c(0, 1/3-5e-5), # closer to 1/3, emle's mle2 fails
                            "Clayton" = c(1e-8, 0.95),
                            "Frank"   = c(1e-8, 0.94),  # beyond that, estimation.gof() fails for ebeta()
                            "Gumbel"  = c(0, 0.95),
                            "Joe"     = c(0, 0.95),
                            stop("unsupported family for initOpt"))
    if(interval) return(cop@iTau(tau.range)) # u is not needed in this case
    stopifnot(length(dim(u)) == 2L)
    method <- match.arg(method)
    ## estimate Kendall's tau from the data
    tau.hat <-
        if(method == "tau.Gumbel") {
            ## closed-form Gumbel DMLE on the componentwise maxima, mapped to tau
            mx <- apply(u, 1, max)
            th.G <- log(ncol(u))/(log(length(mx)) - log(sum(-log(mx))))
            if(th.G < 1) {
                if(warn) warning("initOpt: DMLE for Gumbel = ", th.G, " < 1; is set to 1")
                th.G <- 1
            }
            copGumbel@tau(th.G)
        } else { # "tau.mean"
            tau.mat <- cor(u, method="kendall", ...) # matrix of pairwise tau()
            mean(tau.mat[upper.tri(tau.mat)])        # mean of estimated taus
        }
    ## clamp into the admissible range before inverting tau
    cop@iTau(pmax(tau.range[1], pmin(tau.range[2], tau.hat)))
}
### Blomqvist's beta ###########################################################
##' Compute the sample version of Blomqvist's beta,
##' see, e.g., Schmid and Schmidt (2007) "Nonparametric inference on multivariate
##' versions of Blomqvist's beta and related measures of tail dependence"
##'
##' @title Sample version of Blomqvist's beta
##' @param u matrix of realizations following the copula
##' @param scaling if TRUE then the factors 2^(d-1)/(2^(d-1)-1) and
##' 2^(1-d) in Blomqvist's beta are omitted
##' @return sample version of multivariate Blomqvist beta
##' @author Marius Hofert
betan <- function(u, scaling = FALSE) {
    ## indicator of each component lying in the lower half [0, 1/2]
    lower <- u <= 0.5
    ## rows entirely in the lower resp. upper orthant around (1/2,...,1/2)
    all.lower <- apply(lower, 1, all)
    all.upper <- apply(!lower, 1, all)
    b <- mean(all.lower + all.upper)
    if(scaling) return(b)
    ## apply the factors 2^(d-1)/(2^(d-1)-1) and 2^(1-d) (cf. 'scaling')
    pow <- 2^(ncol(u) - 1)
    (pow*b - 1)/(pow - 1)
}
beta.hat <- function(u, scaling = FALSE) { .Deprecated("betan") ; betan(u, scaling) }
##' Compute the population version of Blomqvist's beta for Archimedean copulas
##'
##' @title Population version of Blomqvist's beta for Archimedean copulas
##' @param cop acopula to be estimated
##' @param theta copula parameter
##' @param d dimension
##' @param scaling if TRUE then the factors 2^(d-1)/(2^(d-1)-1) and
##' 2^(1-d) in Blomqvist's beta are omitted
##' @return population version of multivariate Blomqvist beta
##' @author Marius Hofert & Martin Maechler
beta. <- function(cop, theta, d, scaling=FALSE) {
    j <- seq_len(d)
    ## diags[j] = psi(j * iPsi(1/2)): value of the j-dimensional diagonal at 1/2
    diags <- cop@psi(j*cop@iPsi(0.5, theta), theta) # compute diagonals
    ## alternating binomial sum; from d = 30 on, evaluate choose() on the log
    ## scale via lchoose() to avoid overflow (diags are assumed positive here)
    b <- 1 + diags[d] + if(d < 30) sum((-1)^j * choose(d, j) * diags)
    else sum((-1)^j * exp(lchoose(d, j) + log(diags)))
    ## apply the factors 2^(d-1)/(2^(d-1)-1) and 2^(1-d) unless scaling=TRUE
    if(scaling) b else { T <- 2^(d-1); (T*b - 1)/(T - 1)}
}
##' Method-of-moment-like estimation of nested Archimedean copulas based on a
##' multivariate version of Blomqvist's beta
##'
##' @title Method-of-moment-like parameter estimation of nested Archimedean copulas
##' based on Blomqvist's beta
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param ... additional parameters for safeUroot
##' @return Blomqvist beta estimator; return value of safeUroot (more or less
##' equal to the return value of uniroot)
##' @author Marius Hofert
ebeta <- function(u, cop, interval=initOpt(cop@copula@name), ...) {
    d <- ncol(u)
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d), d >= 2,
              max(cop@comp) == d)
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    ## The scaling constants 2^(d-1)/(2^(d-1)-1) and 2^(1-d) cancel when the
    ## population and sample versions of Blomqvist's beta are equated, hence
    ## scaling = TRUE on both sides.
    beta.emp <- betan(u, scaling = TRUE)
    ## root of beta(theta) - beta_n over 'interval'
    safeUroot(function(th) beta.(cop@copula, th, d, scaling=TRUE) - beta.emp,
              interval=interval, Sig=+1, check.conv=TRUE, ...)
}
### Kendall's tau ##############################################################
##' Sample tau checker
##'
##' @title Check sample versions of Kendall's tau
##' @param x vector of sample versions of Kendall's tau to be checked for whether
##' they are in the range of tau of the corresponding family
##' @param family Archimedean family
##' @return checked and (if check failed) modified x
##' @author Marius Hofert
##' Clamp sample versions of Kendall's tau into the attainable range of the
##' given Archimedean family, warning (once) if any value had to be adjusted.
##'
##' @param x vector of sample versions of Kendall's tau
##' @param family Archimedean family name
##' @param warn logical; warn when values are adjusted
##' @return x with out-of-range values clamped to the family's tau range
tau.checker <- function(x, family, warn=TRUE){
    eps <- 1e-8 ## "fixed" currently
    ## limiting (attainable) taus that cop<family>@iTau() can invert *and*
    ## that yield constructible copula objects (checked via onacopulaL())
    tau.range <- switch(family,
                        "AMH"     = c(0, 1/3-eps),
                        "Clayton" = c(eps, 1-eps),
                        "Frank"   = c(eps, 1-eps),
                        "Gumbel"  = c(0, 1-eps),
                        "Joe"     = c(0, 1-eps),
                        stop("unsupported family for initOpt"))
    too.lo <- which(x < tau.range[1])
    too.hi <- which(x > tau.range[2])
    if(warn && length(too.lo)+length(too.hi) > 0){
        r <- range(x)
        if(length(x) == 1){
            warning("tau.checker: found (and adjusted) an x value out of range (x = ",
                    x,")")
        }else{
            warning("tau.checker: found (and adjusted) x values out of range (min(x) = ",
                    r[1],", max(x) = ",r[2],")")
        }
    }
    out <- x
    out[too.lo] <- tau.range[1]
    out[too.hi] <- tau.range[2]
    out
}
##' Compute pairwise estimators for nested Archimedean copulas based on Kendall's tau
##'
##' @title Pairwise estimators for nested Archimedean copulas based on Kendall's tau
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param method tau.mean indicates that the average of the sample versions of
##' Kendall's tau are computed first and then theta is determined;
##' theta.mean stands for first computing all Kendall's tau
##' estimators and then returning the mean of these estimators
##' @param warn logical indicating whether warnings are produced (for AMH and in
##' general for pairwise sample versions of Kendall's tau < 0) [the default]
##' or not
##' @param ... additional arguments to cor()
##' @return averaged pairwise cor() estimators
##' @author Marius Hofert
etau <- function(u, cop, method = c("tau.mean", "theta.mean"), warn=TRUE, ...){
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
	      max(cop@comp) == d)
    if(length(cop@childCops))
	stop("currently, only Archimedean copulas are supported")
    tau.hat.mat <- cor(u, method="kendall",...) # matrix of pairwise tau()
    tau.hat <- tau.hat.mat[upper.tri(tau.hat.mat)] # all tau hat's
    ## define tau^{-1}; for AMH, call iTau with check=FALSE (tau.checker
    ## below already clamps the values) and pass 'warn' through
    tau_inv <- if(cop@copula@name == "AMH")
	function(tau) cop@copula@iTau(tau, check=FALSE, warn=warn) else cop@copula@iTau
    ## check and apply iTau in the appropriate way
    method <- match.arg(method)
    switch(method,
	   "tau.mean" = {
	       ## average the sample taus first, then invert once
	       mean.tau.hat <- mean(tau.hat) # mean of pairwise tau.hat
	       mean.tau.hat. <- tau.checker(mean.tau.hat, family=cop@copula@name,
					    warn=warn) # check the mean
	       tau_inv(mean.tau.hat.) # Kendall's tau corresponding to the mean of the sample versions of Kendall's taus
	   },
	   "theta.mean" = {
	       ## invert each pairwise tau, then average the resulting estimators
	       tau.hat. <- tau.checker(tau.hat, family=cop@copula@name, warn=warn) # check all values
	       mean(tau_inv(tau.hat.)) # mean of the pairwise Kendall's tau estimators
	   },
	   {stop("wrong method")})
}
### Minimum distance estimation ################################################
##' Distances for minimum distance estimation
##'
##' @title Distances for minimum distance estimation
##' @param u matrix of realizations (ideally) following U[0,1]^(d-1) or U[0,1]^d
##' @param method distance methods available:
##' mde.chisq.CvM = map to a chi-square distribution (Cramer-von Mises distance)
##' mde.chisq.KS = map to a chi-square distribution (Kolmogorov-Smirnov distance)
##' mde.gamma.CvM = map to an Erlang (gamma) distribution (Cramer-von Mises distance)
##' mde.gamma.KS = map to an Erlang (gamma) distribution (Kolmogorov-Smirnov distance)
##' @return distance
##' @author Marius Hofert
##' Distances for minimum distance estimation
##'
##' @title Distances for minimum distance estimation
##' @param u matrix of realizations (ideally) following U[0,1]^(d-1) or U[0,1]^d
##' @param method distance methods available:
##'        mde.chisq.CvM = map to a chi-square distribution (Cramer-von Mises distance)
##'        mde.chisq.KS  = map to a chi-square distribution (Kolmogorov-Smirnov distance)
##'        mde.gamma.CvM = map to an Erlang (gamma) distribution (Cramer-von Mises distance)
##'        mde.gamma.KS  = map to an Erlang (gamma) distribution (Kolmogorov-Smirnov distance)
##' @return distance (length-one numeric)
##' @author Marius Hofert
emde.dist <- function(u, method = c("mde.chisq.CvM", "mde.chisq.KS", "mde.gamma.CvM",
                                    "mde.gamma.KS")) {
    if(!is.matrix(u)) u <- rbind(u, deparse.level = 0L)
    d <- ncol(u)
    n <- nrow(u)
    method <- match.arg(method) # match argument method
    switch(method,
           "mde.chisq.CvM" = { # map to a chi-square distribution
               y <- sort(rowSums(qnorm(u)^2))
               Fvals <- pchisq(y, d)
               weights <- (2*(1:n)-1)/(2*n)
               1/(12*n) + sum((weights - Fvals)^2)
           },
           "mde.chisq.KS" = { # map to a chi-square distribution
               y <- sort(rowSums(qnorm(u)^2))
               Fvals <- pchisq(y, d)
               i <- 1:n
               max(Fvals[i]-(i-1)/n, i/n-Fvals[i])
           },
           "mde.gamma.CvM" = { # map to an Erlang distribution
               y <- sort(rowSums(-log(u)))
               Fvals <- pgamma(y, shape = d)
               weights <- (2*(1:n)-1)/(2*n)
               1/(12*n) + sum((weights - Fvals)^2)
           },
           "mde.gamma.KS" = { # map to an Erlang distribution
               ## BUG FIX: y must be sorted (order statistics) for the
               ## Kolmogorov-Smirnov statistic below, exactly as in the
               ## "mde.chisq.KS" branch; the sort() was missing here.
               y <- sort(rowSums(-log(u)))
               Fvals <- pgamma(y, shape = d)
               i <- 1:n
               max(Fvals[i]-(i-1)/n, i/n-Fvals[i])
           },
           ## Note: The distances S_n^{(B)} and S_n^{(C)} turned out to be (far)
           ## too slow.
           stop("wrong distance method"))
}
##' Minimum distance estimation for nested Archimedean copulas
##'
##' @title Minimum distance estimation for nested Archimedean copulas
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param method distance methods available, see emde.dist
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param include.K logical indicating whether the last component, K, is also
##' used or not
##' @param repara logical indicating whether the distance function is
##' reparameterized for the optimization
##' @param ... additional parameters for optimize
##' @return minimum distance estimator; return value of optimize
##' @author Marius Hofert
emde <- function(u, cop, method = c("mde.chisq.CvM", "mde.chisq.KS", "mde.gamma.CvM",
				    "mde.gamma.KS"), interval = initOpt(cop@copula@name),
		 include.K = FALSE, repara = TRUE, ...)
{
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
	      max(cop@comp) == d)
    if(length(cop@childCops))
	stop("currently, only Archimedean copulas are supported")
    method <- match.arg(method) # match argument method
    distance <- function(theta) { # distance to be minimized
	cop@copula@theta <- theta
	u. <- htrafo(u, cop=cop, include.K=include.K, n.MC=0) # transform data [don't use MC here; too slow]
	emde.dist(u., method)
    }
    if(repara){
	## reparameterization function: map theta to a bounded scale so that
	## optimize() works on a transformed interval; inverse=TRUE maps back.
	## Clayton/Frank: theta <-> atan(theta)*2/pi; Gumbel/Joe: theta <-> 1-1/theta;
	## AMH: identity (its parameter range is already bounded).
	rfun <- function(x, inverse=FALSE){ # reparameterization
	    switch(cop@copula@name,
		   "AMH"={
		       x
		   },
		   "Clayton"={
		       if(inverse) tanpi(x/2) else atan(x)*2/pi
		   },
		   "Frank"={
		       if(inverse) tanpi(x/2) else atan(x)*2/pi
		   },
		   "Gumbel"={
		       if(inverse) 1/(1-x) else 1-1/x
		   },
		   "Joe"={
		       if(inverse) 1/(1-x) else 1-1/x
		   },
		   stop("emde: Reparameterization got unsupported family"))
	}
	## optimize on the transformed scale, then map the optimum back
	opt <- optimize(function(alpha) distance(rfun(alpha, inverse=TRUE)),
			interval=rfun(interval), ...)
	opt$minimum <- rfun(opt$minimum, inverse=TRUE)
	opt
    }else{
	optimize(distance, interval=interval, ...)
    }
}
### Diagonal maximum likelihood estimation #####################################
##' Density of the diagonal of a nested Archimedean copula
##'
##' @title Diagonal density of a nested Archimedean copula
##' @param u evaluation point in [0,1]
##' @param cop outer_nacopula
##' @param log if TRUE the log-density is evaluated
##' @return density of the diagonal of cop
##' @author Marius Hofert
dDiag <- function(u, cop, log=FALSE) {
    stopifnot(is(cop, "outer_nacopula"), (d <- max(cop@comp)) >= 2)
    if(length(cop@childCops)) {
	stop("currently, only Archimedean copulas are supported")
    }
    else ## (non-nested) Archimedean :
	## delegate to the family-specific dDiag slot of the acopula
	## FIXME: choose one or the other (if a family has no such slot)
	## dDiagA(u, d=d, cop = cop@copula, log=log)
	cop@copula@dDiag(u, theta=cop@copula@theta, d=d, log=log)
}
##' @title Generic density of the diagonal of d-dim. Archimedean copula
##' @param u evaluation point in [0, 1]
##' @param d dimension
##' @param cop acopula
##' @param log if TRUE the log-density is evaluated
##' @return density of the diagonal of cop
##' @author Martin Maechler
dDiagA <- function(u, d, cop, log=FALSE) {
    stopifnot(is.finite(th <- cop@theta), d >= 2)
    ## catch the u == 0 case directly for the families that need it (e.g. AMH).
    ## BUG FIX: the original tested 'copAMH@name' (the global AMH object)
    ## instead of 'cop@name', so the condition was TRUE for *every* family;
    ## now the family of the supplied 'cop' decides.
    if(cop@name %in% c("AMH","Frank","Gumbel","Joe") &&
       any(i0 <- u == 0)) {
	if(log) u[i0] <- -Inf          # log-density -Inf at 0 (density 0 otherwise)
	u[!i0] <- dDiagA(u[!i0], d=d, cop=cop, log=log)
	return(u)
    }
    ## generic diagonal density: d * |psi'(d * iPsi(u))| * |iPsi'(u)|
    if(log) {
	log(d) + cop@absdPsi(d*cop@iPsi(u, th), th, log=TRUE) +
	    cop@absdiPsi(u, th, log=TRUE)
    } else {
	d * cop@absdPsi(d*cop@iPsi(u, th), th) * cop@absdiPsi(u, th)
    }
}
##' Maximum likelihood estimation based on the diagonal of a nested Archimedean copula
##'
##' @title Maximum likelihood estimation based on the diagonal of a nested Archimedean copula
##' @param u matrix of realizations following a copula
##' @param cop outer_nacopula to be estimated
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param warn logical indicating whether a warning message is printed (the
##' default) if the DMLE for Gumbel is < 1 or not
##' @param ... additional parameters for optimize
##' @return diagonal maximum likelihood estimator; return value of optimize
##' @author Marius Hofert
edmle <- function(u, cop, interval=initOpt(cop@copula@name), warn=TRUE, ...)
{
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
	      max(cop@comp) == d) # dimension
    if(length(cop@childCops))
	stop("currently, only Archimedean copulas are supported")
    x <- apply(u, 1, max) # data from the diagonal (componentwise maxima)
    ## explicit (closed-form) diagonal MLE for Gumbel
    if(cop@copula@name == "Gumbel") {
	th.G <- log(d)/(log(length(x))-log(sum(-log(x))))
	## guard against non-finite or inadmissible estimates (theta >= 1)
	if(!is.finite(th.G) || th.G < 1) {
	    if(warn) warning("edmle: DMLE for Gumbel = ",th.G,"; not in [1, Inf); is set to 1")
	    th.G <- 1
	}
	list(minimum = th.G, objective = 0) # return value of the same structure as for optimize
    } else {
	## numerically maximize the log-likelihood of the diagonal
	nlogL <- function(theta) # -log-likelihood of the diagonal
	    -sum(cop@copula@dDiag(x, theta=theta, d=d, log=TRUE))
	optimize(nlogL, interval=interval, ...)
    }
}
### (Simulated) maximum likelihood estimation ##################################
##' (Simulated) maximum likelihood estimation for nested Archimedean copulas
##' -- *Fast* version (based on optimize()) called from enacopula
##'
##' @title (Simulated) maximum likelihood estimation for nested Archimedean copulas
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param n.MC if > 0 SMLE is applied with sample size equal to n.MC; otherwise,
##' MLE is applied
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param ... additional parameters for optimize
##' @return (simulated) maximum likelihood estimator; return value of optimize
##' @author Marius Hofert
.emle <- function(u, cop, n.MC=0, interval=initOpt(cop@copula@name), ...)
{
    stopifnot(is(cop, "outer_nacopula"))
    if(length(cop@childCops))
	stop("currently, only Archimedean copulas are supported")
    if(!is.matrix(u)) u <- rbind(u, deparse.level = 0L)
    ## negative log-likelihood as a function of the copula parameter
    ## (n.MC > 0 switches .dnacopula to its simulated version)
    nLL <- function(th) {
	cop@copula@theta <- th
	-sum(.dnacopula(u, cop, n.MC=n.MC, log=TRUE))
    }
    ## one-dimensional likelihood maximization over 'interval'
    optimize(nLL, interval=interval, ...)
}
##' (Simulated) maximum likelihood estimation for nested Archimedean copulas
##'
##' @title (Simulated) maximum likelihood estimation for nested Archimedean copulas
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param n.MC if > 0 SMLE is applied with sample size equal to n.MC; otherwise,
##' MLE is applied
##' @param optimizer optimizer used (if optimizer=NULL (or NA), then mle (instead
##' of mle2) is used with the provided method)
##' @param method optim's method to be used (when optimizer=NULL or "optim" and
##' in these cases method is a required argument)
##' @param interval bivariate vector denoting the interval where optimization
##' takes place
##' @param start list containing the initial value(s) (unfortunately required by mle2)
##' @param ... additional parameters for optimize
##' @return an "mle2" object with the (simulated) maximum likelihood estimator.
##' @author Martin Maechler and Marius Hofert
##' Note: this is the *slower* version which also allows for profiling
emle <- function(u, cop, n.MC=0, optimizer="optimize", method,
		 interval=initOpt(cop@copula@name),
		 ##vvv awkward to be needed, but it is - by mle2():
		 start = list(theta=initOpt(cop@copula@name, interval=FALSE, u=u)),
		 ...)
{
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
	      max(cop@comp) == d)
    ## nLL <- function(theta) { # -log-likelihood
    ##	   cop@copula@theta <- theta
    ##	   -sum(.dnacopula(u, cop, n.MC=n.MC, log=TRUE))
    ## }
    if(length(cop@childCops))
	stop("currently, only Archimedean copulas are supported")
    else ## For (*non*-nested) copulas only:
	nLL <- function(theta) # -(log-likelihood)
	    -sum(cop@copula@dacopula(u, theta, n.MC=n.MC, log=TRUE))
    ## optimization: optimizer=NULL/NA -> stats4::mle() (via optim);
    ## otherwise dispatch through bbmle::mle2() with the chosen optimizer
    if(!(is.null(optimizer) || is.na(optimizer))) {
	## stopifnot(requireNamespace("bbmle"))
	if(optimizer == "optimize")
	    ## 1-d bounded optimization over [interval[1], interval[2]]
	    bbmle::mle2(minuslogl = nLL, optimizer = "optimize",
			lower = interval[1], upper = interval[2],
			##vvv awkward to be needed, but it is - by mle2():
			start=start, ...)
	else if(optimizer == "optim") {
	    message(" optimizer = \"optim\" -- using mle2(); consider optimizer=NULL instead")
	    bbmle::mle2(minuslogl = nLL, optimizer = "optim", method = method,
			start=start, ...)
	}
	else ## "general"
	    bbmle::mle2(minuslogl = nLL, optimizer = optimizer, start=start, ...)
    }
    else
	## use optim() .. [which uses suboptimal method for 1D, but provides Hessian]
	mle(minuslogl = nLL, method = method, start=start, ...)
}
### Estimation wrapper #########################################################
##' Computes the pseudo-observations for the given data matrix
##'
##' @title Pseudo-observations
##' @param x matrix of random variates to be converted to pseudo-observations
##' @param na.last passed to rank()
##' @param ties.method passed to rank()
##' @param lower.tail if FALSE, pseudo-observations when apply the empirical
##' marginal survival functions are returned.
##' @return pseudo-observations (matrix of the same dimensions as x)
##' @author Marius Hofert
pobs <- function(x, na.last = "keep",
		 ## default = the ties.method choices rank() itself offers
		 ties.method = eval(formals(rank)$ties.method),
		 lower.tail = TRUE) {
    ties.method <- match.arg(ties.method)
    ## column-wise ranks scaled by (n+1) so values lie strictly inside (0,1)
    rk <- function(col) rank(col, na.last = na.last, ties.method = ties.method)
    U <- apply(x, 2, rk) / (nrow(x) + 1)
    if(lower.tail) U else 1 - U
}
##' Computes different parameter estimates for a nested Archimedean copula
##'
##' @title Estimation procedures for nested Archimedean copulas
##' @param u data matrix (of pseudo-observations or from the copula "directly")
##' @param cop outer_nacopula to be estimated
##' @param method estimation method; can be
##' "mle" MLE
##' "smle" SMLE
##' "dmle" MLE based on the diagonal
##' "mde.chisq.CvM" minimum distance estimation based on the chisq distribution and CvM distance
##' "mde.chisq.KS" minimum distance estimation based on the chisq distribution and KS distance
##' "mde.gamma.CvM" minimum distance estimation based on the Erlang distribution and CvM distance
##' "mde.gamma.KS" minimum distance estimation based on the Erlang distribution and KS distance
##' "tau.tau.mean" averaged pairwise Kendall's tau estimator
##' "tau.theta.mean" average of Kendall's tau estimators
##' "beta" multivariate Blomqvist's beta estimator
##' @param n.MC if > 0 it denotes the sample size for SMLE
##' @param interval initial optimization interval for "mle", "smle", and "dmle"
##' @param xargs additional arguments for the estimation procedures
##' @param ... additional parameters for optimize
##' @return estimated value/vector according to the chosen method
##' @author Marius Hofert
enacopula <- function(u, cop, method=c("mle", "smle", "dmle", "mde.chisq.CvM",
                                       "mde.chisq.KS", "mde.gamma.CvM", "mde.gamma.KS",
                                       "tau.tau.mean", "tau.theta.mean", "beta"),
                      n.MC = if(method=="smle") 10000 else 0,
                      interval=initOpt(cop@copula@name),
                      xargs=list(), ...)
{
    ## setup
    if(!is.matrix(u)) u <- rbind(u, deparse.level = 0L)
    ## Match 'method' *before* anything that evaluates it or n.MC:
    ## the default of n.MC is the lazily-evaluated 'if(method=="smle") ...'
    ## and the consistency check below compares method as a scalar; before
    ## match.arg() 'method' may still be the full length-10 default vector,
    ## which would make these comparisons fail.
    method <- match.arg(method)
    stopifnot(0 <= u, u <= 1, is(cop, "outer_nacopula"), (d <- ncol(u)) >= 2,
              max(cop@comp) == d, n.MC >= 0, is.list(xargs))
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    if(n.MC > 0 && method != "smle")
        ## stop() concatenates its arguments (no format substitution),
        ## hence build the message with sprintf()
        stop(sprintf("n.MC > 0 is not applicable to method '%s'", method))
    ## main part: dispatch to the individual estimation routines
    res <- switch(method,
                  "mle" = do.call(.emle, c(list(u, cop,
                                                interval = interval, ...), xargs)),
                  "smle" = do.call(.emle, c(list(u, cop, n.MC = n.MC,
                                                 interval = interval, ...), xargs)),
                  "dmle" = do.call(edmle, c(list(u, cop,
                                                 interval = interval, ...), xargs)),
                  "mde.chisq.CvM" = do.call(emde, c(list(u, cop, "mde.chisq.CvM",
                                                         interval = interval, ...), xargs)),
                  "mde.chisq.KS" = do.call(emde, c(list(u, cop, "mde.chisq.KS",
                                                        interval = interval, ...), xargs)),
                  "mde.gamma.CvM" = do.call(emde, c(list(u, cop, "mde.gamma.CvM",
                                                         interval = interval, ...), xargs)),
                  "mde.gamma.KS" = do.call(emde, c(list(u, cop, "mde.gamma.KS",
                                                        interval = interval, ...), xargs)),
                  "tau.tau.mean" = do.call(etau, c(list(u, cop, "tau.mean", ...),
                                                   xargs)),
                  "tau.theta.mean" = do.call(etau, c(list(u, cop, "theta.mean", ...),
                                                     xargs)),
                  "beta" = do.call(ebeta, c(list(u, cop,
                                                 interval = interval, ...), xargs)),
                  stop("wrong estimation method for enacopula"))
    ## FIXME: deal with result, check details, give warnings
    ## return the estimate: optimize()-style results carry $minimum,
    ## ebeta() returns a root-finding result ($root), etau() a plain number
    switch(method,
           "mle" = res$minimum,
           "smle" = res$minimum,
           "dmle" = res$minimum,
           "mde.chisq.CvM" = res$minimum,
           "mde.chisq.KS" = res$minimum,
           "mde.gamma.CvM" = res$minimum,
           "mde.gamma.KS" = res$minimum,
           "tau.tau.mean" = res,
           "tau.theta.mean" = res,
           "beta" = res$root,
           stop("wrong estimation method"))
}
| /copula/R/estimation.R | no_license | ingted/R-Examples | R | false | false | 28,354 | r | ## Copyright (C) 2012 Marius Hofert, Ivan Kojadinovic, Martin Maechler, and Jun Yan
##
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation; either version 3 of the License, or (at your option) any later
## version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
## FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License along with
## this program; if not, see <http://www.gnu.org/licenses/>.
### Estimation for nested Archimedean copulas
### initial interval/value for optimization procedures #########################
##' Compute an initial interval or value for optimization/estimation routines
##' (only a heuristic; if this fails, choose your own interval or value)
##'
##' @title Compute an initial interval or value for estimation procedures
##' @param family Archimedean family
##' @param tau.range vector containing lower and upper admissible Kendall's tau
##' @param interval logical determining if an initial interval (the default) or
##' an initial value should be returned
##' @param u matrix of realizations following a copula
##' @param method method for obtaining initial values
##' @param warn logical indicating whether a warning message is printed (the
##' default) if the DMLE for Gumbel is < 1 or not
##' @param ... further arguments to cor() for method="tau.mean"
##' @return initial interval or value which can be used for optimization
##' @author Marius Hofert
## Heuristic initial interval (or point) for the optimization-based estimators;
## see the roxygen block above for the argument documentation.
initOpt <- function(family, tau.range=NULL, interval=TRUE, u,
                    method=c("tau.Gumbel", "tau.mean"), warn=TRUE, ...)
{
    cop <- getAcop(family)
    if(is.null(tau.range)){
        tau.range <- switch(cop@name, # limiting (attainable) taus that can be dealt with in estimation/optimization/root-finding
                            "AMH" = { c(0, 1/3-5e-5) }, # FIXME: closer to 1, emle's mle2 fails; note: typically, Std. Error still not available and thus profile() may fail => adjust by hand
                            "Clayton" = { c(1e-8, 0.95) },
                            "Frank" = { c(1e-8, 0.94) }, # FIXME: beyond that, estimation.gof() fails for ebeta()!
                            "Gumbel" = { c(0, 0.95) },
                            "Joe" = { c(0, 0.95) },
                            stop("unsupported family for initOpt"))
    }
    ## interval case: map the admissible tau range to the parameter scale
    ## via iTau(); the data u is not needed here
    if(interval) return(cop@iTau(tau.range)) # u is not required
    stopifnot(length(dim(u)) == 2L)
    method <- match.arg(method)
    ## estimate Kendall's tau
    tau.hat <- switch(method,
                      "tau.Gumbel" = {
                          ## use Gumbel's explicit diagonal MLE, then convert the
                          ## parameter estimate back to a tau via copGumbel@tau()
                          x <- apply(u, 1, max)
                          theta.hat.G <- log(ncol(u))/(log(length(x))-log(sum(-log(x)))) # direct formula from edmle for Gumbel
                          if(theta.hat.G < 1){
                              if(warn) warning("initOpt: DMLE for Gumbel = ",theta.hat.G," < 1; is set to 1")
                              theta.hat.G <- 1
                          }
                          copGumbel@tau(theta.hat.G)
                      },
                      "tau.mean" = {
                          tau.hat.mat <- cor(u, method="kendall", ...) # matrix of pairwise tau()
                          mean(tau.hat.mat[upper.tri(tau.hat.mat)]) # mean of estimated taus
                      },
                      stop("wrong method for initOpt"))
    ## truncate to range if required, then map to the parameter scale
    cop@iTau(pmax(tau.range[1], pmin(tau.range[2], tau.hat)))
}
### Blomqvist's beta ###########################################################
##' Compute the sample version of Blomqvist's beta,
##' see, e.g., Schmid and Schmidt (2007) "Nonparametric inference on multivariate
##' versions of Blomqvist's beta and related measures of tail dependence"
##'
##' @title Sample version of Blomqvist's beta
##' @param u matrix of realizations following the copula
##' @param scaling if TRUE then the factors 2^(d-1)/(2^(d-1)-1) and
##' 2^(1-d) in Blomqvist's beta are omitted
##' @return sample version of multivariate Blomqvist beta
##' @author Marius Hofert
betan <- function(u, scaling = FALSE) {
    ## indicator per observation: all components in the lower (<= 1/2)
    ## or all in the upper (> 1/2) half-orthant
    lower <- u <= 0.5
    b <- mean(apply(lower, 1L, all) | apply(!lower, 1L, all))
    if (scaling) return(b)
    ## rescale so that independence gives 0 and comonotonicity gives 1
    scale.fac <- 2^(ncol(u) - 1)
    (scale.fac * b - 1)/(scale.fac - 1)
}
## Deprecated alias kept for backward compatibility; use betan() instead.
beta.hat <- function(u, scaling = FALSE) { .Deprecated("betan") ; betan(u, scaling) }
##' Compute the population version of Blomqvist's beta for Archimedean copulas
##'
##' @title Population version of Blomqvist's beta for Archimedean copulas
##' @param cop acopula to be estimated
##' @param theta copula parameter
##' @param d dimension
##' @param scaling if TRUE then the factors 2^(d-1)/(2^(d-1)-1) and
##' 2^(1-d) in Blomqvist's beta are omitted
##' @return population version of multivariate Blomqvist beta
##' @author Marius Hofert & Martin Maechler
beta. <- function(cop, theta, d, scaling=FALSE) {
    ## diagonal of the copula at u = (1/2, ..., 1/2) in dimensions 1..d
    k <- seq_len(d)
    delta <- cop@psi(k * cop@iPsi(0.5, theta), theta)
    ## alternating binomial sum; for large d work with log-binomials
    ## (lchoose) to avoid overflow in choose(d, k)
    alt.sum <- if (d < 30)
        sum((-1)^k * choose(d, k) * delta)
    else
        sum((-1)^k * exp(lchoose(d, k) + log(delta)))
    b <- 1 + delta[d] + alt.sum
    if (scaling)
        b
    else {
        s <- 2^(d - 1)
        (s * b - 1)/(s - 1)
    }
}
##' Method-of-moment-like estimation of nested Archimedean copulas based on a
##' multivariate version of Blomqvist's beta
##'
##' @title Method-of-moment-like parameter estimation of nested Archimedean copulas
##' based on Blomqvist's beta
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param ... additional parameters for safeUroot
##' @return Blomqvist beta estimator; return value of safeUroot (more or less
##' equal to the return value of uniroot)
##' @author Marius Hofert
ebeta <- function(u, cop, interval=initOpt(cop@copula@name), ...) {
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
              max(cop@comp) == d)
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    ## The constants 2^(d-1)/(2^(d-1)-1) and 2^(1-d) cancel since we equate
    ## the *unscaled* population and sample versions of Blomqvist's beta.
    b.emp <- betan(u, scaling = TRUE)
    ## solve beta(theta) = beta_n for theta
    safeUroot(function(theta) beta.(cop@copula, theta, d, scaling=TRUE) - b.emp,
              interval=interval, Sig=+1, check.conv=TRUE, ...)
}
### Kendall's tau ##############################################################
##' Sample tau checker
##'
##' @title Check sample versions of Kendall's tau
##' @param x vector of sample versions of Kendall's tau to be checked for whether
##' they are in the range of tau of the corresponding family
##' @param family Archimedean family
##' @return checked and (if check failed) modified x
##' @author Marius Hofert
tau.checker <- function(x, family, warn=TRUE){
    ## Admissible tau range per family; these are the limiting values that
    ## iTau() and the copula constructors can handle (see the original notes
    ## in initOpt()).
    eps <- 1e-8 ## "fixed" currently
    tau.range <- switch(family,
                        "AMH"     = c(0,   1/3 - eps),
                        "Clayton" = c(eps, 1 - eps),
                        "Frank"   = c(eps, 1 - eps),
                        "Gumbel"  = c(0,   1 - eps),
                        "Joe"     = c(0,   1 - eps),
                        stop("unsupported family for initOpt"))
    out.lo <- which(x < tau.range[1])
    out.hi <- which(x > tau.range[2])
    if(warn && (length(out.lo) + length(out.hi) > 0)){
        r <- range(x)
        if(length(x) == 1){
            warning("tau.checker: found (and adjusted) an x value out of range (x = ",
                    x,")")
        }else{
            warning("tau.checker: found (and adjusted) x values out of range (min(x) = ",
                    r[1],", max(x) = ",r[2],")")
        }
    }
    ## clamp into the admissible range (NAs pass through unchanged)
    pmin(pmax(x, tau.range[1]), tau.range[2])
}
##' Compute pairwise estimators for nested Archimedean copulas based on Kendall's tau
##'
##' @title Pairwise estimators for nested Archimedean copulas based on Kendall's tau
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param method tau.mean indicates that the average of the sample versions of
##' Kendall's tau are computed first and then theta is determined;
##' theta.mean stands for first computing all Kendall's tau
##' estimators and then returning the mean of these estimators
##' @param warn logical indicating whether warnings are produced (for AMH and in
##' general for pairwise sample versions of Kendall's tau < 0) [the default]
##' or not
##' @param ... additional arguments to cor()
##' @return averaged pairwise cor() estimators
##' @author Marius Hofert
## Pairwise-Kendall's-tau estimator for (non-nested) Archimedean copulas.
## "tau.mean":   average the pairwise sample taus, then apply iTau() once;
## "theta.mean": apply iTau() to each pairwise tau, then average the parameters.
etau <- function(u, cop, method = c("tau.mean", "theta.mean"), warn=TRUE, ...){
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
              max(cop@comp) == d)
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    tau.hat.mat <- cor(u, method="kendall",...) # matrix of pairwise tau()
    tau.hat <- tau.hat.mat[upper.tri(tau.hat.mat)] # all tau hat's
    ## define tau^{-1}
    ## (for AMH, iTau() is called with check=FALSE -- presumably to skip its
    ##  own range check, since values are adjusted by tau.checker() below;
    ##  warn is forwarded)
    tau_inv <- if(cop@copula@name == "AMH")
        function(tau) cop@copula@iTau(tau, check=FALSE, warn=warn) else cop@copula@iTau
    ## check and apply iTau in the appropriate way
    method <- match.arg(method)
    switch(method,
           "tau.mean" = {
               mean.tau.hat <- mean(tau.hat) # mean of pairwise tau.hat
               mean.tau.hat. <- tau.checker(mean.tau.hat, family=cop@copula@name,
                                            warn=warn) # check the mean
               tau_inv(mean.tau.hat.) # Kendall's tau corresponding to the mean of the sample versions of Kendall's taus
           },
           "theta.mean" = {
               tau.hat. <- tau.checker(tau.hat, family=cop@copula@name, warn=warn) # check all values
               mean(tau_inv(tau.hat.)) # mean of the pairwise Kendall's tau estimators
           },
           {stop("wrong method")}) # unreachable after match.arg(), kept defensively
}
### Minimum distance estimation ################################################
##' Distances for minimum distance estimation
##'
##' @title Distances for minimum distance estimation
##' @param u matrix of realizations (ideally) following U[0,1]^(d-1) or U[0,1]^d
##' @param method distance methods available:
##' mde.chisq.CvM = map to a chi-square distribution (Cramer-von Mises distance)
##' mde.chisq.KS = map to a chi-square distribution (Kolmogorov-Smirnov distance)
##' mde.gamma.CvM = map to an Erlang (gamma) distribution (Cramer-von Mises distance)
##' mde.gamma.KS = map to an Erlang (gamma) distribution (Kolmogorov-Smirnov distance)
##' @return distance
##' @author Marius Hofert
emde.dist <- function(u, method = c("mde.chisq.CvM", "mde.chisq.KS", "mde.gamma.CvM",
                                    "mde.gamma.KS")) {
    if(!is.matrix(u)) u <- rbind(u, deparse.level = 0L)
    d <- ncol(u)
    n <- nrow(u)
    method <- match.arg(method) # match argument method
    switch(method,
           "mde.chisq.CvM" = { # map to a chi-square distribution
               y <- sort(rowSums(qnorm(u)^2))
               Fvals <- pchisq(y, d)
               weights <- (2*(1:n)-1)/(2*n)
               1/(12*n) + sum((weights - Fvals)^2)
           },
           "mde.chisq.KS" = { # map to a chi-square distribution
               y <- sort(rowSums(qnorm(u)^2))
               Fvals <- pchisq(y, d)
               i <- 1:n
               max(Fvals[i]-(i-1)/n, i/n-Fvals[i])
           },
           "mde.gamma.CvM" = { # map to an Erlang distribution
               y <- sort(rowSums(-log(u)))
               Fvals <- pgamma(y, shape = d)
               weights <- (2*(1:n)-1)/(2*n)
               1/(12*n) + sum((weights - Fvals)^2)
           },
           "mde.gamma.KS" = { # map to an Erlang distribution
               ## BUG FIX: y must be sorted for the Kolmogorov-Smirnov statistic
               ## (as in "mde.chisq.KS" above); otherwise Fvals is compared
               ## against the wrong order statistics (i-1)/n and i/n.
               y <- sort(rowSums(-log(u)))
               Fvals <- pgamma(y, shape = d)
               i <- 1:n
               max(Fvals[i]-(i-1)/n, i/n-Fvals[i])
           },
           ## Note: The distances S_n^{(B)} and S_n^{(C)} turned out to be (far)
           ## too slow.
           stop("wrong distance method"))
}
##' Minimum distance estimation for nested Archimedean copulas
##'
##' @title Minimum distance estimation for nested Archimedean copulas
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param method distance methods available, see emde.dist
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param include.K logical indicating whether the last component, K, is also
##' used or not
##' @param repara logical indicating whether the distance function is
##' reparameterized for the optimization
##' @param ... additional parameters for optimize
##' @return minimum distance estimator; return value of optimize
##' @author Marius Hofert
emde <- function(u, cop, method = c("mde.chisq.CvM", "mde.chisq.KS", "mde.gamma.CvM",
                                    "mde.gamma.KS"), interval = initOpt(cop@copula@name),
                 include.K = FALSE, repara = TRUE, ...)
{
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
              max(cop@comp) == d)
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    method <- match.arg(method) # match argument method
    ## objective: emde.dist() applied to the htrafo()-transformed data,
    ## evaluated at copula parameter theta
    distance <- function(theta) { # distance to be minimized
        cop@copula@theta <- theta
        u. <- htrafo(u, cop=cop, include.K=include.K, n.MC=0) # transform data [don't use MC here; too slow]
        emde.dist(u., method)
    }
    if(repara){
        ## reparameterization function: maps the (possibly unbounded) parameter
        ## range onto a bounded interval (inverse=TRUE maps back), so that
        ## optimize() works on a compact set; e.g. for Gumbel/Joe
        ## theta in [1, Inf) <-> 1 - 1/theta in [0, 1)
        rfun <- function(x, inverse=FALSE){ # reparameterization
            switch(cop@copula@name,
                   "AMH"={
                       x # AMH's parameter range is already bounded
                   },
                   "Clayton"={
                       if(inverse) tanpi(x/2) else atan(x)*2/pi
                   },
                   "Frank"={
                       if(inverse) tanpi(x/2) else atan(x)*2/pi
                   },
                   "Gumbel"={
                       if(inverse) 1/(1-x) else 1-1/x
                   },
                   "Joe"={
                       if(inverse) 1/(1-x) else 1-1/x
                   },
                   stop("emde: Reparameterization got unsupported family"))
        }
        ## optimize on the transformed scale, then map the optimum back
        opt <- optimize(function(alpha) distance(rfun(alpha, inverse=TRUE)),
                        interval=rfun(interval), ...)
        opt$minimum <- rfun(opt$minimum, inverse=TRUE)
        opt
    }else{
        optimize(distance, interval=interval, ...)
    }
}
### Diagonal maximum likelihood estimation #####################################
##' Density of the diagonal of a nested Archimedean copula
##'
##' @title Diagonal density of a nested Archimedean copula
##' @param u evaluation point in [0,1]
##' @param cop outer_nacopula
##' @param log if TRUE the log-density is evaluated
##' @return density of the diagonal of cop
##' @author Marius Hofert
dDiag <- function(u, cop, log=FALSE) {
    ## density of the diagonal of an outer_nacopula, evaluated at u
    stopifnot(is(cop, "outer_nacopula"), (d <- max(cop@comp)) >= 2)
    ## guard clause: nested (child) copulas are not (yet) supported
    if (length(cop@childCops) > 0)
        stop("currently, only Archimedean copulas are supported")
    ## (non-nested) Archimedean: delegate to the family's dDiag slot
    ## FIXME: choose one or the other (if a family has no such slot):
    ## dDiagA(u, d=d, cop = cop@copula, log=log)
    cop@copula@dDiag(u, theta = cop@copula@theta, d = d, log = log)
}
##' @title Generic density of the diagonal of d-dim. Archimedean copula
##' @param u evaluation point in [0, 1]
##' @param d dimension
##' @param cop acopula
##' @param log if TRUE the log-density is evaluated
##' @return density of the diagonal of cop
##' @author Martin Maechler
dDiagA <- function(u, d, cop, log=FALSE) {
    ## Generic density of the diagonal of a d-dim. Archimedean copula:
    ## delta'(u) = d * |psi'(d * iPsi(u))| * |(iPsi)'(u)|
    stopifnot(is.finite(th <- cop@theta), d >= 2)
    ## Handle u == 0 explicitly: iPsi(0) may be Inf so the product below
    ## could yield NaN (0 * Inf).
    ## BUG FIX: the original condition any(copAMH@name == c("AMH","Frank",
    ## "Gumbel","Joe")) referenced the *global* copAMH (surely a typo for
    ## 'cop') and was therefore constantly TRUE -- i.e. the shortcut applied
    ## to *all* families; that behavior is kept, just without the vacuous
    ## test and the spurious dependency on copAMH.
    if(any(i0 <- u == 0)) {
        if(log) u[i0] <- -Inf   # log-density -Inf at the boundary (density 0 otherwise)
        u[!i0] <- dDiagA(u[!i0], d=d, cop=cop, log=log)
        return(u)
    }
    if(log) {
        log(d) + cop@absdPsi(d*cop@iPsi(u, th), th, log=TRUE) +
            cop@absdiPsi(u, th, log=TRUE)
    } else {
        d * cop@absdPsi(d*cop@iPsi(u, th), th) * cop@absdiPsi(u, th)
    }
}
##' Maximum likelihood estimation based on the diagonal of a nested Archimedean copula
##'
##' @title Maximum likelihood estimation based on the diagonal of a nested Archimedean copula
##' @param u matrix of realizations following a copula
##' @param cop outer_nacopula to be estimated
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param warn logical indicating whether a warning message is printed (the
##' default) if the DMLE for Gumbel is < 1 or not
##' @param ... additional parameters for optimize
##' @return diagonal maximum likelihood estimator; return value of optimize
##' @author Marius Hofert
edmle <- function(u, cop, interval=initOpt(cop@copula@name), warn=TRUE, ...)
{
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
              max(cop@comp) == d) # dimension
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    ## data on the diagonal: componentwise maxima of each observation
    diag.data <- apply(u, 1, max)
    if(cop@copula@name != "Gumbel") {
        ## numerically maximize the log-likelihood of the diagonal
        nlogL <- function(theta)
            -sum(cop@copula@dDiag(diag.data, theta=theta, d=d, log=TRUE))
        optimize(nlogL, interval=interval, ...)
    } else {
        ## Gumbel admits a closed-form diagonal MLE
        n <- length(diag.data)
        theta.hat <- log(d)/(log(n) - log(sum(-log(diag.data))))
        if(!is.finite(theta.hat) || theta.hat < 1) {
            if(warn) warning("edmle: DMLE for Gumbel = ",theta.hat,"; not in [1, Inf); is set to 1")
            theta.hat <- 1
        }
        ## mimic optimize()'s return structure
        list(minimum = theta.hat, objective = 0)
    }
}
### (Simulated) maximum likelihood estimation ##################################
##' (Simulated) maximum likelihood estimation for nested Archimedean copulas
##' -- *Fast* version (based on optimize()) called from enacopula
##'
##' @title (Simulated) maximum likelihood estimation for nested Archimedean copulas
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param n.MC if > 0 SMLE is applied with sample size equal to n.MC; otherwise,
##' MLE is applied
##' @param interval bivariate vector denoting the interval where optimization takes
##' place
##' @param ... additional parameters for optimize
##' @return (simulated) maximum likelihood estimator; return value of optimize
##' @author Marius Hofert
.emle <- function(u, cop, n.MC=0, interval=initOpt(cop@copula@name), ...)
{
    stopifnot(is(cop, "outer_nacopula"))
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    if(!is.matrix(u)) u <- rbind(u, deparse.level = 0L)
    ## negative log-likelihood as a function of the copula parameter;
    ## n.MC > 0 switches .dnacopula() to its simulated (SMLE) version
    negLogLik <- function(theta) {
        cop@copula@theta <- theta
        -sum(.dnacopula(u, cop, n.MC=n.MC, log=TRUE))
    }
    optimize(negLogLik, interval=interval, ...)
}
##' (Simulated) maximum likelihood estimation for nested Archimedean copulas
##'
##' @title (Simulated) maximum likelihood estimation for nested Archimedean copulas
##' @param u matrix of realizations following the copula
##' @param cop outer_nacopula to be estimated
##' @param n.MC if > 0 SMLE is applied with sample size equal to n.MC; otherwise,
##' MLE is applied
##' @param optimizer optimizer used (if optimizer=NULL (or NA), then mle (instead
##' of mle2) is used with the provided method)
##' @param method optim's method to be used (when optimizer=NULL or "optim" and
##' in these cases method is a required argument)
##' @param interval bivariate vector denoting the interval where optimization
##' takes place
##' @param start list containing the initial value(s) (unfortunately required by mle2)
##' @param ... additional parameters for optimize
##' @return an "mle2" object with the (simulated) maximum likelihood estimator.
##' @author Martin Maechler and Marius Hofert
##' Note: this is the *slower* version which also allows for profiling
emle <- function(u, cop, n.MC=0, optimizer="optimize", method,
                 interval=initOpt(cop@copula@name),
                 ##vvv awkward to be needed, but it is - by mle2():
                 start = list(theta=initOpt(cop@copula@name, interval=FALSE, u=u)),
                 ...)
{
    stopifnot(is(cop, "outer_nacopula"), is.numeric(d <- ncol(u)), d >= 2,
              max(cop@comp) == d)
    ## nLL <- function(theta) { # -log-likelihood
    ##     cop@copula@theta <- theta
    ##     -sum(.dnacopula(u, cop, n.MC=n.MC, log=TRUE))
    ## }
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    else ## For (*non*-nested) copulas only:
        ## n.MC > 0 switches dacopula() to its simulated (SMLE) version
        nLL <- function(theta) # -(log-likelihood)
            -sum(cop@copula@dacopula(u, theta, n.MC=n.MC, log=TRUE))
    ## optimization: dispatch on 'optimizer'; NULL/NA falls back to stats4::mle()
    if(!(is.null(optimizer) || is.na(optimizer))) {
        ## stopifnot(requireNamespace("bbmle"))
        if(optimizer == "optimize")
            ## 1D optimization on [interval[1], interval[2]]
            bbmle::mle2(minuslogl = nLL, optimizer = "optimize",
                        lower = interval[1], upper = interval[2],
                        ##vvv awkward to be needed, but it is - by mle2():
                        start=start, ...)
        else if(optimizer == "optim") {
            message(" optimizer = \"optim\" -- using mle2(); consider optimizer=NULL instead")
            bbmle::mle2(minuslogl = nLL, optimizer = "optim", method = method,
                        start=start, ...)
        }
        else ## "general"
            bbmle::mle2(minuslogl = nLL, optimizer = optimizer, start=start, ...)
    }
    else
        ## use optim() .. [which uses suboptimal method for 1D, but provides Hessian]
        mle(minuslogl = nLL, method = method, start=start, ...)
}
### Estimation wrapper #########################################################
##' Computes the pseudo-observations for the given data matrix
##'
##' @title Pseudo-observations
##' @param x matrix of random variates to be converted to pseudo-observations
##' @param na.last passed to rank()
##' @param ties.method passed to rank()
##' @param lower.tail if FALSE, pseudo-observations obtained by applying the
##' empirical marginal survival functions are returned.
##' @return pseudo-observations (matrix of the same dimensions as x)
##' @author Marius Hofert
pobs <- function(x, na.last = "keep",
                 ## formals(rank) works in pre-2015-10-15 and newer version of rank():
                 ties.method = eval(formals(rank)$ties.method),
                 lower.tail = TRUE) {
    ties.method <- match.arg(ties.method)
    ## column-wise ranks, rescaled by n+1 so values lie strictly inside (0,1)
    denom <- nrow(x) + 1
    U <- apply(x, 2L, rank, na.last = na.last, ties.method = ties.method)/denom
    ## lower.tail = FALSE corresponds to the empirical survival functions
    if (!lower.tail) U <- 1 - U
    U
}
##' Computes different parameter estimates for a nested Archimedean copula
##'
##' @title Estimation procedures for nested Archimedean copulas
##' @param u data matrix (of pseudo-observations or from the copula "directly")
##' @param cop outer_nacopula to be estimated
##' @param method estimation method; can be
##' "mle" MLE
##' "smle" SMLE
##' "dmle" MLE based on the diagonal
##' "mde.chisq.CvM" minimum distance estimation based on the chisq distribution and CvM distance
##' "mde.chisq.KS" minimum distance estimation based on the chisq distribution and KS distance
##' "mde.gamma.CvM" minimum distance estimation based on the Erlang distribution and CvM distance
##' "mde.gamma.KS" minimum distance estimation based on the Erlang distribution and KS distance
##' "tau.tau.mean" averaged pairwise Kendall's tau estimator
##' "tau.theta.mean" average of Kendall's tau estimators
##' "beta" multivariate Blomqvist's beta estimator
##' @param n.MC if > 0 it denotes the sample size for SMLE
##' @param interval initial optimization interval for "mle", "smle", and "dmle"
##' @param xargs additional arguments for the estimation procedures
##' @param ... additional parameters for optimize
##' @return estimated value/vector according to the chosen method
##' @author Marius Hofert
enacopula <- function(u, cop, method=c("mle", "smle", "dmle", "mde.chisq.CvM",
                                       "mde.chisq.KS", "mde.gamma.CvM", "mde.gamma.KS",
                                       "tau.tau.mean", "tau.theta.mean", "beta"),
                      n.MC = if(method=="smle") 10000 else 0,
                      interval=initOpt(cop@copula@name),
                      xargs=list(), ...)
{
    ## setup
    if(!is.matrix(u)) u <- rbind(u, deparse.level = 0L)
    ## Match 'method' *before* anything that evaluates it or n.MC:
    ## the default of n.MC is the lazily-evaluated 'if(method=="smle") ...'
    ## and the consistency check below compares method as a scalar; before
    ## match.arg() 'method' may still be the full length-10 default vector,
    ## which would make these comparisons fail.
    method <- match.arg(method)
    stopifnot(0 <= u, u <= 1, is(cop, "outer_nacopula"), (d <- ncol(u)) >= 2,
              max(cop@comp) == d, n.MC >= 0, is.list(xargs))
    if(length(cop@childCops))
        stop("currently, only Archimedean copulas are supported")
    if(n.MC > 0 && method != "smle")
        ## stop() concatenates its arguments (no format substitution),
        ## hence build the message with sprintf()
        stop(sprintf("n.MC > 0 is not applicable to method '%s'", method))
    ## main part: dispatch to the individual estimation routines
    res <- switch(method,
                  "mle" = do.call(.emle, c(list(u, cop,
                                                interval = interval, ...), xargs)),
                  "smle" = do.call(.emle, c(list(u, cop, n.MC = n.MC,
                                                 interval = interval, ...), xargs)),
                  "dmle" = do.call(edmle, c(list(u, cop,
                                                 interval = interval, ...), xargs)),
                  "mde.chisq.CvM" = do.call(emde, c(list(u, cop, "mde.chisq.CvM",
                                                         interval = interval, ...), xargs)),
                  "mde.chisq.KS" = do.call(emde, c(list(u, cop, "mde.chisq.KS",
                                                        interval = interval, ...), xargs)),
                  "mde.gamma.CvM" = do.call(emde, c(list(u, cop, "mde.gamma.CvM",
                                                         interval = interval, ...), xargs)),
                  "mde.gamma.KS" = do.call(emde, c(list(u, cop, "mde.gamma.KS",
                                                        interval = interval, ...), xargs)),
                  "tau.tau.mean" = do.call(etau, c(list(u, cop, "tau.mean", ...),
                                                   xargs)),
                  "tau.theta.mean" = do.call(etau, c(list(u, cop, "theta.mean", ...),
                                                     xargs)),
                  "beta" = do.call(ebeta, c(list(u, cop,
                                                 interval = interval, ...), xargs)),
                  stop("wrong estimation method for enacopula"))
    ## FIXME: deal with result, check details, give warnings
    ## return the estimate: optimize()-style results carry $minimum,
    ## ebeta() returns a root-finding result ($root), etau() a plain number
    switch(method,
           "mle" = res$minimum,
           "smle" = res$minimum,
           "dmle" = res$minimum,
           "mde.chisq.CvM" = res$minimum,
           "mde.chisq.KS" = res$minimum,
           "mde.gamma.CvM" = res$minimum,
           "mde.gamma.KS" = res$minimum,
           "tau.tau.mean" = res,
           "tau.theta.mean" = res,
           "beta" = res$root,
           stop("wrong estimation method"))
}
|
#' Arm-level changes
#'
#' Get the altered chromosome arms in sample. Does not include the acrocentric p arms of chromosomes 12, 14, 15, 31, and 22.
#'
#' @param segs FACETS segmentation output.
#' @param ploidy Sample ploidy.
#' @param genome Genome build.
#' @param algorithm Choice between FACETS \code{em} and \code{cncf} algorithm.
#'
#' @return List of items, containing:
#' @return \code{data.frame} for all genes mapping onto a segment in the output segmentation, with the columns:
#' \itemize{
#' \item{\code{genome_doubled}:} {Boolean indicating whether sample genome is doubled.}
#' \item{\code{fraction_cna}:} {Fraction of genome altered.}
#' \item{\code{weighted_fraction_cna}:} {A weighted version of \code{fraction_cna} where only altered chromosomes are counted and weighted according to their length relative to total genome.}
#' \item{\code{aneuploidy_score}:} {Count of the number of altered arms, see source URL.}
#' \item{\code{full_output}:} {Full per-arm copy-number status.}
#' }
#'
#' @importFrom dplyr left_join filter summarize select %>% mutate_at case_when group_by rowwise arrange
#' @importFrom purrr map_dfr map_lgl map_chr discard
#' @importFrom tidyr gather separate_rows
#' @importFrom plyr mapvalues
#'
#' @source \url{https://www.ncbi.nlm.nih.gov/pubmed/29622463}
#' @export
arm_level_changes = function(segs,
                             ploidy,
                             genome = c('hg19', 'hg18', 'hg38'),
                             algorithm = c('em', 'cncf')) {
    genome_choice = get(match.arg(genome, c('hg19', 'hg18', 'hg38'), several.ok = FALSE))
    algorithm = match.arg(algorithm, c('em', 'cncf'), several.ok = FALSE)
    # Get WGD status
    fcna_output = calculate_fraction_cna(segs, ploidy, genome, algorithm)
    wgd = fcna_output$genome_doubled
    # Create chrom_info for sample
    sample_chrom_info = get_sample_genome(segs, genome_choice)
    segs = parse_segs(segs, algorithm) %>%
        left_join(., select(sample_chrom_info, chr, centromere), by = c('chrom' = 'chr'))
    # Find altered arms
    # Split centromere-spanning segments
    # Remove segments where lcn is NA
    segs = filter(segs, !is.na(lcn)) %>%
        rowwise() %>%
        mutate(
            arm = case_when(
                start < centromere & end <= centromere ~ 'p',
                start >= centromere ~ 'q',
                TRUE ~ 'span'),
            start = ifelse(arm == 'span', paste(c(start, centromere), collapse = ','), as.character(start)),
            end = ifelse(arm == 'span', paste(c(centromere, end), collapse = ','), as.character(end))
        ) %>%
        separate_rows(start, end, sep = ',') %>%
        mutate(start = as.numeric(start),
               end = as.numeric(end),
               arm = case_when(
                   start < centromere & end <= centromere ~ paste0(chrom, 'p'),
                   start >= centromere ~ paste0(chrom, 'q')),
               length = end - start)
    # Find distinct copy-number states
    # Requires that >=80% exist at given copy-number state
    acro_arms = c('13p', '14p', '15p', '21p', '22p') # acrocentric chromosomes
    # BUG FIX: derive the arm list from the chromosomes present in the *input*
    # sample, not from the packaged test object `test_facets_output`
    chrom_arms = setdiff(paste0(rep(unique(segs$chrom), each = 2), c('p', 'q')), acro_arms)
    segs = group_by(segs, arm, tcn, lcn) %>%
        summarize(cn_length = sum(length)) %>%
        group_by(arm) %>%
        mutate(arm_length = sum(cn_length),
               majority = cn_length >= 0.8 * arm_length,
               frac_of_arm = signif(cn_length/arm_length, 2),
               cn_state = mapvalues(paste(wgd, tcn-lcn, lcn, sep = ':'),
                                    copy_number_states$map_string, copy_number_states$call,
                                    warn_missing = FALSE)) %>%
        ungroup() %>%
        filter(majority == TRUE, arm %in% chrom_arms) %>%
        select(-majority) %>%
        mutate(arm = factor(arm, chrom_arms, ordered = TRUE)) %>%
        arrange(arm)
    altered_arms = filter(segs, cn_state != 'DIPLOID')
    # Weighted fraction copy-number altered
    frac_altered_w = select(sample_chrom_info, chr, p = plength, q = qlength) %>%
        gather(arm, length, -chr) %>%
        filter(paste0(chr, arm) %in% chrom_arms) %>%
        summarize(sum(length[paste0(chr, arm) %in% altered_arms$arm]) / sum(length)) %>%
        as.numeric()
    list(
        genome_doubled = fcna_output$genome_doubled,
        fraction_cna = fcna_output$fraction_cna,
        weighted_fraction_cna = frac_altered_w,
        # BUG FIX: count altered arms with nrow(); length() of a data frame
        # returns its number of columns, not its number of rows
        aneuploidy_score = nrow(altered_arms),
        full_output = segs
    )
}
#' Arm-level changes (DMP variant)
#'
#' Annotates each FACETS segment with its chromosome arm, centers log-ratios
#' on the supplied diploid log-ratio, rescues obvious CNLOH/het-loss
#' mis-calls, and assigns per-arm gain/loss calls with sex-aware handling of
#' chromosome X. The filtered output keeps one row per arm (its longest
#' segment).
#'
#' @param segs FACETS segmentation output.
#' @param ploidy Sample ploidy.
#' @param genome Genome build.
#' @param algorithm Choice between FACETS \code{em} and \code{cncf} algorithm.
#' @param diplogr Diploid log-ratio used to center \code{cnlr.median}.
#'
#' @return List containing \code{full_output} (all annotated segments) and
#'   \code{filtered_output} (one row per arm, longest segment only).
#' @export
arm_level_changes_dmp = function(segs,
                                 ploidy,
                                 genome = c('hg19', 'hg18', 'hg38'),
                                 algorithm = c('em', 'cncf'),
                                 diplogr) {

    genome_choice = get(match.arg(genome, c('hg19', 'hg18', 'hg38'), several.ok = FALSE))
    algorithm = match.arg(algorithm, c('em', 'cncf'), several.ok = FALSE)

    # Get WGD status
    fcna_output = calculate_fraction_cna(segs, ploidy, genome, algorithm)
    wgd = fcna_output$genome_doubled
    sample_ploidy = ifelse(wgd, round(ploidy), 2) # NOTE(review): currently unused downstream

    sample_chrom_info = get_sample_genome(segs, genome_choice)
    segs = parse_segs(segs, algorithm) %>%
        left_join(., select(sample_chrom_info, chr, centromere), by = c('chrom' = 'chr'))

    # Split centromere-spanning segments into separate p and q pieces
    segs = segs %>%
        rowwise() %>%
        mutate(
            arm = case_when(
                start < centromere & end <= centromere ~ 'p',
                start >= centromere ~ 'q',
                TRUE ~ 'span'),
            start = ifelse(arm == 'span', paste(c(start, centromere), collapse = ','), as.character(start)),
            end = ifelse(arm == 'span', paste(c(centromere, end), collapse = ','), as.character(end))
        ) %>%
        separate_rows(start, end, sep = ',') %>%
        mutate(start = as.numeric(start),
               end = as.numeric(end),
               arm = case_when(
                   start < centromere & end <= centromere ~ paste0(chrom, 'p'),
                   start >= centromere ~ paste0(chrom, 'q')),
               length = end - start,
               cnlr.adj = cnlr.median - diplogr, # log-ratio centered on supplied diploid level
               phet = nhet / num.mark) # fraction of heterozygous SNPs per segment

    arm_lengths = segs %>%
        group_by(arm) %>%
        summarize(arm_length = sum(length))
    segs = left_join(segs, arm_lengths)

    # Arms eligible for calling; acrocentric p arms are excluded
    acro_arms = c('13p', '14p', '15p', '21p', '22p') # acrocentric chromosomes
    # Derive the arm universe from the chromosomes present in this sample.
    # (Bug fix: previously referenced the package test fixture `test_facets_output`.)
    chrom_arms = setdiff(paste0(rep(unique(segs$chrom), each = 2), c('p', 'q')), acro_arms)

    # Select the copy-number columns matching the chosen algorithm
    segs = segs %>%
        mutate(tcn = case_when(algorithm=="em" ~ tcn.em, TRUE ~ tcn),
               lcn = case_when(algorithm=="em" ~ lcn.em, TRUE ~ lcn),
               cf = case_when(algorithm=="em" ~ cf.em, TRUE ~ cf))

    # Annotate sample sex based on proportion of heterozygous SNPs on chrX
    chrx = segs %>% filter(chrom==23) %>% group_by(chrom) %>% summarize(prop_het = max(phet))
    sex = case_when(chrx$prop_het > 0.01 ~ "Female", TRUE ~"Male")

    # Correct NAs for high-confidence CNLOH using theoretical cnlr/valor
    # curves parameterized by cellular fraction phi (m/p = major/minor copies)
    phis = seq(0, 0.9, by = 0.01)
    cnlr = function(phi, m = 0, p = 1) {
        log2((2 * (1 - phi) + (m + p) * phi) / 2)
    }
    valor = function(phi, m = 0, p = 1) {
        abs(log((m * phi + 1 - phi) / (p * phi + 1 - phi)))
    }
    cnloh_line = data.frame(
        phi = phis,
        cnlr = sapply(
            phis,
            function(phi){cnlr(phi, m = 2, p = 0)}
        ),
        valor = sapply(
            phis,
            function(phi){valor(phi, m = 2, p = 0)}
        ))
    hetloss_line = data.frame(
        phi = phis,
        cnlr = sapply(
            phis,
            function(phi){valor; cnlr(phi, m = 0, p = 1)}
        ),
        valor = sapply(
            phis,
            function(phi){valor(phi, m = 0, p = 1)}
        ))

    # Calculate estimated cellular fraction, rescuing obvious CNLOH misses.
    # NOTE(review): tcn is reassigned before cf within this mutate(), so the
    # third cf branch (tcn==2 het-loss) can no longer match the segments the
    # first branch just corrected to tcn=1 -- verify intended order.
    # NOTE(review): findInterval() returns 0 when mafR is below the first
    # tabulated valor, which would index a zero-row data.frame -- verify inputs.
    segs = segs %>%
        mutate(
            # correct CNLOH to het loss for obvious errors
            tcn = case_when(
                tcn==2 & cnlr.adj < -0.5 & mafR>0.5 & lcn==0 & chrom !=23 ~ 1,
                TRUE ~ as.numeric(tcn)),
            cf = case_when(
                is.na(lcn) & tcn ==2 & mafR>1 & nhet >= 5 & abs(cnlr.adj)<0.2 ~ cnloh_line[findInterval(abs(mafR), cnloh_line$valor),]$phi,
                lcn==1 & tcn==2 & mafR>1 & abs(cnlr.adj)<0.2 & nhet>=5 ~ cnloh_line[findInterval(abs(mafR), cnloh_line$valor),]$phi,
                tcn==2 & cnlr.adj < -0.5 & mafR>0.5 & lcn==0 & chrom !=23 ~ hetloss_line[findInterval(abs(mafR), hetloss_line$valor),]$phi,
                TRUE ~ as.numeric(cf)),
            lcn = case_when(
                is.na(lcn) & tcn ==2 & mafR>1 & nhet>=5 & abs(cnlr.adj)<0.2 ~ 0,
                lcn==1 & tcn ==2 & mafR>1 & abs(cnlr.adj) <0.2 & nhet>=5 ~ 0,
                TRUE ~ as.numeric(lcn)),
            arm_fraction = length / arm_length
        )

    # Map (wgd, major cn, minor cn) combinations to named copy-number states
    segs = segs %>%
        mutate(
            cn_state = mapvalues(paste(wgd, tcn-lcn, lcn, sep = ':'),
                                 copy_number_states$map_string,
                                 copy_number_states$call,
                                 warn_missing = FALSE),
            cn_state_num = mapvalues(paste(wgd, tcn-lcn, lcn, sep = ':'),
                                     copy_number_states$map_string,
                                     copy_number_states$numeric_call,
                                     warn_missing = FALSE)
        )

    # Fix for AMP with NA lcn
    segs = segs %>%
        mutate(
            cn_state = case_when(tcn>5 & is.na(lcn) ~ "AMP", TRUE ~ cn_state),
            cn_state_num = case_when(tcn>5 & is.na(lcn) ~ "2", TRUE ~ cn_state_num)
        )

    # Correct X for patient sex
    segs = segs %>%
        mutate(cn_state = case_when(chrom==23 & sex=="Male" & tcn==1 ~ "DIPLOID",
                                    chrom==23 & sex=="Male" & tcn > 1 ~ "GAIN",
                                    cn_state=="FALSE:NA:NA" ~ "Indeterminate", # handle NA values for lcn
                                    cn_state=="TRUE:NA:NA" ~ "Indeterminate",
                                    TRUE ~ cn_state),
               cn_state_num = case_when(chrom==23 & sex=="Male" & tcn==1 ~ 0,
                                        chrom==23 & sex=="Male" & tcn > 1 ~ 1,
                                        cn_state=="Indeterminate" ~ 0,
                                        TRUE ~ as.numeric(cn_state_num)),
               lcn = case_when(chrom==23 & sex=="Male" ~ 0, TRUE ~ as.numeric(lcn))
        )

    segs.full = segs %>%
        mutate(chrom = gsub('23', 'X', chrom),
               arm = gsub('23', 'X', arm))

    # val_arms = c("3q_Loss", "5q_Loss", "7q_Loss",
    #              "8p_Gain", "8q_Gain", "11q_Gain", "11q_Loss",
    #              "12p_Loss", "12p_Gain", "12q_Gain","13q_Loss",
    #              "17p_Loss", "19p_Gain", "19q_Gain", "20q_Loss")

    # Assign per-segment gain/loss class.
    # NOTE(review): chrom was recoded to 'X' above, so the chrom==23 male-X
    # branch below can no longer match -- verify intended order.
    segs.full = segs.full %>%
        mutate(Class = case_when(cn_state=="DIPLOID" ~ 'Diploid', # exclude 1,0 males from being called loss
                                 chrom==23 & sex=="Male" & cn_state =='GAIN' ~ 'Gain',
                                 tcn==2 & lcn==0 ~ 'Loss', # CNLOH
                                 # **gain/loss definitions do not account for WGD, e.g. 3,0 | 4,0 states**
                                 tcn>2 ~ 'Gain',
                                 tcn<2 ~ 'Loss',
                                 cn_state=="Indeterminate" ~ "Indeterminate",
                                 TRUE ~ 'Indeterminate'),
               arm_change = paste(arm, Class, sep = "_"))

    # Keep only the single longest segment per arm for the filtered output
    maxarm = segs.full %>%
        #filter(arm_change %in% val_arms) %>%
        group_by(arm) %>%
        summarize(max_arm_len = max(length)) %>%
        mutate(key = paste(arm, max_arm_len, sep = "_"))

    chrom_arms = gsub('23', 'X', chrom_arms)
    segs.filt = segs.full %>%
        mutate(key = paste(arm, length, sep = "_")) %>%
        filter(key %in% maxarm$key, arm %in% chrom_arms) %>%
        dplyr::select(arm, tcn, lcn, cf, arm_fraction, cn_state, Class)

    list(
        full_output = segs.full,
        filtered_output = segs.filt
    )
}
| /R/arm-level-changes.R | permissive | rptashkin/facets-suite | R | false | false | 12,337 | r | #' Arm-level changes
#'
#' Get the altered chromosome arms in sample. Does not include the acrocentric p arms of chromosomes 12, 14, 15, 31, and 22.
#'
#' @param segs FACETS segmentation output.
#' @param ploidy Sample ploidy.
#' @param genome Genome build.
#' @param algorithm Choice between FACETS \code{em} and \code{cncf} algorithm.
#'
#' @return List of items, containing:
#' @return \code{data.frame} for all genes mapping onto a segment in the output segmentation, with the columns:
#' \itemize{
#' \item{\code{genome_doubled}:} {Boolean indicating whether sample genome is doubled.}
#' \item{\code{fraction_cna}:} {Fraction of genome altered.}
#' \item{\code{weighted_fraction_cna}:} {A weighted version of \code{fraction_cna} where only altered chromosomes are counted and weighted according to their length relative to total genome.}
#' \item{\code{aneuploidy_scores}:} {Count of the number of altered arms, see source URL.}
#' \item{\code{full_output}:} {Full per-arm copy-number status.}
#' }
#'
#' @importFrom dplyr left_join filter summarize select %>% mutate_at case_when group_by rowwise arrange
#' @importFrom purrr map_dfr map_lgl map_chr discard
#' @importFrom tidyr gather separate_rows
#' @importFrom plyr mapvalues
#'
#' @source \url{https://www.ncbi.nlm.nih.gov/pubmed/29622463}
#' @export
arm_level_changes = function(segs,
ploidy,
genome = c('hg19', 'hg18', 'hg38'),
algorithm = c('em', 'cncf')) {
genome_choice = get(match.arg(genome, c('hg19', 'hg18', 'hg38'), several.ok = FALSE))
algorithm = match.arg(algorithm, c('em', 'cncf'), several.ok = FALSE)
# Get WGD status
fcna_output = calculate_fraction_cna(segs, ploidy, genome, algorithm)
wgd = fcna_output$genome_doubled
sample_ploidy = ifelse(wgd, round(ploidy), 2)
# Create chrom_info for sample
sample_chrom_info = get_sample_genome(segs, genome_choice)
segs = parse_segs(segs, algorithm) %>%
left_join(., select(sample_chrom_info, chr, centromere), by = c('chrom' = 'chr'))
# Find altered arms
# Split centromere-spanning segments
# Remove segments where lcn is NA
segs = filter(segs, !is.na(lcn)) %>%
rowwise() %>%
mutate(
arm = case_when(
start < centromere & end <= centromere ~ 'p',
start >= centromere ~ 'q',
TRUE ~ 'span'),
start = ifelse(arm == 'span', paste(c(start, centromere), collapse = ','), as.character(start)),
end = ifelse(arm == 'span', paste(c(centromere, end), collapse = ','), as.character(end))
) %>%
separate_rows(start, end, sep = ',') %>%
mutate(start = as.numeric(start),
end = as.numeric(end),
arm = case_when(
start < centromere & end <= centromere ~ paste0(chrom, 'p'),
start >= centromere ~ paste0(chrom, 'q')),
length = end - start)
# Find distinct copy-number states
# Requires that >=80% exist at given copy-number state
acro_arms = c('13p', '14p', '15p', '21p', '22p') # acrocentric chromsomes
chrom_arms = setdiff(paste0(rep(unique(test_facets_output$segs$chrom), each = 2), c('p', 'q')), acro_arms)
segs = group_by(segs, arm, tcn, lcn) %>%
summarize(cn_length = sum(length)) %>%
group_by(arm) %>%
mutate(arm_length = sum(cn_length),
majority = cn_length >= 0.8 * arm_length,
frac_of_arm = signif(cn_length/arm_length, 2),
cn_state = mapvalues(paste(wgd, tcn-lcn, lcn, sep = ':'),
copy_number_states$map_string, copy_number_states$call,
warn_missing = FALSE)) %>%
ungroup() %>%
filter(majority == TRUE, arm %in% chrom_arms) %>%
select(-majority) %>%
mutate(arm = factor(arm, chrom_arms, ordered = T)) %>%
arrange(arm)
altered_arms = filter(segs, cn_state != 'DIPLOID')
# Weighted fraction copy-number altered
frac_altered_w = select(sample_chrom_info, chr, p = plength, q = qlength) %>%
gather(arm, length, -chr) %>%
filter(paste0(chr, arm) %in% chrom_arms) %>%
summarize(sum(length[paste0(chr, arm) %in% altered_arms$arm]) / sum(length)) %>%
as.numeric()
list(
genome_doubled = fcna_output$genome_doubled,
fraction_cna = fcna_output$fraction_cna,
weighted_fraction_cna = frac_altered_w,
aneuploidy_score = length(altered_arms),
full_output = segs
)
}
#' Arm-level changes (DMP variant)
#'
#' Annotates each FACETS segment with its chromosome arm, centers log-ratios
#' on the supplied diploid log-ratio, rescues obvious CNLOH/het-loss
#' mis-calls, and assigns per-arm gain/loss calls with sex-aware handling of
#' chromosome X. The filtered output keeps one row per arm (its longest
#' segment).
#'
#' @param segs FACETS segmentation output.
#' @param ploidy Sample ploidy.
#' @param genome Genome build.
#' @param algorithm Choice between FACETS \code{em} and \code{cncf} algorithm.
#' @param diplogr Diploid log-ratio used to center \code{cnlr.median}.
#'
#' @return List containing \code{full_output} (all annotated segments) and
#'   \code{filtered_output} (one row per arm, longest segment only).
#' @export
arm_level_changes_dmp = function(segs,
                                 ploidy,
                                 genome = c('hg19', 'hg18', 'hg38'),
                                 algorithm = c('em', 'cncf'),
                                 diplogr) {

    genome_choice = get(match.arg(genome, c('hg19', 'hg18', 'hg38'), several.ok = FALSE))
    algorithm = match.arg(algorithm, c('em', 'cncf'), several.ok = FALSE)

    # Get WGD status
    fcna_output = calculate_fraction_cna(segs, ploidy, genome, algorithm)
    wgd = fcna_output$genome_doubled
    sample_ploidy = ifelse(wgd, round(ploidy), 2) # NOTE(review): currently unused downstream

    sample_chrom_info = get_sample_genome(segs, genome_choice)
    segs = parse_segs(segs, algorithm) %>%
        left_join(., select(sample_chrom_info, chr, centromere), by = c('chrom' = 'chr'))

    # Split centromere-spanning segments into separate p and q pieces
    segs = segs %>%
        rowwise() %>%
        mutate(
            arm = case_when(
                start < centromere & end <= centromere ~ 'p',
                start >= centromere ~ 'q',
                TRUE ~ 'span'),
            start = ifelse(arm == 'span', paste(c(start, centromere), collapse = ','), as.character(start)),
            end = ifelse(arm == 'span', paste(c(centromere, end), collapse = ','), as.character(end))
        ) %>%
        separate_rows(start, end, sep = ',') %>%
        mutate(start = as.numeric(start),
               end = as.numeric(end),
               arm = case_when(
                   start < centromere & end <= centromere ~ paste0(chrom, 'p'),
                   start >= centromere ~ paste0(chrom, 'q')),
               length = end - start,
               cnlr.adj = cnlr.median - diplogr, # log-ratio centered on supplied diploid level
               phet = nhet / num.mark) # fraction of heterozygous SNPs per segment

    arm_lengths = segs %>%
        group_by(arm) %>%
        summarize(arm_length = sum(length))
    segs = left_join(segs, arm_lengths)

    # Arms eligible for calling; acrocentric p arms are excluded
    acro_arms = c('13p', '14p', '15p', '21p', '22p') # acrocentric chromosomes
    # Derive the arm universe from the chromosomes present in this sample.
    # (Bug fix: previously referenced the package test fixture `test_facets_output`.)
    chrom_arms = setdiff(paste0(rep(unique(segs$chrom), each = 2), c('p', 'q')), acro_arms)

    # Select the copy-number columns matching the chosen algorithm
    segs = segs %>%
        mutate(tcn = case_when(algorithm=="em" ~ tcn.em, TRUE ~ tcn),
               lcn = case_when(algorithm=="em" ~ lcn.em, TRUE ~ lcn),
               cf = case_when(algorithm=="em" ~ cf.em, TRUE ~ cf))

    # Annotate sample sex based on proportion of heterozygous SNPs on chrX
    chrx = segs %>% filter(chrom==23) %>% group_by(chrom) %>% summarize(prop_het = max(phet))
    sex = case_when(chrx$prop_het > 0.01 ~ "Female", TRUE ~"Male")

    # Correct NAs for high-confidence CNLOH using theoretical cnlr/valor
    # curves parameterized by cellular fraction phi (m/p = major/minor copies)
    phis = seq(0, 0.9, by = 0.01)
    cnlr = function(phi, m = 0, p = 1) {
        log2((2 * (1 - phi) + (m + p) * phi) / 2)
    }
    valor = function(phi, m = 0, p = 1) {
        abs(log((m * phi + 1 - phi) / (p * phi + 1 - phi)))
    }
    cnloh_line = data.frame(
        phi = phis,
        cnlr = sapply(
            phis,
            function(phi){cnlr(phi, m = 2, p = 0)}
        ),
        valor = sapply(
            phis,
            function(phi){valor(phi, m = 2, p = 0)}
        ))
    hetloss_line = data.frame(
        phi = phis,
        cnlr = sapply(
            phis,
            function(phi){cnlr(phi, m = 0, p = 1)}
        ),
        valor = sapply(
            phis,
            function(phi){valor(phi, m = 0, p = 1)}
        ))

    # Calculate estimated cellular fraction, rescuing obvious CNLOH misses.
    # NOTE(review): tcn is reassigned before cf within this mutate(), so the
    # third cf branch (tcn==2 het-loss) can no longer match the segments the
    # first branch just corrected to tcn=1 -- verify intended order.
    # NOTE(review): findInterval() returns 0 when mafR is below the first
    # tabulated valor, which would index a zero-row data.frame -- verify inputs.
    segs = segs %>%
        mutate(
            # correct CNLOH to het loss for obvious errors
            tcn = case_when(
                tcn==2 & cnlr.adj < -0.5 & mafR>0.5 & lcn==0 & chrom !=23 ~ 1,
                TRUE ~ as.numeric(tcn)),
            cf = case_when(
                is.na(lcn) & tcn ==2 & mafR>1 & nhet >= 5 & abs(cnlr.adj)<0.2 ~ cnloh_line[findInterval(abs(mafR), cnloh_line$valor),]$phi,
                lcn==1 & tcn==2 & mafR>1 & abs(cnlr.adj)<0.2 & nhet>=5 ~ cnloh_line[findInterval(abs(mafR), cnloh_line$valor),]$phi,
                tcn==2 & cnlr.adj < -0.5 & mafR>0.5 & lcn==0 & chrom !=23 ~ hetloss_line[findInterval(abs(mafR), hetloss_line$valor),]$phi,
                TRUE ~ as.numeric(cf)),
            lcn = case_when(
                is.na(lcn) & tcn ==2 & mafR>1 & nhet>=5 & abs(cnlr.adj)<0.2 ~ 0,
                lcn==1 & tcn ==2 & mafR>1 & abs(cnlr.adj) <0.2 & nhet>=5 ~ 0,
                TRUE ~ as.numeric(lcn)),
            arm_fraction = length / arm_length
        )

    # Map (wgd, major cn, minor cn) combinations to named copy-number states
    segs = segs %>%
        mutate(
            cn_state = mapvalues(paste(wgd, tcn-lcn, lcn, sep = ':'),
                                 copy_number_states$map_string,
                                 copy_number_states$call,
                                 warn_missing = FALSE),
            cn_state_num = mapvalues(paste(wgd, tcn-lcn, lcn, sep = ':'),
                                     copy_number_states$map_string,
                                     copy_number_states$numeric_call,
                                     warn_missing = FALSE)
        )

    # Fix for AMP with NA lcn
    segs = segs %>%
        mutate(
            cn_state = case_when(tcn>5 & is.na(lcn) ~ "AMP", TRUE ~ cn_state),
            cn_state_num = case_when(tcn>5 & is.na(lcn) ~ "2", TRUE ~ cn_state_num)
        )

    # Correct X for patient sex
    segs = segs %>%
        mutate(cn_state = case_when(chrom==23 & sex=="Male" & tcn==1 ~ "DIPLOID",
                                    chrom==23 & sex=="Male" & tcn > 1 ~ "GAIN",
                                    cn_state=="FALSE:NA:NA" ~ "Indeterminate", # handle NA values for lcn
                                    cn_state=="TRUE:NA:NA" ~ "Indeterminate",
                                    TRUE ~ cn_state),
               cn_state_num = case_when(chrom==23 & sex=="Male" & tcn==1 ~ 0,
                                        chrom==23 & sex=="Male" & tcn > 1 ~ 1,
                                        cn_state=="Indeterminate" ~ 0,
                                        TRUE ~ as.numeric(cn_state_num)),
               lcn = case_when(chrom==23 & sex=="Male" ~ 0, TRUE ~ as.numeric(lcn))
        )

    segs.full = segs %>%
        mutate(chrom = gsub('23', 'X', chrom),
               arm = gsub('23', 'X', arm))

    # val_arms = c("3q_Loss", "5q_Loss", "7q_Loss",
    #              "8p_Gain", "8q_Gain", "11q_Gain", "11q_Loss",
    #              "12p_Loss", "12p_Gain", "12q_Gain","13q_Loss",
    #              "17p_Loss", "19p_Gain", "19q_Gain", "20q_Loss")

    # Assign per-segment gain/loss class.
    # NOTE(review): chrom was recoded to 'X' above, so the chrom==23 male-X
    # branch below can no longer match -- verify intended order.
    segs.full = segs.full %>%
        mutate(Class = case_when(cn_state=="DIPLOID" ~ 'Diploid', # exclude 1,0 males from being called loss
                                 chrom==23 & sex=="Male" & cn_state =='GAIN' ~ 'Gain',
                                 tcn==2 & lcn==0 ~ 'Loss', # CNLOH
                                 # **gain/loss definitions do not account for WGD, e.g. 3,0 | 4,0 states**
                                 tcn>2 ~ 'Gain',
                                 tcn<2 ~ 'Loss',
                                 cn_state=="Indeterminate" ~ "Indeterminate",
                                 TRUE ~ 'Indeterminate'),
               arm_change = paste(arm, Class, sep = "_"))

    # Keep only the single longest segment per arm for the filtered output
    maxarm = segs.full %>%
        #filter(arm_change %in% val_arms) %>%
        group_by(arm) %>%
        summarize(max_arm_len = max(length)) %>%
        mutate(key = paste(arm, max_arm_len, sep = "_"))

    chrom_arms = gsub('23', 'X', chrom_arms)
    segs.filt = segs.full %>%
        mutate(key = paste(arm, length, sep = "_")) %>%
        filter(key %in% maxarm$key, arm %in% chrom_arms) %>%
        dplyr::select(arm, tcn, lcn, cf, arm_fraction, cn_state, Class)

    list(
        full_output = segs.full,
        filtered_output = segs.filt
    )
}
|
## Purled from an R Markdown in-class practice document.
params <-
  list(id = "410886026")

## ----setup, include=FALSE----------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)

## ----------------------------------------------------------------------------------------------------------------------
# Install only the packages that are missing: the original unconditionally
# re-downloaded and re-installed all four packages on every run.
for (pkg in c("googlesheets4", "tidyverse", "dplyr", "lubridate")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(googlesheets4)
library(tidyverse)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bb-lm.R, R/bb-point.R
\name{bb_lm}
\alias{bb_lm}
\alias{bb_point}
\title{layer}
\usage{
bb_lm(mapping = NULL, data = NULL, ...)
bb_point(mapping = NULL, data = NULL, position = "identity", ...)
}
\arguments{
\item{mapping}{aesthetic mapping}
\item{data}{layer data}
\item{...}{Additional parameters passed on to the layer.}
\item{position}{one of 'identity' or 'jitter'}
}
\value{
A modified bbplot object
}
\description{
layer
}
\details{
bbplot layers
}
\author{
Guangchuang Yu
}
| /man/layer.Rd | no_license | nemochina2008/plotbb | R | false | true | 551 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bb-lm.R, R/bb-point.R
\name{bb_lm}
\alias{bb_lm}
\alias{bb_point}
\title{layer}
\usage{
bb_lm(mapping = NULL, data = NULL, ...)
bb_point(mapping = NULL, data = NULL, position = "identity", ...)
}
\arguments{
\item{mapping}{aesthetic mapping}
\item{data}{layer data}
\item{...}{Additional parameters passed on to the layer.}
\item{position}{one of 'identity' or 'jitter'}
}
\value{
A modified bbplot object
}
\description{
layer
}
\details{
bbplot layers
}
\author{
Guangchuang Yu
}
|
## Coursera "Getting and Cleaning Data" final assignment.
## Builds a tidy data set of per-subject, per-activity averages of the
## mean()/std() measurements in the UCI HAR data set.

## working directory and packages setting
## NOTE(review): hard-coded absolute path -- assumes the unzipped UCI HAR
## Dataset lives here; adjust before running on another machine.
wd <- 'C:/Users/mdlyz/Desktop/UCI HAR Dataset'
setwd(wd)
library(data.table)
## Merges the training data and the test sets to create one data set
## y_* hold activity ids, X_* hold the 561 feature measurements, and
## subject_* hold subject ids; rows align across the three files.
y_test <- data.table(read.table('./test/y_test.txt', header=F))
x_test <- data.table(read.table('./test/X_test.txt', header=F))
subject_test <- data.table(read.table('./test/subject_test.txt', header=F))
y_train <- data.table(read.table('./train/y_train.txt', header=F))
x_train <- data.table(read.table('./train/X_train.txt', header=F))
subject_train <- data.table(read.table('./train/subject_train.txt', header=F))
## features.txt maps feature index (V1) to feature name (V2)
features <- read.table('./features.txt')
## Stack train on top of test (same order for all three pieces)
all_subject <- rbind(subject_train,subject_test)
x <- rbind(x_train,x_test)
y <- rbind(y_train,y_test)
names(all_subject) <- 'subject'
names(x) <- as.character(features$V2)
names(y) <- 'activity'
combined_data <- cbind(x,y,all_subject)
## Extracts only the measurements on the mean and the standard deviation for each measurement
## Columns 562/563 are the appended 'activity' and 'subject' columns.
mean_std_select <- grep("mean\\(\\)|std\\(\\)",names(x))
all_select <- c(mean_std_select,562,563)
selected_data <- combined_data[,all_select,with=FALSE]
## Uses descriptive activity names to name the activities in the data set
activity_labels <- read.table('./activity_labels.txt')
names(activity_labels) <- c('activity','act_label')
merged_data <- merge(selected_data,activity_labels,by="activity",all=T)
## Move 'activity' to the last column so measurement columns come first
new_order <- c(names(merged_data)[-1],'activity')
setcolorder(merged_data,new_order)
## Appropriately labels the data set with descriptive variable names
names(merged_data)<-gsub("Acc", "Accelerometer", names(merged_data))
names(merged_data)<-gsub("Gyro", "Gyroscope", names(merged_data))
names(merged_data)<-gsub("BodyBody", "Body", names(merged_data))
names(merged_data)<-gsub("Mag", "Magnitude", names(merged_data))
names(merged_data)<-gsub("^t", "Time", names(merged_data))
names(merged_data)<-gsub("^f", "Frequency", names(merged_data))
names(merged_data)<-gsub("tBody", "TimeBody", names(merged_data))
names(merged_data)<-gsub("-mean\\(\\)", "Mean", names(merged_data), ignore.case = TRUE)
names(merged_data)<-gsub("-std\\(\\)", "STD", names(merged_data), ignore.case = TRUE)
## NOTE(review): the pattern '-freq()' does not occur in UCI HAR feature
## names ('meanFreq()' does), so this substitution looks like a no-op -- verify.
names(merged_data)<-gsub("-freq\\(\\)", "Frequency", names(merged_data), ignore.case = TRUE)
names(merged_data)<-gsub("angle", "Angle", names(merged_data))
names(merged_data)<-gsub("gravity", "Gravity", names(merged_data))
## From the data set in step 4 create a second, independent tidy data set
## with the average of each variable for each activity and each subject
## After setcolorder the last three columns are subject, act_label and
## activity, so 1:(ncol-3) selects only the measurement columns.
avg_data <- aggregate(merged_data[,1:(ncol(merged_data)-3)],
by=list(subject=merged_data$subject,
activity=merged_data$activity),
mean)
write.table(avg_data,file='tidy_data.txt',row.names = FALSE)
| /run_analysis.R | no_license | posleo511/Coursera_c3_final_assignment | R | false | false | 2,859 | r | ## working directory and packages setting
## Working directory setup.
## NOTE(review): hard-coded absolute path -- assumes the unzipped UCI HAR
## Dataset lives here; adjust before running on another machine.
wd <- 'C:/Users/mdlyz/Desktop/UCI HAR Dataset'
setwd(wd)
library(data.table)
## Merges the training data and the test sets to create one data set
## y_* hold activity ids, X_* hold the 561 feature measurements, and
## subject_* hold subject ids; rows align across the three files.
y_test <- data.table(read.table('./test/y_test.txt', header=F))
x_test <- data.table(read.table('./test/X_test.txt', header=F))
subject_test <- data.table(read.table('./test/subject_test.txt', header=F))
y_train <- data.table(read.table('./train/y_train.txt', header=F))
x_train <- data.table(read.table('./train/X_train.txt', header=F))
subject_train <- data.table(read.table('./train/subject_train.txt', header=F))
## features.txt maps feature index (V1) to feature name (V2)
features <- read.table('./features.txt')
## Stack train on top of test (same order for all three pieces)
all_subject <- rbind(subject_train,subject_test)
x <- rbind(x_train,x_test)
y <- rbind(y_train,y_test)
names(all_subject) <- 'subject'
names(x) <- as.character(features$V2)
names(y) <- 'activity'
combined_data <- cbind(x,y,all_subject)
## Extracts only the measurements on the mean and the standard deviation for each measurement
## Columns 562/563 are the appended 'activity' and 'subject' columns.
mean_std_select <- grep("mean\\(\\)|std\\(\\)",names(x))
all_select <- c(mean_std_select,562,563)
selected_data <- combined_data[,all_select,with=FALSE]
## Uses descriptive activity names to name the activities in the data set
activity_labels <- read.table('./activity_labels.txt')
names(activity_labels) <- c('activity','act_label')
merged_data <- merge(selected_data,activity_labels,by="activity",all=T)
## Move 'activity' to the last column so measurement columns come first
new_order <- c(names(merged_data)[-1],'activity')
setcolorder(merged_data,new_order)
## Appropriately labels the data set with descriptive variable names
names(merged_data)<-gsub("Acc", "Accelerometer", names(merged_data))
names(merged_data)<-gsub("Gyro", "Gyroscope", names(merged_data))
names(merged_data)<-gsub("BodyBody", "Body", names(merged_data))
names(merged_data)<-gsub("Mag", "Magnitude", names(merged_data))
names(merged_data)<-gsub("^t", "Time", names(merged_data))
names(merged_data)<-gsub("^f", "Frequency", names(merged_data))
names(merged_data)<-gsub("tBody", "TimeBody", names(merged_data))
names(merged_data)<-gsub("-mean\\(\\)", "Mean", names(merged_data), ignore.case = TRUE)
names(merged_data)<-gsub("-std\\(\\)", "STD", names(merged_data), ignore.case = TRUE)
## NOTE(review): the pattern '-freq()' does not occur in UCI HAR feature
## names ('meanFreq()' does), so this substitution looks like a no-op -- verify.
names(merged_data)<-gsub("-freq\\(\\)", "Frequency", names(merged_data), ignore.case = TRUE)
names(merged_data)<-gsub("angle", "Angle", names(merged_data))
names(merged_data)<-gsub("gravity", "Gravity", names(merged_data))
## From the data set in step 4 create a second, independent tidy data set
## with the average of each variable for each activity and each subject
## After setcolorder the last three columns are subject, act_label and
## activity, so 1:(ncol-3) selects only the measurement columns.
avg_data <- aggregate(merged_data[,1:(ncol(merged_data)-3)],
by=list(subject=merged_data$subject,
activity=merged_data$activity),
mean)
write.table(avg_data,file='tidy_data.txt',row.names = FALSE)
|
# Test Methods
#' Smoke-test the musixmatch API wrappers.
#'
#' Calls each wrapper with fixed example inputs, printing any error raised.
#' The original version wrote `return(1)` inside each tryCatch error handler,
#' which only returns from the handler function (not from run_tests) and then
#' discarded the result, so failures were silently lost.
#'
#' @return Integer count of failed calls (0 when every call succeeded).
run_tests <- function(){

    q_artist  <- 'slayer'
    artist_id <- 2683
    album_id  <- 10392458
    track_id  <- 13886643

    # Evaluate `expr` (a lazily-evaluated call) inside tryCatch:
    # 0L on success, 1L (after printing the condition) on error.
    run_case <- function(expr) {
        tryCatch({
            expr
            0L
        }, error = function(e) {
            print(e)
            1L
        })
    }

    failures <- 0L

    # search_artist
    failures <- failures + run_case(search_artist(q_artist))
    failures <- failures + run_case(search_artist(q_artist, page_size = 2))
    failures <- failures + run_case(search_artist(q_artist, page_size = 2, f_artist_id = artist_id))

    # get_artist_albums
    failures <- failures + run_case(get_artist_albums(artist_id))
    failures <- failures + run_case(get_artist_albums(artist_id, page_size = 2))
    failures <- failures + run_case(get_artist_albums(artist_id, g_album_name = 1))
    failures <- failures + run_case(get_artist_albums(artist_id, s_release_date = 'desc'))

    # get_artist
    failures <- failures + run_case(get_artist(artist_id))

    # get_artist_related
    failures <- failures + run_case(get_artist_related(artist_id))

    # get_album
    failures <- failures + run_case(get_album(album_id))

    # get_album_tracks
    failures <- failures + run_case(get_album_tracks(album_id))
    failures <- failures + run_case(get_album_tracks(album_id, f_has_lyrics = 1, page = 2))

    # search_track
    failures <- failures + run_case(search_track(q_track = 'Show No Mercy', q_artist = 'Slayer'))
    failures <- failures + run_case(search_track(q_track = 'Show No Mercy', f_has_lyrics = 1))

    # get_track
    failures <- failures + run_case(get_track(track_id))

    # get_track_subtitle - known to error
    failures <- failures + run_case(get_track_subtitle(track_id))

    failures
}
| /R/testing.R | no_license | kraigrs/musixmatch | R | false | false | 2,686 | r | # Test Methods
#' Smoke-test the musixmatch API wrappers.
#'
#' Calls each wrapper with fixed example inputs, printing any error raised.
#' The original version wrote `return(1)` inside each tryCatch error handler,
#' which only returns from the handler function (not from run_tests) and then
#' discarded the result, so failures were silently lost.
#'
#' @return Integer count of failed calls (0 when every call succeeded).
run_tests <- function(){

    q_artist  <- 'slayer'
    artist_id <- 2683
    album_id  <- 10392458
    track_id  <- 13886643

    # Evaluate `expr` (a lazily-evaluated call) inside tryCatch:
    # 0L on success, 1L (after printing the condition) on error.
    run_case <- function(expr) {
        tryCatch({
            expr
            0L
        }, error = function(e) {
            print(e)
            1L
        })
    }

    failures <- 0L

    # search_artist
    failures <- failures + run_case(search_artist(q_artist))
    failures <- failures + run_case(search_artist(q_artist, page_size = 2))
    failures <- failures + run_case(search_artist(q_artist, page_size = 2, f_artist_id = artist_id))

    # get_artist_albums
    failures <- failures + run_case(get_artist_albums(artist_id))
    failures <- failures + run_case(get_artist_albums(artist_id, page_size = 2))
    failures <- failures + run_case(get_artist_albums(artist_id, g_album_name = 1))
    failures <- failures + run_case(get_artist_albums(artist_id, s_release_date = 'desc'))

    # get_artist
    failures <- failures + run_case(get_artist(artist_id))

    # get_artist_related
    failures <- failures + run_case(get_artist_related(artist_id))

    # get_album
    failures <- failures + run_case(get_album(album_id))

    # get_album_tracks
    failures <- failures + run_case(get_album_tracks(album_id))
    failures <- failures + run_case(get_album_tracks(album_id, f_has_lyrics = 1, page = 2))

    # search_track
    failures <- failures + run_case(search_track(q_track = 'Show No Mercy', q_artist = 'Slayer'))
    failures <- failures + run_case(search_track(q_track = 'Show No Mercy', f_has_lyrics = 1))

    # get_track
    failures <- failures + run_case(get_track(track_id))

    # get_track_subtitle - known to error
    failures <- failures + run_case(get_track_subtitle(track_id))

    failures
}
|
#' Plots vaf distribution of genes
#' @description Plots vaf distribution of genes as a boxplot.
#'
#' @param maf an \code{\link{MAF}} object generated by \code{\link{read.maf}}
#' @param vafCol manually specify column name for vafs. Default looks for column 't_vaf'
#' @param genes specify genes for which plots has to be generated
#' @param orderByMedian Orders genes by decreasing median VAF. Default TRUE
#' @param keepGeneOrder keep gene order. Default FALSE
#' @param top if \code{genes} is NULL plots top n number of genes. Defaults to 10.
#' @param flip if TRUE, flips axes. Default FALSE
#' @param showN if TRUE, includes number of observations
#' @param gene_fs font size for gene names. Default 0.8
#' @param fn Filename. If given saves plot as a output pdf. Default NULL.
#' @param axis_fs font size for axis. Default 0.8
#' @param width Width of plot to be saved. Default 5
#' @param height Height of plot to be saved. Default 5
#' @return Nothing.
#' @examples
#' laml.maf <- system.file("extdata", "tcga_laml.maf.gz", package = "maftools")
#' laml <- read.maf(maf = laml.maf)
#' plotVaf(maf = laml, vafCol = 'i_TumorVAF_WU')
#'
#' @export
plotVaf = function(maf, vafCol = NULL, genes = NULL, top = 10,
                   orderByMedian = TRUE, keepGeneOrder = FALSE, flip = FALSE, fn = NULL,
                   gene_fs = 0.8, axis_fs = 0.8, height = 5, width = 5, showN = TRUE){

  # Default to the most frequently mutated genes when none are given
  if(is.null(genes)){
    genes = as.character(getGeneSummary(x =maf)[1:top, Hugo_Symbol])
  }

  dat = subsetMaf(maf = maf, genes = genes, includeSyn = FALSE, mafObj = FALSE)

  # Locate the VAF column: use 't_vaf' if present, else the user-supplied
  # column, else estimate it from ref/alt read counts
  if(!'t_vaf' %in% colnames(dat)){
    if(is.null(vafCol)){
      if(all(c('t_ref_count', 't_alt_count') %in% colnames(dat))){
        message("t_vaf field is missing, but found t_ref_count & t_alt_count columns. Estimating vaf..")
        dat[,t_vaf := as.numeric(as.character(t_alt_count))/(as.numeric(as.character(t_ref_count)) + as.numeric(as.character(t_alt_count)))]
      }else{
        print(colnames(dat))
        stop('t_vaf field is missing. Use vafCol to manually specify vaf column name.')
      }
    }else{
      colnames(dat)[which(colnames(dat) == vafCol)] = 't_vaf'
    }
  }

  dat.genes = dat[dat$Hugo_Symbol %in% genes]
  datm <- data.table::melt(data = dat.genes[,.(Hugo_Symbol, t_vaf)], id.vars = 'Hugo_Symbol', measure.vars = 't_vaf')

  # remove NA vafs
  datm = datm[!is.na(value)]
  datm[,value := as.numeric(as.character(value))]
  if(nrow(datm) == 0){
    stop("Nothing to plot.")
  }

  # Rescale percentage VAFs to fractions
  if(max(datm$value, na.rm = TRUE) > 1){
    datm$value = datm$value/100
  }

  # Gene ordering: caller-supplied order, or by decreasing median VAF
  if(keepGeneOrder){
    geneOrder = genes
    datm$Hugo_Symbol = factor(x = datm$Hugo_Symbol, levels = geneOrder)
  }else if(orderByMedian){
    geneOrder = datm[,median(value),Hugo_Symbol][order(V1, decreasing = TRUE)][,Hugo_Symbol]
    datm$Hugo_Symbol = factor(x = datm$Hugo_Symbol, levels = geneOrder)
  }

  # One color per gene; fall back to random colors when the palettes run out
  bcol = c(RColorBrewer::brewer.pal(n = 8, name = "Dark2"),
           RColorBrewer::brewer.pal(n = 8, name = "Accent"))
  if(length(genes) > length(bcol)){
    bcol = sample(x = colors(distinct = TRUE), size = length(genes), replace = FALSE)
  }

  if(!is.null(fn)){
    pdf(file = paste0(fn, ".pdf"), width = width, height = height, paper = "special", bg = "white")
    # Bug fix: close the device on exit even if plotting fails midway, so an
    # error no longer leaks an open pdf device.
    on.exit(dev.off(), add = TRUE)
  }

  if(flip){
    # Horizontal boxplots: genes on the y-axis, VAF on the x-axis
    par(mar = c(3, 4, 2, 2))
    b = boxplot(value ~ Hugo_Symbol, data = datm, xaxt="n", boxwex=0.5, outline=FALSE, lty=1, lwd = 1.4, outwex=0,
                staplewex=0, ylim = c(0, 1), axes = FALSE, border = bcol, horizontal = TRUE, ylab = NA)
    axis(side = 1, at = seq(0, 1, 0.2), las = 1, font =1, lwd = 1.5, cex.axis = axis_fs)
    axis(side = 2, at = 1:length(b$names), labels = b$names, tick = FALSE, las = 2, font = 3, line = -1, cex.axis = gene_fs)
    if(showN){
      # Per-gene observation counts on the opposite axis
      axis(side = 4, at = 1:length(b$names), labels = b$n, font =1, tick = FALSE, line = -1, las = 2, cex.axis = gene_fs)
    }
    abline(v = seq(0, 1, 0.2), h = 1:length(b$names), col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.25), lty = 2)
    stripchart(value ~ Hugo_Symbol, vertical = FALSE, data = datm,
               method = "jitter", add = TRUE, pch = 16, col = bcol, cex = 0.5)
  }else{
    # Vertical boxplots: genes on the x-axis, VAF on the y-axis
    par(mar = c(4, 3, 2, 1))
    b = boxplot(value ~ Hugo_Symbol, data = datm, xaxt="n", boxwex=0.5, outline=FALSE, lty=1, lwd = 1.4, outwex=0,
                staplewex=0, ylim = c(0, 1), axes = FALSE, border = bcol, xlab = NA)
    axis(side = 1, at = 1:length(b$names), labels = b$names, tick = FALSE, las = 2, font = 3, line = -1, cex.axis = gene_fs)
    axis(side = 2, at = seq(0, 1, 0.2), las = 2, cex.axis = axis_fs, lwd = 1.2, font.axis = 2, cex = 1.5, font =1)
    if(showN){
      # Per-gene observation counts along the top
      axis(side = 3, at = 1:length(b$names), labels = b$n, font =1, tick = FALSE,
           line = -1, cex.axis = gene_fs)
    }
    abline(h = seq(0, 1, 0.2), v = 1:length(b$names), col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.5), lty = 2)
    stripchart(value ~ Hugo_Symbol, vertical = TRUE, data = datm,
               method = "jitter", add = TRUE, pch = 16, col = bcol, cex = 0.5)
  }
}
| /R/plot_vaf.R | permissive | AxelitoMartin/maftools | R | false | false | 5,228 | r | #' Plots vaf distribution of genes
#' @description Plots vaf distribution of genes as a boxplot.
#'
#' @param maf an \code{\link{MAF}} object generated by \code{\link{read.maf}}
#' @param vafCol manually specify column name for vafs. Default looks for column 't_vaf'
#' @param genes specify genes for which plots has to be generated
#' @param orderByMedian Orders genes by decreasing median VAF. Default TRUE
#' @param keepGeneOrder keep gene order. Default FALSE
#' @param top if \code{genes} is NULL plots top n number of genes. Defaults to 10.
#' @param flip if TRUE, flips axes. Default FALSE
#' @param showN if TRUE, includes number of observations
#' @param gene_fs font size for gene names. Default 0.8
#' @param fn Filename. If given saves plot as a output pdf. Default NULL.
#' @param axis_fs font size for axis. Default 0.8
#' @param width Width of plot to be saved. Default 4
#' @param height Height of plot to be saved. Default 5
#' @return Nothing.
#' @examples
#' laml.maf <- system.file("extdata", "tcga_laml.maf.gz", package = "maftools")
#' laml <- read.maf(maf = laml.maf)
#' plotVaf(maf = laml, vafCol = 'i_TumorVAF_WU')
#'
#' @export
plotVaf = function(maf, vafCol = NULL, genes = NULL, top = 10,
orderByMedian = TRUE, keepGeneOrder = FALSE, flip = FALSE, fn = NULL,
gene_fs = 0.8, axis_fs = 0.8, height = 5, width = 5, showN = TRUE){
if(is.null(genes)){
genes = as.character(getGeneSummary(x =maf)[1:top, Hugo_Symbol])
}
dat = subsetMaf(maf = maf, genes = genes, includeSyn = FALSE, mafObj = FALSE)
if(!'t_vaf' %in% colnames(dat)){
if(is.null(vafCol)){
if(all(c('t_ref_count', 't_alt_count') %in% colnames(dat))){
message("t_vaf field is missing, but found t_ref_count & t_alt_count columns. Estimating vaf..")
dat[,t_vaf := as.numeric(as.character(t_alt_count))/(as.numeric(as.character(t_ref_count)) + as.numeric(as.character(t_alt_count)))]
}else{
print(colnames(dat))
stop('t_vaf field is missing. Use vafCol to manually specify vaf column name.')
}
}else{
colnames(dat)[which(colnames(dat) == vafCol)] = 't_vaf'
}
}
#dat.genes = data.frame(dat[dat$Hugo_Symbol %in% genes])
#suppressMessages(datm <- melt(dat.genes[,c('Hugo_Symbol', 't_vaf')]))
dat.genes = dat[dat$Hugo_Symbol %in% genes]
datm <- data.table::melt(data = dat.genes[,.(Hugo_Symbol, t_vaf)], id.vars = 'Hugo_Symbol', measure.vars = 't_vaf')
#remove NA from vcf
datm = datm[!is.na(value)]
datm[,value := as.numeric(as.character(value))]
if(nrow(datm) == 0){
stop("Nothing to plot.")
}
#maximum vaf
if(max(datm$value, na.rm = TRUE) > 1){
datm$value = datm$value/100
}
if(keepGeneOrder){
geneOrder = genes
datm$Hugo_Symbol = factor(x = datm$Hugo_Symbol, levels = geneOrder)
}else if(orderByMedian){
geneOrder = datm[,median(value),Hugo_Symbol][order(V1, decreasing = TRUE)][,Hugo_Symbol]
datm$Hugo_Symbol = factor(x = datm$Hugo_Symbol, levels = geneOrder)
}
bcol = c(RColorBrewer::brewer.pal(n = 8, name = "Dark2"),
RColorBrewer::brewer.pal(n = 8, name = "Accent"))
if(length(genes) > length(bcol)){
bcol = sample(x = colors(distinct = TRUE), size = length(genes), replace = FALSE)
}
if(!is.null(fn)){
pdf(file = paste0(fn, ".pdf"), width = width, height = height, paper = "special", bg = "white")
}
if(flip){
par(mar = c(3, 4, 2, 2))
b = boxplot(value ~ Hugo_Symbol, data = datm, xaxt="n", boxwex=0.5, outline=FALSE, lty=1, lwd = 1.4, outwex=0,
staplewex=0, ylim = c(0, 1), axes = FALSE, border = bcol, horizontal = TRUE, ylab = NA)
axis(side = 1, at = seq(0, 1, 0.2), las = 1, font =1, lwd = 1.5, cex.axis = axis_fs)
axis(side = 2, at = 1:length(b$names), labels = b$names, tick = FALSE, las = 2, font = 3, line = -1, cex.axis = gene_fs)
if(showN){
axis(side = 4, at = 1:length(b$names), labels = b$n, font =1, tick = FALSE, line = -1, las = 2, cex.axis = gene_fs)
}
abline(v = seq(0, 1, 0.2), h = 1:length(b$names), col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.25), lty = 2)
stripchart(value ~ Hugo_Symbol, vertical = FALSE, data = datm,
method = "jitter", add = TRUE, pch = 16, col = bcol, cex = 0.5)
}else{
par(mar = c(4, 3, 2, 1))
b = boxplot(value ~ Hugo_Symbol, data = datm, xaxt="n", boxwex=0.5, outline=FALSE, lty=1, lwd = 1.4, outwex=0,
staplewex=0, ylim = c(0, 1), axes = FALSE, border = bcol, xlab = NA)
axis(side = 1, at = 1:length(b$names), labels = b$names, tick = FALSE, las = 2, font = 3, line = -1, cex.axis = gene_fs)
axis(side = 2, at = seq(0, 1, 0.2), las = 2, cex.axis = axis_fs, lwd = 1.2, font.axis = 2, cex = 1.5, font =1)
if(showN){
axis(side = 3, at = 1:length(b$names), labels = b$n, font =1, tick = FALSE,
line = -1, cex.axis = gene_fs)
}
abline(h = seq(0, 1, 0.2), v = 1:length(b$names), col = grDevices::adjustcolor(col = "gray70", alpha.f = 0.5), lty = 2)
stripchart(value ~ Hugo_Symbol, vertical = TRUE, data = datm,
method = "jitter", add = TRUE, pch = 16, col = bcol, cex = 0.5)
}
if(!is.null(fn)){
dev.off()
}
}
|
#####################################
### Bayesian Copula Deconvolution ###
#####################################
# Codes accompanying "Bayesian Copula Density Deconvolution for Zero-Inflated Data with Applications in Nutritional Epidemiology" by Sarkar, Pati, Mallick and Carroll.
# Codes written by Abhra Sarkar (abhra.sarkar@utexas.edu), last modified on Dec 15, 2019, in Austin, TX
# The current file is for univariate density deconvolution for variables with strictly continuously measured surrogates.
# The method uses mixtures of truncated-normals with shared atoms to model the density of interest,
# mixtures of moment-restricted normals to model the density of the measurement errors,
# and mixtures of B-spines to model the conditional variability of the measurement errors.
# See paper for additional details.
#############
### Input ###
#############
# While running from within the file 'Bayes_Copula_Decon_MVT.R' that implements the multivariate method, these arguments are read from the original file.
# The univariate method can also be independently implemented using the current file.
# ws <- strictly continuously measured surrogate values
# xs.lwr <- lower limit of the range of the variable of interest
# xs.upr <- upper limit of the range of the variable of interest
# mis <- no of surrogates for each subject, must be greater than or equal to 3
# z.xs.max <- number of mixture components allowed in the model for the density of interest
# z.us.max <- number of mixture components allowed in the model for the density of the measurement errors
# K.t <- number of B-spline knots for the variance functions modeling conditional variability of the measurement errors
# simsize <- total num of MCMC iterations
# burnin <- burnin for the MCMC iterations
# show_progress <- if TRUE, shows progress by printing every 10th iteration number, MUST be set at FALSE while running in parallel from within 'Bayes_Copula_Decon_MVT.R'
# plot_results <- if TRUE, plots the estimated density of interest, the estimated density of measurement errors, the estimated variance function etc., MUST be set at FALSE while running in parallel from within 'Bayes_Copula_Decon_MVT.R'
##############
### Output ###
##############
# Output comprises a list of the following variables.
# While running from within the file 'Bayes_Copula_Decon_MVT.R' that implements the multivariate method, these variables are used as inputs to the multivariate sampler.
# knots <- knot-points for constructing the B-splines bases that model the conditional variability of the measurement errors
# thetas <- estimated coefficients of B-splines bases that model the conditional variability of the measurement errors
# xs <- estimated subject-specific values of the variable of interest
# us <- estimated subject and replicate-specific values of the measurement errors
# z.xs <- mixture component labels for the mixture model for the density of interest
# pi.xs <- mixture component probabilities for the mixture model for the density of interest
# params.xs <- mixture component-specific parameters for the mixture model for the density of interest
# z.us <- mixture component labels for the mixture model for the density of the measurement errors
# pi.us <- mixture component probabilities for the mixture model for the density of the measurement errors
# params.us <- mixture component-specific parameters for the mixture model for the density of the measurement errors
UNIV_DECON_REGULAR = function(ws, xs.lwr, xs.upr, mis, z.xs.max, z.us.max, K.t, simsize, burnin, show_progress=TRUE, plot_results=TRUE)
{
# NOTE(review): this sampler relies on functions not defined in this file
# chunk, assumed to be in scope when it is called: dtnorm/rtnorm (truncated
# normal density/sampler), rdirichlet, rmvnorm, and the helpers P.mat,
# B.basis, fr, prop.sig.thetas.fn, make.positive.definite, fu_mixnorm,
# d.restricted.mix.norm, d.scaled.restricted.mix.norm, var.e.fn,
# r.tnorm.proposal.params.restricted.mix.norm and
# r.proposal.params.restricted.mix.norm.
#################################
### Priors and Initial Values ###
#################################
### Initialization and prior of xs and us
# n = number of subjects, N = total number of surrogates across subjects
n = length(mis)
N = sum(mis)
# inds maps each surrogate to its subject; inds1/inds2 hold each subject's
# first/last surrogate positions in ws
inds = rep(1:n,times=mis)
inds1 = inds2 = numeric(n)
inds1[1] = 1
inds2[1] = inds1[1]+mis[1]-1
for(ii in 2:n)
{
inds1[ii] = inds1[ii-1]+mis[ii-1]
inds2[ii] = inds1[ii] + mis[ii]-1
}
# Start xs at subject-specific surrogate means, nudged strictly inside the support
wbars = tapply(ws,inds,"mean")
xs = as.vector(wbars)
xs[xs <= xs.lwr] = xs.lwr+0.1
xs[xs >= xs.upr] = xs.upr-0.1
current.xs = start.xs = xs
us = ws - rep(xs,times=mis)
range.start.xs = diff(range(xs))
s2is = as.vector(tapply(ws,inds,var))
xs.grid = seq(xs.lwr,xs.upr,length=500)
xs.grid.length = length(xs.grid)
alpha.xs = 1
# Normal
mu0.xs = mean(xs)
sigmasq0.xs = var(xs)
# Inverse-Gamma (independent from mu - independence is important)
a.sigmasq.xs = 1
b.sigmasq.xs = 1
# Mixture for the density of interest, initialized via k-means on xs
pi.xs = rep(1/z.xs.max,z.xs.max)
clusters.xs = kmeans(xs,z.xs.max)
mu.xs = clusters.xs$center
z.xs = clusters.xs$cluster
sigmasq.xs = rep(var(xs)/5,z.xs.max)
d.ordinates.xs = matrix(0,nrow=n,ncol=z.xs.max)
### Prior and initialization of s2t and thetas
alpha.t = 100
beta.t = 1
s2t = 0.01
P.t = P.mat(K.t+1) # penalty matrix
knots.t = seq(xs.lwr,xs.upr,length=K.t)
# Initialize the B-spline coefficients at an optimum of fr found by BFGS
optim_results = optim(rep(1,K.t+1), fr, NULL, xs, mis, knots.t, P.t, s2t, us, method = "BFGS")
thetas = current.thetas = start.thetas = optim_results$par
prop.sig.thetas = make.positive.definite(prop.sig.thetas.fn(thetas,xs,mis,us,s2t,K.t,P.t,knots.t,n))
var.grid = seq(xs.lwr,xs.upr,length=100)
vars = current.vars = t(B.basis(xs,knots.t)%*%exp(current.thetas))
B.basis.var.grid.knots.t = B.basis(var.grid,knots.t)
B.basis.store = B.basis(xs.grid,knots.t)
close.ind = rep(0,n)
### Prior and initialization for mixture
simsize.mh.us = 10
z.us = rep(1,N)
alpha.us = 0.1
params.us = matrix(c(0.5,0,1,1),nrow=z.us.max,ncol=4,byrow=T) # unique values
pi.us = rep(1/z.us.max,z.us.max)
d.ordinates.us = matrix(0,nrow=N,ncol=z.us.max)
#########################
### Tuning Parameters ###
#########################
sig.tune.thetas.1 = 0
sig.tune.thetas.2 = 0.1
###############################
### Storage for MCMC Output ###
###############################
es.grid = seq(-3,3,length=500)
density.xs.est = numeric(xs.grid.length)
var.es = numeric(1)
var.est = numeric(length(var.grid))
density.es.est = numeric(length(es.grid))
prob.consumption.est = numeric(xs.grid.length)
proposed.xs = current.xs = xs
proposed.us = current.us = us
proposed.vars = current.vars = vars
current.likelihood = proposed.likelihood = matrix(1,2,n)
temp.proposed.us.likelihood = temp.current.us.likelihood = matrix(1,2,N)
thetas.est = numeric(length(thetas))
thetas.MCMC = matrix(0,nrow=simsize,ncol=length(thetas))
##################
### Start MCMC ###
##################
for (iii in 1:simsize)
{
if((show_progress==TRUE)&&(iii%%10==0))
print(iii)
### Updating z.xs
for(kk in 1:z.xs.max)
d.ordinates.xs[,kk] = dtnorm(xs,mu.xs[kk],sqrt(sigmasq.xs[kk]),lower=xs.lwr,upper=xs.upr)
# Guard against numerically degenerate density values
d.ordinates.xs[is.nan(d.ordinates.xs)] = 0
d.ordinates.xs[is.infinite(d.ordinates.xs)] = 0
for(ii in 1:n)
z.xs[ii] = sample(z.xs.max,1,prob=pi.xs*d.ordinates.xs[ii,])
### Updating cluster probabilities
n.kk.xs = tabulate(z.xs,nbins=z.xs.max)
pi.xs = rdirichlet(1,alpha.xs/z.xs.max+n.kk.xs)
### Updating mu.xs, sigmasq.xs
# Transform the truncated-normal xs through the probability-integral map so
# that conjugate normal/inverse-gamma updates apply; clip extreme values
xs.trans = mu.xs[z.xs]+sqrt(sigmasq.xs[z.xs])*qnorm((pnorm((xs-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs]))-pnorm((xs.lwr-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs])))/(pnorm((xs.upr-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs]))-pnorm((xs.lwr-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs]))))
xs.trans[xs.trans < xs.lwr - 10] = xs.lwr - 10
xs.trans[xs.trans > xs.upr + 10] = xs.upr + 10
for(kk in 1:z.xs.max)
{
temp = which(z.xs==kk)
xspool = xs.trans[temp]
sigmasq.temp = 1/(n.kk.xs[kk]/sigmasq.xs[kk] + 1/sigmasq0.xs)
mu.temp = (sum(xspool)/sigmasq.xs[kk] + mu0.xs/sigmasq0.xs) * sigmasq.temp
mu.xs[kk] = rnorm(1,mu.temp,sqrt(sigmasq.temp))
post.a.sigmasq.xs = a.sigmasq.xs + length(xspool)/2
post.b.sigmasq.xs = b.sigmasq.xs + sum((xspool-mu.xs[kk])^2)/2
sigmasq.xs[kk] = 1/rgamma(1,shape=post.a.sigmasq.xs,rate=post.b.sigmasq.xs)
}
### Updating xs (and us)
# Metropolis-Hastings with a truncated-normal random walk (fixed sd = 0.1);
# the variance function at a proposed xs is looked up at the nearest grid point
proposed.xs = rtnorm(n,mean=current.xs,sd=0.1,lower=xs.lwr,upper=xs.upr)
TempMat = abs(matrix(rep(proposed.xs,xs.grid.length),n,xs.grid.length)-matrix(rep(xs.grid,n),n,xs.grid.length,byrow=T))
close.ind = apply(TempMat,1,which.min)
proposed.vars = B.basis.store[close.ind,]%*%exp(thetas)
proposed.prior = dtnorm(proposed.xs,mu.xs[z.xs],sqrt(sigmasq.xs[z.xs]),lower=xs.lwr,upper=xs.upr)
current.prior = dtnorm(current.xs,mu.xs[z.xs],sqrt(sigmasq.xs[z.xs]),lower=xs.lwr,upper=xs.upr)
proposed.us = ws-rep(proposed.xs,times=mis)
k.us = max(z.us)
temp.current.us.likelihood = fu_mixnorm(current.us,mean=0,sd=rep(sqrt(current.vars),times=mis),pi.us[1:k.us],params.us[1:k.us,])
temp.proposed.us.likelihood = fu_mixnorm(proposed.us,mean=0,sd=rep(sqrt(proposed.vars),times=mis),pi.us[1:k.us],params.us[1:k.us,])
current.likelihood = tapply(temp.current.us.likelihood,inds,"prod")
proposed.likelihood = tapply(temp.proposed.us.likelihood,inds,"prod")
mh.ratio = (proposed.prior * proposed.likelihood * dtnorm(current.xs,mean=proposed.xs,sd=0.1,lower=xs.lwr,upper=xs.upr)) / (current.prior * current.likelihood * dtnorm(proposed.xs,mean=current.xs,sd=0.1,lower=xs.lwr,upper=xs.upr))
mh.ratio[is.nan(mh.ratio)] = 0
u = runif(n)
inds.to.replace = (1:n)[u<mh.ratio]
xs[inds.to.replace] = current.xs[inds.to.replace] = proposed.xs[inds.to.replace]
vars[inds.to.replace] = current.vars[inds.to.replace] = proposed.vars[inds.to.replace]
us = current.us = ws - rep(xs,times=mis)
### Updating thetas
# Joint MH update of all B-spline coefficients using a multivariate normal
# proposal with covariance built from prop.sig.thetas and the tuning constants
proposed.thetas = t(rmvnorm(1,current.thetas,(diag(rep(sig.tune.thetas.1,(K.t+1)))+sig.tune.thetas.2*prop.sig.thetas)))
TempMat = abs(matrix(rep(xs,xs.grid.length),n,xs.grid.length)-matrix(rep(xs.grid,n),n,xs.grid.length,byrow=T))
close.ind = apply(TempMat,1,which.min)
proposed.vars = B.basis.store[close.ind,]%*%exp(proposed.thetas)
current.log.prior = - t(current.thetas)%*%P.t%*%current.thetas/(2*s2t)
proposed.log.prior = - t(proposed.thetas)%*%P.t%*%proposed.thetas/(2*s2t)
temp.current.likelihood = d.restricted.mix.norm(us,mean=rep(0,times=N),sd=rep(sqrt(current.vars),times=mis),params.us[z.us,])
temp.proposed.likelihood = d.restricted.mix.norm(us,mean=rep(0,times=N),sd=rep(sqrt(proposed.vars),times=mis),params.us[z.us,])
current.log.likelihood = sum(log(temp.current.likelihood))
proposed.log.likelihood = sum(log(temp.proposed.likelihood))
log.mh.ratio = proposed.log.prior + proposed.log.likelihood - current.log.likelihood - current.log.prior
if(is.nan(log.mh.ratio)) log.mh.ratio = -Inf
if(log(runif(1))<log.mh.ratio)
{
thetas = current.thetas = proposed.thetas
vars = current.vars = proposed.vars
}
### Updating s2t
s2t = 1/rgamma(1,shape=alpha.t+(K.t+1)/2,rate=beta.t+t(thetas)%*%P.t%*%thetas)
### Updating z.us
for(ii in 1:N)
{
prob.us = pi.us * d.restricted.mix.norm(us[ii],mean=0,sd=sqrt(vars[inds[ii]]), params.us)
if(sum(prob.us)==0) {prob.us=rep(1/z.us.max,z.us.max)}
z.us[ii] = sample(1:z.us.max,1,TRUE,prob.us) # New z.us[ii] drawn
}
### Updating cluster probabilities
n.kk.us = tabulate(z.us,nbins=z.us.max)
pi.us = rdirichlet(1,alpha.us/z.us.max+n.kk.us)
### Updating params.us
k.us = max(z.us) # Number of clusters
# After iteration 2000, reduce to a single MH scan per sweep
if(iii>2000) simsize.mh.us = 1
for(rr in 1:simsize.mh.us)
{
for(kk in 1:k.us)
{
temp = which(z.us==kk)
uspool = us[temp]
varspool = vars[inds[temp]]
proposed.params.us = r.tnorm.proposal.params.restricted.mix.norm(params.us[kk,])
temp.proposed.log.likelihood = log(d.restricted.mix.norm(uspool,mean=0,sd=sqrt(varspool),proposed.params.us))
temp.current.log.likelihood = log(d.restricted.mix.norm(uspool,mean=0,sd=sqrt(varspool),params.us[kk,]))
temp.proposed.log.likelihood[is.infinite(temp.proposed.log.likelihood)] = 0
temp.current.log.likelihood[is.infinite(temp.current.log.likelihood)] = 0
proposed.log.likelihood = sum(temp.proposed.log.likelihood)
current.log.likelihood = sum(temp.current.log.likelihood)
log.acc.prob = proposed.log.likelihood-current.log.likelihood
if(log(runif(1))<log.acc.prob)
params.us[kk,] = proposed.params.us
}
}
# Redraw parameters for unoccupied components (labels above k.us)
if(k.us<z.us.max)
for(kk in (k.us+1):z.us.max)
params.us[kk,] = r.proposal.params.restricted.mix.norm(1,1,3,3,3,3,3)
# Rescale the error-mixture parameters so the fitted error distribution has
# unit variance (moment restriction for identifiability)
var.es = var.e.fn(pi.us[1:k.us],params.us[1:k.us,])
params.us[1:k.us,2] = params.us[1:k.us,2]/sqrt(var.es)
params.us[1:k.us,3:4] = params.us[1:k.us,3:4]/var.es
thetas.MCMC[iii,] = thetas
# Accumulate posterior summaries after burnin
if(iii>burnin)
{
for(kk in 1:z.xs.max)
density.xs.est = density.xs.est + pi.xs[kk]*dtnorm(xs.grid,mu.xs[kk],sqrt(sigmasq.xs[kk]),lower=xs.lwr,upper=xs.upr)
k.us = max(z.us)
var.es = var.e.fn(pi.us[1:k.us],params.us[1:k.us,])
density.es.est = density.es.est + d.scaled.restricted.mix.norm(es.grid,0,1,pi.us[1:k.us],params.us[1:k.us,])
var.est = var.est + B.basis.var.grid.knots.t %*% exp(thetas) * var.es
thetas.est = thetas.est + log(var.es) + thetas
}
}
# Average the accumulated post-burnin quantities
density.xs.est = density.xs.est/(simsize-burnin)
density.es.est = density.es.est/(simsize-burnin)
var.est = var.est/(simsize-burnin)
thetas.est = thetas.est/(simsize-burnin)
thetas.final = thetas.est
xs.final = xs
var.final = sqrt(B.basis(xs.final,knots.t)%*%exp(thetas.final))
us.final = (ws-rep(xs.final,times=mis))
if(plot_results==TRUE)
{
dev.new()
par(mfrow=c(2,2))
plot(xs.grid,density.xs.est,xlab="x",ylab="f(x)",type="l",lty=1,col="green3",lwd=3)
plot(es.grid,density.es.est,xlab="e",ylab="f(e)",type="l",lty=1,col="green3",lwd=3)
points(es.grid,dnorm(es.grid),type="l",lty=1)
plot(xs,s2is,pch="*",xlab="x",ylab="v(x)")
points(var.grid,var.est,type="l",lty=1,col="blue",lwd=2)
points(var.grid,B.basis.var.grid.knots.t%*%exp(thetas.final),type="l",lty=1,col="green3",lwd=2)
par(mfrow=c(1,1))
}
params.xs = cbind(mu.xs,sigmasq.xs)
return(list(knots=knots.t, thetas=thetas.final,
xs=xs.final, us=us.final,
z.xs=z.xs, pi.xs=pi.xs, params.xs=params.xs,
z.us=z.us, pi.us=pi.us, params.us=params.us))
}
| /Bayes_Copula_Decon_Univariate_Regular.R | no_license | jasa-acs/Bayesian-Copula-Density-Deconvolution-for-Zero-Inflated-Data-in-Nutritional-Epidemiology | R | false | false | 14,409 | r |
#####################################
### Bayesian Copula Deconvolution ###
#####################################
# Codes accompanying "Bayesian Copula Density Deconvolution for Zero-Inflated Data with Applications in Nutritional Epidemiology" by Sarkar, Pati, Mallick and Carroll.
# Codes written by Abhra Sarkar (abhra.sarkar@utexas.edu), last modified on Dec 15, 2019, in Austin, TX
# The current file is for univariate density deconvolution for variables with strictly continuously measured surrogates.
# The method uses mixtures of truncated-normals with shared atoms to model the density of interest,
# mixtures of moment-restricted normals to model the density of the measurement errors,
# and mixtures of B-spines to model the conditional variability of the measurement errors.
# See paper for additional details.
#############
### Input ###
#############
# While running from within the file 'Bayes_Copula_Decon_MVT.R' that implements the multivariate method, these arguments are read from the original file.
# The univariate method can also be independently implemented using the current file.
# ws <- strictly continuously measured surrogate values
# xs.lwr <- lower limit of the range of the variable of interest
# xs.upr <- upper limit of the range of the variable of interest
# mis <- no of surrogates for each subject, must be greater than or equal to 3
# z.xs.max <- number of mixture components allowed in the model for the density of interest
# z.us.max <- number of mixture components allowed in the model for the density of the measurement errors
# K.t <- number of B-spline knots for the variance functions modeling conditional variability of the measurement errors
# simsize <- total num of MCMC iterations
# burnin <- burnin for the MCMC iterations
# show_progress <- if TRUE, shows progress by printing every 10th iteration number, MUST be set at FALSE while running in parallel from within 'Bayes_Copula_Decon_MVT.R'
# plot_results <- if TRUE, plots the estimated density of interest, the estimated density of measurement errors, the estimated variance function etc., MUST be set at FALSE while running in parallel from within 'Bayes_Copula_Decon_MVT.R'
##############
### Output ###
##############
# Output comprises a list of the following variables.
# While running from within the file 'Bayes_Copula_Decon_MVT.R' that implements the multivariate method, these variables are used as inputs to the multivariate sampler.
# knots <- knot-points for constructing the B-splines bases that model the conditional variability of the measurement errors
# thetas <- estimated coefficients of B-splines bases that model the conditional variability of the measurement errors
# xs <- estimated subject-specific values of the variable of interest
# us <- estimated subject and replicate-specific values of the measurement errors
# z.xs <- mixture component labels for the mixture model for the density of interest
# pi.xs <- mixture component probabilities for the mixture model for the density of interest
# params.xs <- mixture component-specific parameters for the mixture model for the density of interest
# z.us <- mixture component labels for the mixture model for the density of the measurement errors
# pi.us <- mixture component probabilities for the mixture model for the density of the measurement errors
# params.us <- mixture component-specific parameters for the mixture model for the density of the measurement errors
UNIV_DECON_REGULAR = function(ws, xs.lwr, xs.upr, mis, z.xs.max, z.us.max, K.t, simsize, burnin, show_progress=TRUE, plot_results=TRUE)
{
# NOTE(review): this sampler relies on functions not defined in this file
# chunk, assumed to be in scope when it is called: dtnorm/rtnorm (truncated
# normal density/sampler), rdirichlet, rmvnorm, and the helpers P.mat,
# B.basis, fr, prop.sig.thetas.fn, make.positive.definite, fu_mixnorm,
# d.restricted.mix.norm, d.scaled.restricted.mix.norm, var.e.fn,
# r.tnorm.proposal.params.restricted.mix.norm and
# r.proposal.params.restricted.mix.norm.
#################################
### Priors and Initial Values ###
#################################
### Initialization and prior of xs and us
# n = number of subjects, N = total number of surrogates across subjects
n = length(mis)
N = sum(mis)
# inds maps each surrogate to its subject; inds1/inds2 hold each subject's
# first/last surrogate positions in ws
inds = rep(1:n,times=mis)
inds1 = inds2 = numeric(n)
inds1[1] = 1
inds2[1] = inds1[1]+mis[1]-1
for(ii in 2:n)
{
inds1[ii] = inds1[ii-1]+mis[ii-1]
inds2[ii] = inds1[ii] + mis[ii]-1
}
# Start xs at subject-specific surrogate means, nudged strictly inside the support
wbars = tapply(ws,inds,"mean")
xs = as.vector(wbars)
xs[xs <= xs.lwr] = xs.lwr+0.1
xs[xs >= xs.upr] = xs.upr-0.1
current.xs = start.xs = xs
us = ws - rep(xs,times=mis)
range.start.xs = diff(range(xs))
s2is = as.vector(tapply(ws,inds,var))
xs.grid = seq(xs.lwr,xs.upr,length=500)
xs.grid.length = length(xs.grid)
alpha.xs = 1
# Normal
mu0.xs = mean(xs)
sigmasq0.xs = var(xs)
# Inverse-Gamma (independent from mu - independence is important)
a.sigmasq.xs = 1
b.sigmasq.xs = 1
# Mixture for the density of interest, initialized via k-means on xs
pi.xs = rep(1/z.xs.max,z.xs.max)
clusters.xs = kmeans(xs,z.xs.max)
mu.xs = clusters.xs$center
z.xs = clusters.xs$cluster
sigmasq.xs = rep(var(xs)/5,z.xs.max)
d.ordinates.xs = matrix(0,nrow=n,ncol=z.xs.max)
### Prior and initialization of s2t and thetas
alpha.t = 100
beta.t = 1
s2t = 0.01
P.t = P.mat(K.t+1) # penalty matrix
knots.t = seq(xs.lwr,xs.upr,length=K.t)
# Initialize the B-spline coefficients at an optimum of fr found by BFGS
optim_results = optim(rep(1,K.t+1), fr, NULL, xs, mis, knots.t, P.t, s2t, us, method = "BFGS")
thetas = current.thetas = start.thetas = optim_results$par
prop.sig.thetas = make.positive.definite(prop.sig.thetas.fn(thetas,xs,mis,us,s2t,K.t,P.t,knots.t,n))
var.grid = seq(xs.lwr,xs.upr,length=100)
vars = current.vars = t(B.basis(xs,knots.t)%*%exp(current.thetas))
B.basis.var.grid.knots.t = B.basis(var.grid,knots.t)
B.basis.store = B.basis(xs.grid,knots.t)
close.ind = rep(0,n)
### Prior and initialization for mixture
simsize.mh.us = 10
z.us = rep(1,N)
alpha.us = 0.1
params.us = matrix(c(0.5,0,1,1),nrow=z.us.max,ncol=4,byrow=T) # unique values
pi.us = rep(1/z.us.max,z.us.max)
d.ordinates.us = matrix(0,nrow=N,ncol=z.us.max)
#########################
### Tuning Parameters ###
#########################
sig.tune.thetas.1 = 0
sig.tune.thetas.2 = 0.1
###############################
### Storage for MCMC Output ###
###############################
es.grid = seq(-3,3,length=500)
density.xs.est = numeric(xs.grid.length)
var.es = numeric(1)
var.est = numeric(length(var.grid))
density.es.est = numeric(length(es.grid))
prob.consumption.est = numeric(xs.grid.length)
proposed.xs = current.xs = xs
proposed.us = current.us = us
proposed.vars = current.vars = vars
current.likelihood = proposed.likelihood = matrix(1,2,n)
temp.proposed.us.likelihood = temp.current.us.likelihood = matrix(1,2,N)
thetas.est = numeric(length(thetas))
thetas.MCMC = matrix(0,nrow=simsize,ncol=length(thetas))
##################
### Start MCMC ###
##################
for (iii in 1:simsize)
{
if((show_progress==TRUE)&&(iii%%10==0))
print(iii)
### Updating z.xs
for(kk in 1:z.xs.max)
d.ordinates.xs[,kk] = dtnorm(xs,mu.xs[kk],sqrt(sigmasq.xs[kk]),lower=xs.lwr,upper=xs.upr)
# Guard against numerically degenerate density values
d.ordinates.xs[is.nan(d.ordinates.xs)] = 0
d.ordinates.xs[is.infinite(d.ordinates.xs)] = 0
for(ii in 1:n)
z.xs[ii] = sample(z.xs.max,1,prob=pi.xs*d.ordinates.xs[ii,])
### Updating cluster probabilities
n.kk.xs = tabulate(z.xs,nbins=z.xs.max)
pi.xs = rdirichlet(1,alpha.xs/z.xs.max+n.kk.xs)
### Updating mu.xs, sigmasq.xs
# Transform the truncated-normal xs through the probability-integral map so
# that conjugate normal/inverse-gamma updates apply; clip extreme values
xs.trans = mu.xs[z.xs]+sqrt(sigmasq.xs[z.xs])*qnorm((pnorm((xs-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs]))-pnorm((xs.lwr-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs])))/(pnorm((xs.upr-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs]))-pnorm((xs.lwr-mu.xs[z.xs])/sqrt(sigmasq.xs[z.xs]))))
xs.trans[xs.trans < xs.lwr - 10] = xs.lwr - 10
xs.trans[xs.trans > xs.upr + 10] = xs.upr + 10
for(kk in 1:z.xs.max)
{
temp = which(z.xs==kk)
xspool = xs.trans[temp]
sigmasq.temp = 1/(n.kk.xs[kk]/sigmasq.xs[kk] + 1/sigmasq0.xs)
mu.temp = (sum(xspool)/sigmasq.xs[kk] + mu0.xs/sigmasq0.xs) * sigmasq.temp
mu.xs[kk] = rnorm(1,mu.temp,sqrt(sigmasq.temp))
post.a.sigmasq.xs = a.sigmasq.xs + length(xspool)/2
post.b.sigmasq.xs = b.sigmasq.xs + sum((xspool-mu.xs[kk])^2)/2
sigmasq.xs[kk] = 1/rgamma(1,shape=post.a.sigmasq.xs,rate=post.b.sigmasq.xs)
}
### Updating xs (and us)
# Metropolis-Hastings with a truncated-normal random walk (fixed sd = 0.1);
# the variance function at a proposed xs is looked up at the nearest grid point
proposed.xs = rtnorm(n,mean=current.xs,sd=0.1,lower=xs.lwr,upper=xs.upr)
TempMat = abs(matrix(rep(proposed.xs,xs.grid.length),n,xs.grid.length)-matrix(rep(xs.grid,n),n,xs.grid.length,byrow=T))
close.ind = apply(TempMat,1,which.min)
proposed.vars = B.basis.store[close.ind,]%*%exp(thetas)
proposed.prior = dtnorm(proposed.xs,mu.xs[z.xs],sqrt(sigmasq.xs[z.xs]),lower=xs.lwr,upper=xs.upr)
current.prior = dtnorm(current.xs,mu.xs[z.xs],sqrt(sigmasq.xs[z.xs]),lower=xs.lwr,upper=xs.upr)
proposed.us = ws-rep(proposed.xs,times=mis)
k.us = max(z.us)
temp.current.us.likelihood = fu_mixnorm(current.us,mean=0,sd=rep(sqrt(current.vars),times=mis),pi.us[1:k.us],params.us[1:k.us,])
temp.proposed.us.likelihood = fu_mixnorm(proposed.us,mean=0,sd=rep(sqrt(proposed.vars),times=mis),pi.us[1:k.us],params.us[1:k.us,])
current.likelihood = tapply(temp.current.us.likelihood,inds,"prod")
proposed.likelihood = tapply(temp.proposed.us.likelihood,inds,"prod")
mh.ratio = (proposed.prior * proposed.likelihood * dtnorm(current.xs,mean=proposed.xs,sd=0.1,lower=xs.lwr,upper=xs.upr)) / (current.prior * current.likelihood * dtnorm(proposed.xs,mean=current.xs,sd=0.1,lower=xs.lwr,upper=xs.upr))
mh.ratio[is.nan(mh.ratio)] = 0
u = runif(n)
inds.to.replace = (1:n)[u<mh.ratio]
xs[inds.to.replace] = current.xs[inds.to.replace] = proposed.xs[inds.to.replace]
vars[inds.to.replace] = current.vars[inds.to.replace] = proposed.vars[inds.to.replace]
us = current.us = ws - rep(xs,times=mis)
### Updating thetas
# Joint MH update of all B-spline coefficients using a multivariate normal
# proposal with covariance built from prop.sig.thetas and the tuning constants
proposed.thetas = t(rmvnorm(1,current.thetas,(diag(rep(sig.tune.thetas.1,(K.t+1)))+sig.tune.thetas.2*prop.sig.thetas)))
TempMat = abs(matrix(rep(xs,xs.grid.length),n,xs.grid.length)-matrix(rep(xs.grid,n),n,xs.grid.length,byrow=T))
close.ind = apply(TempMat,1,which.min)
proposed.vars = B.basis.store[close.ind,]%*%exp(proposed.thetas)
current.log.prior = - t(current.thetas)%*%P.t%*%current.thetas/(2*s2t)
proposed.log.prior = - t(proposed.thetas)%*%P.t%*%proposed.thetas/(2*s2t)
temp.current.likelihood = d.restricted.mix.norm(us,mean=rep(0,times=N),sd=rep(sqrt(current.vars),times=mis),params.us[z.us,])
temp.proposed.likelihood = d.restricted.mix.norm(us,mean=rep(0,times=N),sd=rep(sqrt(proposed.vars),times=mis),params.us[z.us,])
current.log.likelihood = sum(log(temp.current.likelihood))
proposed.log.likelihood = sum(log(temp.proposed.likelihood))
log.mh.ratio = proposed.log.prior + proposed.log.likelihood - current.log.likelihood - current.log.prior
if(is.nan(log.mh.ratio)) log.mh.ratio = -Inf
if(log(runif(1))<log.mh.ratio)
{
thetas = current.thetas = proposed.thetas
vars = current.vars = proposed.vars
}
### Updating s2t
s2t = 1/rgamma(1,shape=alpha.t+(K.t+1)/2,rate=beta.t+t(thetas)%*%P.t%*%thetas)
### Updating z.us
for(ii in 1:N)
{
prob.us = pi.us * d.restricted.mix.norm(us[ii],mean=0,sd=sqrt(vars[inds[ii]]), params.us)
if(sum(prob.us)==0) {prob.us=rep(1/z.us.max,z.us.max)}
z.us[ii] = sample(1:z.us.max,1,TRUE,prob.us) # New z.us[ii] drawn
}
### Updating cluster probabilities
n.kk.us = tabulate(z.us,nbins=z.us.max)
pi.us = rdirichlet(1,alpha.us/z.us.max+n.kk.us)
### Updating params.us
k.us = max(z.us) # Number of clusters
# After iteration 2000, reduce to a single MH scan per sweep
if(iii>2000) simsize.mh.us = 1
for(rr in 1:simsize.mh.us)
{
for(kk in 1:k.us)
{
temp = which(z.us==kk)
uspool = us[temp]
varspool = vars[inds[temp]]
proposed.params.us = r.tnorm.proposal.params.restricted.mix.norm(params.us[kk,])
temp.proposed.log.likelihood = log(d.restricted.mix.norm(uspool,mean=0,sd=sqrt(varspool),proposed.params.us))
temp.current.log.likelihood = log(d.restricted.mix.norm(uspool,mean=0,sd=sqrt(varspool),params.us[kk,]))
temp.proposed.log.likelihood[is.infinite(temp.proposed.log.likelihood)] = 0
temp.current.log.likelihood[is.infinite(temp.current.log.likelihood)] = 0
proposed.log.likelihood = sum(temp.proposed.log.likelihood)
current.log.likelihood = sum(temp.current.log.likelihood)
log.acc.prob = proposed.log.likelihood-current.log.likelihood
if(log(runif(1))<log.acc.prob)
params.us[kk,] = proposed.params.us
}
}
# Redraw parameters for unoccupied components (labels above k.us)
if(k.us<z.us.max)
for(kk in (k.us+1):z.us.max)
params.us[kk,] = r.proposal.params.restricted.mix.norm(1,1,3,3,3,3,3)
# Rescale the error-mixture parameters so the fitted error distribution has
# unit variance (moment restriction for identifiability)
var.es = var.e.fn(pi.us[1:k.us],params.us[1:k.us,])
params.us[1:k.us,2] = params.us[1:k.us,2]/sqrt(var.es)
params.us[1:k.us,3:4] = params.us[1:k.us,3:4]/var.es
thetas.MCMC[iii,] = thetas
# Accumulate posterior summaries after burnin
if(iii>burnin)
{
for(kk in 1:z.xs.max)
density.xs.est = density.xs.est + pi.xs[kk]*dtnorm(xs.grid,mu.xs[kk],sqrt(sigmasq.xs[kk]),lower=xs.lwr,upper=xs.upr)
k.us = max(z.us)
var.es = var.e.fn(pi.us[1:k.us],params.us[1:k.us,])
density.es.est = density.es.est + d.scaled.restricted.mix.norm(es.grid,0,1,pi.us[1:k.us],params.us[1:k.us,])
var.est = var.est + B.basis.var.grid.knots.t %*% exp(thetas) * var.es
thetas.est = thetas.est + log(var.es) + thetas
}
}
# Average the accumulated post-burnin quantities
density.xs.est = density.xs.est/(simsize-burnin)
density.es.est = density.es.est/(simsize-burnin)
var.est = var.est/(simsize-burnin)
thetas.est = thetas.est/(simsize-burnin)
thetas.final = thetas.est
xs.final = xs
var.final = sqrt(B.basis(xs.final,knots.t)%*%exp(thetas.final))
us.final = (ws-rep(xs.final,times=mis))
if(plot_results==TRUE)
{
dev.new()
par(mfrow=c(2,2))
plot(xs.grid,density.xs.est,xlab="x",ylab="f(x)",type="l",lty=1,col="green3",lwd=3)
plot(es.grid,density.es.est,xlab="e",ylab="f(e)",type="l",lty=1,col="green3",lwd=3)
points(es.grid,dnorm(es.grid),type="l",lty=1)
plot(xs,s2is,pch="*",xlab="x",ylab="v(x)")
points(var.grid,var.est,type="l",lty=1,col="blue",lwd=2)
points(var.grid,B.basis.var.grid.knots.t%*%exp(thetas.final),type="l",lty=1,col="green3",lwd=2)
par(mfrow=c(1,1))
}
params.xs = cbind(mu.xs,sigmasq.xs)
return(list(knots=knots.t, thetas=thetas.final,
xs=xs.final, us=us.final,
z.xs=z.xs, pi.xs=pi.xs, params.xs=params.xs,
z.us=z.us, pi.us=pi.us, params.us=params.us))
}
|
\name{fastnureal}
\alias{fastnureal}
\title{Fast Algorithm for Spectral Estimation of Irregularly Sampled Data over a Logarithmic
Frequency Range.}
\description{ The function \code{fastnureal} computes a spectrum of irregularly sampled data. The
resulting coefficients are represented as complex numbers, which stems
from the fact that the accelerated final summation is of complex
nature. The computation uses a divide-and-conquer scheme and allows
dramatic speedups compared to \code{\link{nureal}}.
}
\usage{
fastnureal(X, Y, omegamax, ncoeff, noctave)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{ \code{X} is the ordered sequence of abscissa values. }
\item{Y}{ \code{Y} is the sequence of corresponding ordinate values. }
\item{omegamax}{ \code{omegamax} is the top circular frequency for which the spectrum is to be computed. }
\item{ncoeff}{ \code{ncoeff} is the number of coefficients evenly distributed per octave to be calculated. }
\item{noctave}{ \code{noctave} is the number of octaves to be calculated. }
}
\value{An array of spectral coefficients in complex representation.}
\references{ http://basic-research.zkm.de }
\author{ Adolf Mathias <dolfi@zkm.de> }
\note{}
\seealso{\code{\link{nureal}}}
\examples{data(deut); fastnureal(deut[[2]],deut[[4]],1e-4,16,4);
## The function is currently defined as
function(X, Y, xlength, omegamax, ncoeff, noctave)
.C("fastnureal",
as.double(X), as.double(Y), as.integer(min(length(X),length(Y))), as.double(X[[1]],X[[length(X)]]),
rp = complex(noctave*ncoeff), as.integer(ncoeff), as.integer(noctave))$rp
}
\keyword{ts}
| /man/fastnureal.Rd | no_license | nickmckay/nuspectral | R | false | false | 1,639 | rd | \name{fastnureal}
\alias{fastnureal}
\title{Fast Algorithm for Spectral Estimation of Irregularly Sampled Data over a Logarithmic
Frequency Range.}
\description{ The function \code{fastnureal} computes a spectrum of irregularly sampled data. The
resulting coefficients are represented as complex numbers, which stems
from the fact that the accelerated final summation is of complex
nature. The computation uses a divide-and-conquer scheme and allows
dramatic speedups compared to \code{\link{nureal}}.
}
\usage{
fastnureal(X, Y, omegamax, ncoeff, noctave)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{ \code{X} is the ordered sequence of abscissa values. }
\item{Y}{ \code{Y} is the sequence of corresponding ordinate values. }
\item{omegamax}{ \code{omegamax} is the top circular frequency for which the spectrum is to be computed. }
\item{ncoeff}{ \code{ncoeff} is the number of coefficients evenly distributed per octave to be calculated. }
\item{noctave}{ \code{noctave} is the number of octaves to be calculated. }
}
\value{An array of spectral coefficients in complex representation.}
\references{ http://basic-research.zkm.de }
\author{ Adolf Mathias <dolfi@zkm.de> }
\note{}
\seealso{\code{\link{nureal}}}
\examples{data(deut); fastnureal(deut[[2]],deut[[4]],1e-4,16,4);
## The function is currently defined as
function(X, Y, xlength, omegamax, ncoeff, noctave)
.C("fastnureal",
as.double(X), as.double(Y), as.integer(min(length(X),length(Y))), as.double(X[[1]],X[[length(X)]]),
rp = complex(noctave*ncoeff), as.integer(ncoeff), as.integer(noctave))$rp
}
\keyword{ts}
|
# Dean Attali
# November 21 2014
# This is the server portion of a shiny app shows cancer data in the United
# States
source("helpers.R") # have the helper functions available
library(shiny)
library(magrittr)
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
# Get the raw data
cDatRaw <- getData()
# Get the list of colours to use for plotting
plotCols <- getPlotCols()
shinyServer(function(input, output, session) {
# =========== BUILDING THE INPUTS ===========
# Create select box input for choosing cancer types
# Dynamically build the cancer-type picker from the levels present in the data
output$cancerTypeUi <- renderUI({
  type_choices <- levels(cDatRaw$cancerType)
  selectizeInput(
    inputId = "cancerType",
    label = "",
    choices = type_choices,
    selected = NULL,
    multiple = TRUE,
    options = list(placeholder = "Select cancer types")
  )
})
# Create select box input to choose variables to show
# Multi-select for which statistics to display; everything selected by default
output$variablesUi <- renderUI({
  stat_choices <- unique(as.character(cDatRaw$stat))
  selectizeInput(
    inputId = "variablesSelect",
    label = "Variables to show:",
    choices = stat_choices,
    selected = unique(cDatRaw$stat),
    multiple = TRUE,
    options = list(placeholder = "Select variables to show")
  )
})
# Show the years selected (because of the bugs in the slider mentioned below)
# Echo the selected year range as text (works around the sliderInput-in-renderUI
# quirks noted below by showing the chosen years explicitly). Falls back to the
# full range of the data before the slider has rendered.
output$yearText <- renderText({
  yrs <- if (is.null(input$years)) range(cDatRaw$year) else input$years
  formatYearsText(yrs)
})
# Create slider for selecting year range
# NOTE: there are some minor bugs with sliderInput rendered in renderUI
# https://github.com/rstudio/shiny/issues/587
# Slider selecting the inclusive range of years to display, bounded by the
# years actually present in the data.
# NOTE(review): the `format = "####"` argument (suppress thousands separator)
# was removed from shiny::sliderInput in later shiny releases -- confirm the
# shiny version this app targets before upgrading.
output$yearUi <- renderUI({
  sliderInput("years",
              label = "",
              min = min(cDatRaw$year), max = max(cDatRaw$year),
              value = range(cDatRaw$year),
              step = 1,
              format = "####")
})
# ============== MANIPULATE THE DATA ================
# The dataset to show/plot, which is the raw data after filtering based on
# the user inputs
# The dataset to show/plot: the raw data filtered according to the user inputs.
# Only the update button is a reactive dependency; every other input is read
# inside isolate() so changing it alone does not trigger recomputation.
cDat <- reactive({
  # Take a reactive dependency on the update button only
  input$updateBtn

  # Before the dynamic inputs have rendered, just return the raw data
  if (!dataValues$appLoaded) {
    return(cDatRaw)
  }

  data <- cDatRaw

  isolate({
    # Filter to the inclusive year range from the slider
    data %<>%
      filter(year >= input$years[1] & year <= input$years[2])

    # Keep only the selected variables, if any are selected
    if (!is.null(input$variablesSelect)) {
      data %<>%
        filter(stat %in% input$variablesSelect)
    }

    # Filter cancer types. identical() is NULL-safe and && is the correct
    # scalar short-circuit operator here (the original used vectorized &).
    if (identical(input$subsetType, "specific") && !is.null(input$cancerType)) {
      data %<>%
        filter(cancerType %in% input$cancerType)
    }

    # Collapse across cancer types unless per-type display was requested:
    # mortalityRate is averaged, every other stat is summed.
    if (!input$showIndividual) {
      data %<>%
        group_by(year, stat) %>%
        summarise(value =
                    ifelse(stat[1] != "mortalityRate",
                           sum(value),
                           mean(value))) %>%
        ungroup %>%
        data.frame
    }
  })

  data
})
# The data to show in a table, which is essentially the same data as above
# with all the filters, but formatted differently:
# - Format the numbers to look better in a table
# - Change the data to wide/long format (the filtered data above is long)
# The filtered data from cDat(), reformatted for display in a table:
# numbers rendered with up to 2 significant decimal digits (whole integers
# keep no decimal point), and optionally reshaped to wide format.
cDatTable <- reactive({
  data <- cDat()

  # Refer to the column directly inside mutate() rather than re-reaching
  # into `data$value` (same result, idiomatic dplyr)
  data %<>%
    mutate(value = formatC(value, format = "fg", digits = 2))

  # One column per variable if the user asked for wide format
  if (input$tableViewForm == "wide") {
    data %<>%
      spread(stat, value)
  }

  data
})
# ============= TAB TO SHOW DATA IN TABLE ===========
# Show the data in a table
# Render the formatted, filtered dataset as an HTML table (no row names)
output$dataTable <- renderTable({
  cDatTable()
}, include.rownames = FALSE)
# Allow user to download the data, simply save as csv
# Let the user download the current table as a plain CSV file
output$downloadData <- downloadHandler(
  filename = function() "cancerData.csv",
  content = function(file) {
    write.table(x = cDatTable(),
                file = file,
                quote = FALSE, sep = ",", row.names = FALSE)
  }
)
# ============= TAB TO PLOT DATA ===========
# Function to build the plot object
# Build (but do not draw) the ggplot object for the current filtered data:
# one facet per statistic, year on the x axis, optionally one colored line
# per cancer type. Shared by the on-screen plot and the PDF download.
# NOTE(review): geom_line(show_guide = ...) was deprecated in later ggplot2
# in favor of show.legend -- confirm the ggplot2 version before upgrading.
buildPlot <- reactive({
  # Basic ggplot object
  p <-
    ggplot(cDat()) +
    aes(x = as.factor(year), y = value)

  # If showing individual cancer types, group each type together, otherwise
  # just connect all the dots as one group
  isolate(
    if (input$showIndividual) {
      p <- p + aes(group = cancerType, col = cancerType)
    } else {
      p <- p + aes(group = 1)
    }
  )

  # Facet per variable, add points and lines, and make the graph pretty
  p <- p +
    facet_wrap(~stat, scales = "free_y", ncol = 2) +
    geom_point() +
    geom_line(show_guide = FALSE) +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
    scale_color_manual(values = plotCols) +
    theme(legend.position = "bottom") +
    guides(color = guide_legend(title = "",
                                ncol = 4,
                                override.aes = list(size = 4))) +
    xlab("Year") + ylab("") +
    theme(panel.grid.minor = element_blank(),
          panel.grid.major.x = element_blank())

  p
})
# Show the plot, use the width/height that javascript calculated
# Draw the plot at the square size computed client-side (input$plotDim, px)
output$dataPlot <- renderPlot(
  {
    buildPlot()
  },
  height = function() input$plotDim,
  width = function() input$plotDim,
  units = "px",
  res = 100
)
# Allow user to download the plot
# Let the user download the current plot as a 12x12 inch PDF
output$downloadPlot <- downloadHandler(
  filename = function() {
    "cancerDataPlot.pdf"
  },
  content = function(file) {
    pdf(file = file,
        width = 12,
        height = 12)
    # Close the device even if print() errors; the original left the
    # device open on failure.
    on.exit(dev.off(), add = TRUE)
    print(buildPlot())
  }
)
# ========== LOADING THE APP ==========
# We need to have a quasi-variable flag to indicate when the app is loaded
# Reactive flag: FALSE until the dynamically rendered inputs exist.
dataValues <- reactiveValues(
  appLoaded = FALSE
)

# Wait for the years input to be rendered as a proxy to determine when the app
# is loaded. Once loaded, call the javascript function to fix the plot area
# (see www/helper-script.js for more information)
observe({
  # Already flagged -- this observer has nothing left to do
  if (dataValues$appLoaded) {
    return(NULL)
  }
  if(!is.null(input$years)) {
    dataValues$appLoaded <- TRUE
    # Ask the browser to size the plot area to match the results tab
    session$sendCustomMessage(type = "equalizePlotHeight",
                              message = list(target = "dataPlot",
                                             by = "resultsTab"))
  }
})

# Show form content and hide loading message
session$sendCustomMessage(type = "hide",
                          message = list(id = "loadingContent"))
session$sendCustomMessage(type = "show",
                          message = list(id = "allContent"))
})
| /hw/hw11_shiny-app/cancer-data/server.R | no_license | abhatia2014/UBC-STAT545 | R | false | false | 6,658 | r | # Dean Attali
# November 21 2014
# This is the server portion of a shiny app shows cancer data in the United
# States
source("helpers.R") # have the helper functions avaiable
library(shiny)
library(magrittr)
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
# Get the raw data
cDatRaw <- getData()
# Get the list of colours to use for plotting
plotCols <- getPlotCols()
shinyServer(function(input, output, session) {
# =========== BUILDING THE INPUTS ===========
# Create select box input for choosing cancer types
output$cancerTypeUi <- renderUI({
selectizeInput("cancerType", "",
levels(cDatRaw$cancerType),
selected = NULL, multiple = TRUE,
options = list(placeholder = "Select cancer types"))
})
# Create select box input to choose variables to show
output$variablesUi <- renderUI({
selectizeInput("variablesSelect", "Variables to show:",
unique(as.character(cDatRaw$stat)),
selected = unique(cDatRaw$stat), multiple = TRUE,
options = list(placeholder = "Select variables to show"))
})
# Show the years selected (because of the bugs in the slider mentioned below)
output$yearText <- renderText({
if (is.null(input$years)) {
return(formatYearsText(range(cDatRaw$year)))
}
formatYearsText(input$years)
})
# Create slider for selecting year range
# NOTE: there are some minor bugs with sliderInput rendered in renderUI
# https://github.com/rstudio/shiny/issues/587
output$yearUi <- renderUI({
sliderInput("years",
label = "",
min = min(cDatRaw$year), max = max(cDatRaw$year),
value = range(cDatRaw$year),
step = 1,
format = "####")
})
# ============== MANIPULATE THE DATA ================
# The dataset to show/plot, which is the raw data after filtering based on
# the user inputs
cDat <- reactive({
# Add dependency on the update button (only update when button is clicked)
input$updateBtn
# If the app isn't fully loaded yet, just return the raw data
if (!dataValues$appLoaded) {
return(cDatRaw)
}
data <- cDatRaw
# Add all the filters to the data based on the user inputs
# wrap in an isolate() so that the data won't update every time an input
# is changed
isolate({
# Filter years
data %<>%
filter(year >= input$years[1] & year <= input$years[2])
# Filter what variables to show
if (!is.null(input$variablesSelect)) {
data %<>%
filter(stat %in% input$variablesSelect)
}
# Filter cancer types
if (input$subsetType == "specific" & !is.null(input$cancerType)) {
data %<>%
filter(cancerType %in% input$cancerType)
}
# See if the user wants to show data per cancer type or all combined
if (!input$showIndividual) {
data %<>%
group_by(year, stat) %>%
summarise(value =
ifelse(stat[1] != "mortalityRate",
sum(value),
mean(value))) %>%
ungroup %>%
data.frame
}
})
data
})
# The data to show in a table, which is essentially the same data as above
# with all the filters, but formatted differently:
# - Format the numbers to look better in a table
# - Change the data to wide/long format (the filtered data above is long)
cDatTable <- reactive({
data <- cDat()
# In numeric columns show 2 digits past the decimal and don't show
# decimal if the number is a whole integer
data %<>%
mutate(value = formatC(data$value, format = "fg", digits = 2))
# Change the data to wide format if the user wants it
if (input$tableViewForm == "wide") {
data %<>%
spread(stat, value)
}
data
})
# ============= TAB TO SHOW DATA IN TABLE ===========
# Show the data in a table
output$dataTable <- renderTable(
{
cDatTable()
},
include.rownames = FALSE
)
# Allow user to download the data, simply save as csv
output$downloadData <- downloadHandler(
filename = function() {
"cancerData.csv"
},
content = function(file) {
write.table(x = cDatTable(),
file = file,
quote = FALSE, sep = ",", row.names = FALSE)
}
)
# ============= TAB TO PLOT DATA ===========
# Function to build the plot object
buildPlot <- reactive({
# Basic ggplot object
p <-
ggplot(cDat()) +
aes(x = as.factor(year), y = value)
# If showing individual cancer types, group each type together, otherwise
# just connect all the dots as one group
isolate(
if (input$showIndividual) {
p <- p + aes(group = cancerType, col = cancerType)
} else {
p <- p + aes(group = 1)
}
)
# Facet per variable, add points and lines, and make the graph pretty
p <- p +
facet_wrap(~stat, scales = "free_y", ncol = 2) +
geom_point() +
geom_line(show_guide = FALSE) +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
scale_color_manual(values = plotCols) +
theme(legend.position = "bottom") +
guides(color = guide_legend(title = "",
ncol = 4,
override.aes = list(size = 4))) +
xlab("Year") + ylab("") +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank())
p
})
# Show the plot, use the width/height that javascript calculated
output$dataPlot <-
renderPlot(
{
buildPlot()
},
height = function(){ input$plotDim },
width = function(){ input$plotDim },
units = "px",
res = 100
)
# Allow user to download the plot
output$downloadPlot <- downloadHandler(
filename = function() {
"cancerDataPlot.pdf"
},
content = function(file) {
pdf(file = file,
width = 12,
height = 12)
print(buildPlot())
dev.off()
}
)
# ========== LOADING THE APP ==========
# We need to have a quasi-variable flag to indicate when the app is loaded
dataValues <- reactiveValues(
appLoaded = FALSE
)
# Wait for the years input to be rendered as a proxy to determine when the app
# is loaded. Once loaded, call the javascript funtion to fix the plot area
# (see www/helper-script.js for more information)
observe({
if (dataValues$appLoaded) {
return(NULL)
}
if(!is.null(input$years)) {
dataValues$appLoaded <- TRUE
session$sendCustomMessage(type = "equalizePlotHeight",
message = list(target = "dataPlot",
by = "resultsTab"))
}
})
# Show form content and hide loading message
session$sendCustomMessage(type = "hide",
message = list(id = "loadingContent"))
session$sendCustomMessage(type = "show",
message = list(id = "allContent"))
})
|
# File name to send by e-mail:
# nombre_apellido_inferencia_profesor.R
# example:
# juan_perez_inferencia_beltran.R
# Remember to run lines 6 and 11 (library + read_excel) before the rest
library(readxl)

# 1. Use read_excel to load the data stored in the Excel file
misdatos <- read_excel("datos_e.xlsx")

# 2. For the variables "pib_per_capita" (GDP per capita) and
#    "esperanza_de_vida" (life expectancy) compute:
# 2.1 the mean of each one (4 pts)
# 2.2 the standard deviation of each one (4 pts)
# 2.3 the number of observations (n) of each one (4 pts)
# ANSWERS
# 2.1 means
mean(misdatos$pib_per_capita)
mean(misdatos$esperanza_de_vida)
# 2.2 standard deviations
sd(misdatos$pib_per_capita)
sd(misdatos$esperanza_de_vida)
# 2.3 number of observations: the columns are vectors, so length() gives n.
# (The original used dim.data.frame(), which returns NULL for a vector and
# therefore does not answer the question.)
length(misdatos$pib_per_capita)
length(misdatos$esperanza_de_vida)

# 3. Plot the histograms of these three variables:
#    "pib_per_capita" (2 pts), "esperanza_de_vida" (2 pts) and the natural
#    logarithm of the "poblacion" (population) variable (4 pts).
#    Any R function/package that draws histograms may be used.
# ANSWERS
hist(misdatos$pib_per_capita)
hist(misdatos$esperanza_de_vida)
hist(log(misdatos$poblacion))
| /2019_2/sol1/sol1_estadistica_R_mayer/elisa_magna_estadistica_mayer.R | no_license | ricardomayerb/ico8305 | R | false | false | 1,173 | r | # nombre del archivo a mandar por correo:
# nombre_apellido_inferencia_profesor.R
# ejemplo:
# juan_perez_inferencia_beltran.R
# No olvide ejecutar las líneas 6 y 11 antes de empezar el resto del trabajo
library(readxl)
# 1. Use la función read_excel para cargar los datos que se encuentran en el archivo excel
misdatos <- read_excel("datos_e.xlsx")
# 2. Para las variables "pib_per_capita" y "esperanza_de_vida" compute lo siguiente
# 2.1 la media cada una (4 pts)
# 2.2 la desviación estandar de cada una (4 pts)
# 2.3 la cantidad de observaciones (n) de cada una (4 pts)
#RESPUESTAS
#2.1
mean(misdatos$pib_per_capita)
mean(misdatos$esperanza_de_vida)
#2.2
sd(misdatos$pib_per_capita)
sd(misdatos$esperanza_de_vida)
#2.3
dim.data.frame(misdatos$pib_per_capita)
dim.data.frame(misdatos$esperanza_de_vida)
# 3. Grafique los histogramas de estas tres variables:
# "pib_per_capita" (2 pto), "esperanza_de_vida" (2 pto) y el logaritmo natural de la variable
# "población" (4 pts).
# Puede usar cualquier función y paquete de R que grafique histogramas
#RESPUESTAS
hist(misdatos$pib_per_capita)
hist(misdatos$esperanza_de_vida)
hist(log(misdatos$poblacion))
|
# Runs to chart: one row per model run, columns are
#   [1] run directory, [2] coherence report file, [3] eval log file
# (20 rows x 3 cols; consumed row-by-row by makeCharts at the bottom of
# this script)
files <- matrix(c(
	'nan-random-t5000-model-b10-k0.9-20130528-0703', 'coherence-20130531-0456.txt', 'eval-20130601-1256.txt',
	'nan-random-t5000-model-b100-k0.9-20130528-0907', 'coherence-20130529-2149.txt', 'eval-20130530-0258.txt',
	'nan-random-t5000-model-b1000-k0.9-20130528-0808', 'coherence-20130529-1044.txt', 'eval-20130529-1229.txt',
	'nan-random-t5000-model-b50-k0.9-20130528-0907', 'coherence-20130529-2150.txt', 'eval-20130530-0551.txt',
	'nan-random-t5000-model-b500-k0.5-20130528-0654', 'coherence-20130529-0657.txt', 'eval-20130529-0840.txt',
	'nan-random-t5000-model-b500-k0.6-20130528-0656', 'coherence-20130529-0709.txt', 'eval-20130529-0901.txt',
	'nan-random-t5000-model-b500-k0.7-20130528-0656', 'coherence-20130529-0755.txt', 'eval-20130529-0959.txt',
	'nan-random-t5000-model-b500-k0.8-20130528-0703', 'coherence-20130529-0803.txt', 'eval-20130529-1013.txt',
	'nan-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130530-1952.txt', 'eval-20130530-2209.txt',
	'nan-random-t5000-model-b500-k1.0-20130528-0703', 'coherence-20130529-1559.txt', 'eval-20130529-1807.txt',
	'nyt-random-t5000-model-b10-k0.9-20130528-0627', 'coherence-20130605-0831.txt', 'eval-20130601-1646.txt',
	'nyt-random-t5000-model-b100-k0.9-20130528-0648', 'coherence-20130531-1706.txt', 'eval-20130601-0218.txt',
	'nyt-random-t5000-model-b1000-k0.9-20130528-0653', 'coherence-20130530-2115.txt', 'eval-20130530-2324.txt',
	'nyt-random-t5000-model-b50-k0.9-20130528-0640', 'coherence-20130530-1948.txt', 'eval-20130531-1256.txt',
	'nyt-random-t5000-model-b500-k0.5-20130528-0558', 'coherence-20130529-2150.txt', 'eval-20130530-0022.txt',
	'nyt-random-t5000-model-b500-k0.6-20130528-0559', 'coherence-20130529-2152.txt', 'eval-20130530-0030.txt',
	'nyt-random-t5000-model-b500-k0.7-20130528-0611', 'coherence-20130530-0723.txt', 'eval-20130530-1001.txt',
	'nyt-random-t5000-model-b500-k0.8-20130528-0614', 'coherence-20130530-0723.txt', 'eval-20130530-1025.txt',
	'nyt-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130531-0455.txt', 'eval-20130531-0756.txt',
	'nyt-random-t5000-model-b500-k1.0-20130528-0621', 'coherence-20130531-1702.txt', 'eval-20130531-2002.txt'
), 20, 3, byrow=T)
# Read a space-aligned evaluation log and parse it as a tab-separated table.
#
# dir/file are joined under `base`; the default keeps the original hard-coded
# data location, so existing calls behave identically.
# Returns a data.frame with one column per header field.
readEvalFile <- function(dir, file, base = '/Users/jonathan/Desktop/data') {
  f <- readLines(paste(base, dir, file, sep = '/'))
  f <- gsub("^ *(.*) *$", "\\1", f)  # strip leading blanks
  f <- gsub(" +", "\t", f)           # collapse runs of spaces into tabs
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)     # the original leaked this connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Read a coherence report: keep the header line plus lines 3..302 (line 2 is
# dropped), then parse the space-aligned text as a tab-separated table.
#
# dir/file are joined under `base`; the default keeps the original hard-coded
# data location, so existing calls behave identically.
readCoherenceFile <- function(dir, file, base = '/Users/jonathan/Desktop/data') {
  f <- readLines(paste(base, dir, file, sep = '/'))
  f <- f[c(1, 3:302)]                # header + 300 topic rows; line 2 skipped
  f <- gsub("^ *(.*) *$", "\\1", f)  # strip leading blanks
  f <- gsub(" +", "\t", f)           # collapse runs of spaces into tabs
  tc <- textConnection(f)
  on.exit(close(tc), add = TRUE)     # the original leaked this connection
  read.table(tc, header = TRUE, sep = "\t")
}
# Render a 2x2 summary chart (likelihood curve, topic coherence, topic
# weights, coherence-vs-weight scatter) to <data root>/<dir>/chart.pdf.
createPdf <- function(dir, data.eval, data.coh) {
  pdf(paste('/Users/jonathan/Desktop/data', dir, "chart.pdf", sep = '/'))
  on.exit(dev.off(), add = TRUE)  # close the device even if a plot errors
  par(mfrow = c(2, 2), las = 1, pin = c(2, 2), oma = c(0, 0, 2, 0))

  # Per-word log likelihood over the number of documents seen
  plot(data.eval$total.docs, data.eval$per.word, xaxt = 'n',
       ylab = 'likelihood', xlab = 'docs', type = 'l',
       main = "Per-word log likelihood")
  maxx <- round(max(data.eval$total.docs), -5)  # round to nearest 100k for ticks
  aty <- c(0, maxx / 2, maxx)
  axis(1, at = aty, labels = formatC(aty, format = "d"))
  box()

  # Topics sorted by coherence, descending
  plot(sort(data.coh$coherence, TRUE), axes = FALSE, ylab = 'coherence',
       xlab = 'topic', type = 'l', main = 'Topic Coherence')
  axis(2)
  box()

  # Topics sorted by weight, descending
  plot(sort(data.coh$weight, TRUE), axes = FALSE, ylab = 'weight',
       xlab = 'topic', type = 'l', main = 'Topic Weights')
  axis(2)
  box()

  # Coherence against weight with a least-squares trend line
  plot(data.coh$weight, data.coh$coherence, axes = FALSE,
       ylab = 'coherence', xlab = 'weight', main = 'Coherence vs. Weight')
  abline(lm(data.coh$coherence ~ data.coh$weight), lty = 2)
  axis(2)
  box()

  title(main = dir, outer = TRUE)
}
# Build the chart PDF for one run.
# ff: c(run directory, coherence report file, eval log file)
makeCharts <- function(ff) {
  eval_data <- readEvalFile(ff[1], ff[3])
  coherence_data <- readCoherenceFile(ff[1], ff[2])
  createPdf(ff[1], eval_data, coherence_data)
}
apply(files, 1, makeCharts); | /src/main/r/plot-hdpa-data.R | no_license | jesterhazy/hdpa | R | false | false | 3,816 | r | files <- matrix(c(
'nan-random-t5000-model-b10-k0.9-20130528-0703', 'coherence-20130531-0456.txt', 'eval-20130601-1256.txt',
'nan-random-t5000-model-b100-k0.9-20130528-0907', 'coherence-20130529-2149.txt', 'eval-20130530-0258.txt',
'nan-random-t5000-model-b1000-k0.9-20130528-0808', 'coherence-20130529-1044.txt', 'eval-20130529-1229.txt',
'nan-random-t5000-model-b50-k0.9-20130528-0907', 'coherence-20130529-2150.txt', 'eval-20130530-0551.txt',
'nan-random-t5000-model-b500-k0.5-20130528-0654', 'coherence-20130529-0657.txt', 'eval-20130529-0840.txt',
'nan-random-t5000-model-b500-k0.6-20130528-0656', 'coherence-20130529-0709.txt', 'eval-20130529-0901.txt',
'nan-random-t5000-model-b500-k0.7-20130528-0656', 'coherence-20130529-0755.txt', 'eval-20130529-0959.txt',
'nan-random-t5000-model-b500-k0.8-20130528-0703', 'coherence-20130529-0803.txt', 'eval-20130529-1013.txt',
'nan-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130530-1952.txt', 'eval-20130530-2209.txt',
'nan-random-t5000-model-b500-k1.0-20130528-0703', 'coherence-20130529-1559.txt', 'eval-20130529-1807.txt',
'nyt-random-t5000-model-b10-k0.9-20130528-0627', 'coherence-20130605-0831.txt', 'eval-20130601-1646.txt',
'nyt-random-t5000-model-b100-k0.9-20130528-0648', 'coherence-20130531-1706.txt', 'eval-20130601-0218.txt',
'nyt-random-t5000-model-b1000-k0.9-20130528-0653', 'coherence-20130530-2115.txt', 'eval-20130530-2324.txt',
'nyt-random-t5000-model-b50-k0.9-20130528-0640', 'coherence-20130530-1948.txt', 'eval-20130531-1256.txt',
'nyt-random-t5000-model-b500-k0.5-20130528-0558', 'coherence-20130529-2150.txt', 'eval-20130530-0022.txt',
'nyt-random-t5000-model-b500-k0.6-20130528-0559', 'coherence-20130529-2152.txt', 'eval-20130530-0030.txt',
'nyt-random-t5000-model-b500-k0.7-20130528-0611', 'coherence-20130530-0723.txt', 'eval-20130530-1001.txt',
'nyt-random-t5000-model-b500-k0.8-20130528-0614', 'coherence-20130530-0723.txt', 'eval-20130530-1025.txt',
'nyt-random-t5000-model-b500-k0.9-20130529-0546', 'coherence-20130531-0455.txt', 'eval-20130531-0756.txt',
'nyt-random-t5000-model-b500-k1.0-20130528-0621', 'coherence-20130531-1702.txt', 'eval-20130531-2002.txt'
), 20, 3, byrow=T)
readEvalFile <- function(dir, file) {
f <- readLines(paste('/Users/jonathan/Desktop/data', dir, file, sep='/'))
f <- gsub("^ *(.*) *$", "\\1", f)
f <- gsub(" +", "\t", f)
tc <- textConnection(f)
d = read.table(tc, header=TRUE, sep="\t")
return (d)
}
readCoherenceFile <- function(dir, file) {
f <- readLines(paste('/Users/jonathan/Desktop/data', dir, file, sep='/'))
f <- f[c(1, 3:302)]
f <- gsub("^ *(.*) *$", "\\1", f)
f <- gsub(" +", "\t", f)
tc <- textConnection(f)
d = read.table(tc, header=TRUE, sep="\t")
return (d)
}
createPdf <- function(dir, data.eval, data.coh) {
pdf(paste('/Users/jonathan/Desktop/data', dir, "chart.pdf", sep='/'))
par(mfrow=c(2,2), las=1, pin=c(2,2), oma=c(0, 0, 2 ,0))
plot(data.eval$total.docs, data.eval$per.word, xaxt='n', ylab='likelihood', xlab='docs', type='l', main="Per-word log likelihood")
maxx <- round(max(data.eval$total.docs), -5)
aty = c(0, maxx/2, maxx)
axis(1, at=aty, labels=formatC(aty, format="d"))
box()
plot(sort(data.coh$coherence, T), axes=F, ylab='coherence', xlab='topic', type='l', main='Topic Coherence')
axis(2)
box()
plot(sort(data.coh$weight, T), axes=F, ylab='weight', xlab='topic', type='l', main='Topic Weights')
axis(2)
box()
plot(data.coh$weight, data.coh$coherence, axes=F, ylab='coherence', xlab='weight', main='Coherence vs. Weight')
abline(lm(data.coh$coherence~data.coh$weight), lty=2)
axis(2)
box()
title(main=dir, outer=T)
dev.off()
}
makeCharts <- function(ff) {
data.eval <- readEvalFile(ff[1], ff[3])
data.coh <- readCoherenceFile(ff[1], ff[2])
createPdf(ff[1], data.eval, data.coh)
}
apply(files, 1, makeCharts); |
testlist <- list(testX = c(-3.15282414831747e-172, 2.1657079772533e+121, -4.28180735645116e-215, 3.57880847663814e-219, 8.57112108438856e+243, -2.21286571415676e-53, -8.84274383342004e+106, -4.44218759342859e-266, -1.09369605054127e+143, -1.88529048712039e-22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136), .Dim = c(5L, 5L )))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) | /dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609866593-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,130 | r | testlist <- list(testX = c(-3.15282414831747e-172, 2.1657079772533e+121, -4.28180735645116e-215, 3.57880847663814e-219, 8.57112108438856e+243, -2.21286571415676e-53, -8.84274383342004e+106, -4.44218759342859e-266, -1.09369605054127e+143, -1.88529048712039e-22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136), .Dim = c(5L, 5L )))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) |
#week3.R
#q1
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileUrl, destfile = "./data.csv", method="curl")
dateDownloaded <- date()
dat <- read.csv("data.csv")
head(dat)
df <- tbl_df(dat)
head(df)
# Create a logical vector that identifies the households on greater than 10 acres
# who sold more than $10,000 worth of agriculture products. Assign that logical
# vector to the variable agricultureLogical. Apply the which() function like this
# to identify the rows of the data frame where the logical vector is TRUE.
# which(agricultureLogical)
# What are the first 3 values that result?
agricultureLogical <- dat$ACR == 3 & dat$AGS == 6
head(which(agricultureLogical), 3)
# q2
library(jpeg)
download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg'
, 'jeff.jpg'
, mode='wb' )
picture <- jpeg::readJPEG('jeff.jpg'
, native=TRUE)
quantile(picture, probs = c(0.3, 0.8) )
#q3
library("data.table")
FGDP <- data.table::fread('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv'
, skip=4
, nrows = 190
, select = c(1, 2, 4, 5)
, col.names=c("CountryCode", "Rank", "Economy", "Total")
)
FEDSTATS_Country <- data.table::fread('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv'
)
mergedDT <- merge(FGDP, FEDSTATS_Country, by = 'CountryCode')
nrow(mergedDT)
mergedDT[order(-Rank)][13,.(Economy)]
#q4
mergedDT[`Income Group` == "High income: OECD"
, lapply(.SD, mean)
, .SDcols = c("Rank")
, by = "Income Group"]
mergedDT[`Income Group` == "High income: nonOECD"
, lapply(.SD, mean)
, .SDcols = c("Rank")
, by = "Income Group"]
# q5
library('dplyr')
breaks <- quantile(mergedDT[, Rank], probs = seq(0, 1, 0.2), na.rm = TRUE)
mergedDT$quantileGDP <- cut(mergedDT[, Rank], breaks = breaks)
mergedDT[`Income Group` == "Lower middle income", .N, by = c("Income Group", "quantileGDP")]
| /getcleandata/week3/week3.R | no_license | theoneandoney/datasciencecoursera | R | false | false | 2,170 | r | #week3.R
#q1
# Download the ACS housing survey and flag households on >10 acres (ACR == 3)
# that sold more than $10,000 of agriculture products (AGS == 6).
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileUrl, destfile = "./data.csv", method="curl")
dateDownloaded <- date()
dat <- read.csv("data.csv")
head(dat)
df <- tbl_df(dat)
head(df)
# Create a logical vector that identifies the households on greater than 10 acres
# who sold more than $10,000 worth of agriculture products. Assign that logical
# vector to the variable agricultureLogical. Apply the which() function like this
# to identify the rows of the data frame where the logical vector is TRUE.
# which(agricultureLogical)
# What are the first 3 values that result?
agricultureLogical <- dat$ACR == 3 & dat$AGS == 6
head(which(agricultureLogical), 3)
# q2
# Read a JPEG as a native raster and report the 30th/80th quantiles of the
# packed pixel values (native=TRUE yields an integer matrix).
library(jpeg)
download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg'
              , 'jeff.jpg'
              , mode='wb' )
picture <- jpeg::readJPEG('jeff.jpg'
                          , native=TRUE)
quantile(picture, probs = c(0.3, 0.8) )
#q3
# Pull the GDP table (skipping its 4-line header, 190 ranked economies) and
# the education stats table, then merge them on the 3-letter country code.
library("data.table")
FGDP <- data.table::fread('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv'
                          , skip=4
                          , nrows = 190
                          , select = c(1, 2, 4, 5)
                          , col.names=c("CountryCode", "Rank", "Economy", "Total")
                          )
FEDSTATS_Country <- data.table::fread('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv'
                          )
mergedDT <- merge(FGDP, FEDSTATS_Country, by = 'CountryCode')
nrow(mergedDT)
# 13th economy by descending GDP rank.
mergedDT[order(-Rank)][13,.(Economy)]
#q4
# Average GDP ranking within the two high-income groups (via .SD/.SDcols).
mergedDT[`Income Group` == "High income: OECD"
         , lapply(.SD, mean)
         , .SDcols = c("Rank")
         , by = "Income Group"]
mergedDT[`Income Group` == "High income: nonOECD"
         , lapply(.SD, mean)
         , .SDcols = c("Rank")
         , by = "Income Group"]
# q5
# q5: split GDP Rank into quintiles, then cross-tabulate quintile membership
# against income group for the "Lower middle income" countries.
library(dplyr)
breaks <- quantile(mergedDT[, Rank], probs = seq(0, 1, 0.2), na.rm = TRUE)
# include.lowest = TRUE: without it, cut() leaves the minimum Rank (the
# top-ranked economy) as NA because the first interval is open on the left.
mergedDT$quantileGDP <- cut(mergedDT[, Rank], breaks = breaks,
                            include.lowest = TRUE)
mergedDT[`Income Group` == "Lower middle income", .N, by = c("Income Group", "quantileGDP")]
|
/Talleres/MetodoAitkenPolares.R | no_license | DivaLover/Clase_Analisis_Numerico | R | false | false | 788 | r | ||
# plot3.R -- energy sub-metering for 2007-02-01 and 2007-02-02.
# Reads the UCI household power consumption file and draws the three
# sub-metering series against time, saving the result to plot3.png.
dataSet <- "household_power_consumption.txt"
# The raw file encodes missing values as "?"; declaring na.strings makes the
# as.numeric() conversions below clean instead of coercing "?" with warnings.
data <- read.table(dataSet, header=TRUE, sep=";", stringsAsFactors=FALSE,
                   dec=".", na.strings="?")
# Keep only the two days of interest (Date is a d/m/Y character column).
Sample <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine date and time strings into POSIXlt timestamps for the x-axis.
datetime <- strptime(paste(Sample$Date, Sample$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GAP <- as.numeric(Sample$Global_active_power)  # read but not plotted here
SM1 <- as.numeric(Sample$Sub_metering_1)
SM2 <- as.numeric(Sample$Sub_metering_2)
SM3 <- as.numeric(Sample$Sub_metering_3)
# Render the three series to a 480x480 PNG with a legend.
png("plot3.png", width=480, height=480)
plot(datetime, SM1, type="l", ylab="Energy Sub Metering", xlab="")
lines(datetime, SM2, type="l", col="red")
lines(datetime, SM3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
data <- read.table(dataSet, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
Sample<- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(Sample$Date, Sample$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GAP <- as.numeric(Sample$Global_active_power)
SM1 <- as.numeric(Sample$Sub_metering_1)
SM2 <- as.numeric(Sample$Sub_metering_2)
SM3 <- as.numeric(Sample$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, SM1, type="l", ylab="Energy Sub Metering", xlab="")
lines(datetime, SM2, type="l", col="red")
lines(datetime, SM3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() |
# App dependencies. NOTE(review): library() is preferred over require() for
# hard dependencies -- require() only warns (and returns FALSE) when a
# package is missing, so failures surface later with confusing errors.
require(shiny)
require(ggplot2)
require(rCharts)
# ISLR Auto dataset: one row per car model; loaded once at app start-up and
# referenced by both the UI (column names) and the server (plot data).
Auto <- read.csv('../../../data_sets/Auto.csv')
# UI: a three-tab navbar (boxplot, histogram, scatter plot matrix) over the
# Auto dataset. Each tab pairs a narrow control column with a wide plot.
ui <- fluidPage(navbarPage(
    title = h2('Auto'),
    tabPanel(h4('Boxplot'),
             fluidRow(
                 column(2,
                        selectInput('cat',
                                    'Category',
                                    c('cylinders', 'year', 'origin'),
                                    selected = 'cylinders'),
                        selectInput('m',
                                    'Measure',
                                    c('mpg', 'displacement', 'horsepower', 'weight', 'acceleration'),
                                    selected = 'mpg')
                 ),
                 column(10,
                        plotOutput('boxplot')
                 )
             )
    ),
    tabPanel(h4('Histogram'),
             fluidRow(
                 column(2,
                        selectInput('f_hist',
                                    'Feature',
                                    names(Auto)[1:7],
                                    # FIX: `selected` must be one of the choice
                                    # names, not the whole Auto$mpg data column.
                                    selected = 'mpg'),
                        numericInput('bins',
                                     'Bins',
                                     value = 8,
                                     min = 3,
                                     max = 23,
                                     step = 5
                        ),
                        h6("NOTE: not all bin values will result in a modified plot.")
                 ),
                 column(10,
                        plotOutput('hist')
                 )
             )
    ),
    tabPanel(h4('Scatter Plot Matrix'),
             fluidRow(
                 column(2,
                        # Choice values are column indices; the server converts
                        # them back with as.numeric() before subsetting Auto.
                        checkboxGroupInput('f_scat',
                                           'Features',
                                           c('mpg' = 1,
                                             'cylinders' = 2,
                                             'displacement' = 3,
                                             'horsepower' = 4,
                                             'weight' = 5,
                                             'acceleration' = 6,
                                             'year' = 7,
                                             'origin' = 8),
                                           selected = c(1, 3, 4, 5, 6))
                 ),
                 column(10,
                        plotOutput("scat")
                 )
             )
    )
)
)
# Server: renders the boxplot, histogram, and scatter-plot matrix from the
# user's control selections. `Auto` is loaded at app start-up (global scope).
server <- function(input, output){
    # Boxplot of the chosen measure, grouped (and coloured) by the chosen
    # categorical feature.
    output$boxplot <- renderPlot({
        category <- Auto[,input$cat]
        measure <- Auto[,input$m]
        fill <- as.factor(category)
        x_name <- input$cat
        y_name <- input$m
        ggplot(Auto, aes(as.factor(category), as.numeric(measure), fill=fill)) +
            geom_boxplot() +
            scale_fill_discrete(name = x_name) +
            labs(title = toupper(paste(y_name, 'by', x_name)),
                 x = x_name, y = y_name)
    })
    # Histogram of the chosen feature; factor columns are converted back to
    # numeric via character before binning. `breaks` is only a suggestion to
    # hist(), which explains the NOTE in the UI about unchanged plots.
    output$hist <- renderPlot({
        n_bins <- input$bins
        feature <- Auto[,input$f_hist]
        if(class(feature) == "factor") {
            hist(as.numeric(as.character(feature)),
                 xlab = as.character(input$f_hist),
                 main = '',
                 col = 'lightblue')
        } else {
            hist(feature,
                 breaks = n_bins,
                 xlab = as.character(input$f_hist),
                 main = '',
                 col = 'lightblue')
        }
    })
    # Scatter plot matrix over the selected feature columns (checkbox values
    # arrive as character indices and are converted to numeric here).
    output$scat <- renderPlot({
        selected_cols <- as.numeric(input$f_scat)
        pairs(Auto[, c(selected_cols)])
    })
}
shinyApp(ui = ui, server = server) | /ch02_Statistical_Learning/shiny/auto_All/App.R | no_license | GucciTheCarpenter/ISLR_labs | R | false | false | 3,939 | r | require(shiny)
require(ggplot2)
require(rCharts)
Auto <- read.csv('../../../data_sets/Auto.csv')
# UI: a three-tab navbar (boxplot, histogram, scatter plot matrix) over the
# Auto dataset. Each tab pairs a narrow control column with a wide plot.
ui <- fluidPage(navbarPage(
    title = h2('Auto'),
    tabPanel(h4('Boxplot'),
             fluidRow(
                 column(2,
                        selectInput('cat',
                                    'Category',
                                    c('cylinders', 'year', 'origin'),
                                    selected = 'cylinders'),
                        selectInput('m',
                                    'Measure',
                                    c('mpg', 'displacement', 'horsepower', 'weight', 'acceleration'),
                                    selected = 'mpg')
                 ),
                 column(10,
                        plotOutput('boxplot')
                 )
             )
    ),
    tabPanel(h4('Histogram'),
             fluidRow(
                 column(2,
                        selectInput('f_hist',
                                    'Feature',
                                    names(Auto)[1:7],
                                    # FIX: `selected` must be one of the choice
                                    # names, not the whole Auto$mpg data column.
                                    selected = 'mpg'),
                        numericInput('bins',
                                     'Bins',
                                     value = 8,
                                     min = 3,
                                     max = 23,
                                     step = 5
                        ),
                        h6("NOTE: not all bin values will result in a modified plot.")
                 ),
                 column(10,
                        plotOutput('hist')
                 )
             )
    ),
    tabPanel(h4('Scatter Plot Matrix'),
             fluidRow(
                 column(2,
                        # Choice values are column indices; the server converts
                        # them back with as.numeric() before subsetting Auto.
                        checkboxGroupInput('f_scat',
                                           'Features',
                                           c('mpg' = 1,
                                             'cylinders' = 2,
                                             'displacement' = 3,
                                             'horsepower' = 4,
                                             'weight' = 5,
                                             'acceleration' = 6,
                                             'year' = 7,
                                             'origin' = 8),
                                           selected = c(1, 3, 4, 5, 6))
                 ),
                 column(10,
                        plotOutput("scat")
                 )
             )
    )
)
)
# Server: renders the boxplot, histogram, and scatter-plot matrix from the
# user's control selections. `Auto` is loaded at app start-up (global scope).
server <- function(input, output){
    # Boxplot of the chosen measure, grouped (and coloured) by the chosen
    # categorical feature.
    output$boxplot <- renderPlot({
        category <- Auto[,input$cat]
        measure <- Auto[,input$m]
        fill <- as.factor(category)
        x_name <- input$cat
        y_name <- input$m
        ggplot(Auto, aes(as.factor(category), as.numeric(measure), fill=fill)) +
            geom_boxplot() +
            scale_fill_discrete(name = x_name) +
            labs(title = toupper(paste(y_name, 'by', x_name)),
                 x = x_name, y = y_name)
    })
    # Histogram of the chosen feature; factor columns are converted back to
    # numeric via character before binning. `breaks` is only a suggestion to
    # hist(), which explains the NOTE in the UI about unchanged plots.
    output$hist <- renderPlot({
        n_bins <- input$bins
        feature <- Auto[,input$f_hist]
        if(class(feature) == "factor") {
            hist(as.numeric(as.character(feature)),
                 xlab = as.character(input$f_hist),
                 main = '',
                 col = 'lightblue')
        } else {
            hist(feature,
                 breaks = n_bins,
                 xlab = as.character(input$f_hist),
                 main = '',
                 col = 'lightblue')
        }
    })
    # Scatter plot matrix over the selected feature columns (checkbox values
    # arrive as character indices and are converted to numeric here).
    output$scat <- renderPlot({
        selected_cols <- as.numeric(input$f_scat)
        pairs(Auto[, c(selected_cols)])
    })
}
shinyApp(ui = ui, server = server) |
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("data.frame","ANY"),
definition = function(reducedDim, clusterLabels, ...){
RD <- as.matrix(reducedDim)
rownames(RD) <- rownames(reducedDim)
if(missing(clusterLabels)){
message('Unclustered data detected.')
clusterLabels <- rep('1', nrow(reducedDim))
}
newSlingshotDataSet(RD, clusterLabels, ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix", "numeric"),
definition = function(reducedDim, clusterLabels, ...){
newSlingshotDataSet(reducedDim, as.character(clusterLabels), ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","factor"),
definition = function(reducedDim, clusterLabels, ...){
newSlingshotDataSet(reducedDim, as.character(clusterLabels), ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","ANY"),
definition = function(reducedDim, clusterLabels, ...){
if(missing(clusterLabels)){
message('Unclustered data detected.')
clusterLabels <- rep('1', nrow(reducedDim))
}
newSlingshotDataSet(reducedDim, as.character(clusterLabels), ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","character"),
definition = function(reducedDim, clusterLabels, ...){
if(nrow(reducedDim) != length(clusterLabels)) {
stop('nrow(reducedDim) must equal length(clusterLabels).')
}
# something requires row and column names. Princurve?
if(is.null(rownames(reducedDim))){
rownames(reducedDim) <- paste('Cell',seq_len(nrow(reducedDim)),
sep='-')
}
if(is.null(colnames(reducedDim))){
colnames(reducedDim) <- paste('Dim',seq_len(ncol(reducedDim)),
sep='-')
}
if(is.null(names(clusterLabels))){
names(clusterLabels) <- rownames(reducedDim)
}
clusW <- table(rownames(reducedDim), clusterLabels)
clusW <- clusW[match(rownames(reducedDim),rownames(clusW)), ,drop=FALSE]
class(clusW) <- 'matrix'
return(newSlingshotDataSet(reducedDim, clusW, ...))
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","matrix"),
definition = function(reducedDim, clusterLabels,
lineages=list(),
adjacency=matrix(NA,0,0),
curves=list(),
slingParams=list()
){
if(nrow(reducedDim) != nrow(clusterLabels)) {
stop('nrow(reducedDim) must equal nrow(clusterLabels).')
}
# something requires row and column names. Princurve?
if(is.null(rownames(reducedDim))){
rownames(reducedDim) <- paste('Cell',seq_len(nrow(reducedDim)),
sep='-')
}
if(is.null(colnames(reducedDim))){
colnames(reducedDim) <- paste('Dim',seq_len(ncol(reducedDim)),
sep='-')
}
if(is.null(rownames(clusterLabels))){
rownames(clusterLabels) <- rownames(reducedDim)
}
if(is.null(colnames(clusterLabels))){
colnames(clusterLabels) <- seq_len(ncol(clusterLabels))
}
out <- new("SlingshotDataSet",
reducedDim = reducedDim,
clusterLabels = clusterLabels,
lineages = lineages,
adjacency = adjacency,
curves = curves,
slingParams = slingParams
)
return(out)
})
#' @describeIn SlingshotDataSet a short summary of a \code{SlingshotDataSet}
#' object.
#'
#' @param object a \code{SlingshotDataSet} object.
#' @export
setMethod(
f = "show",
signature = "SlingshotDataSet",
definition = function(object) {
cat("class:", class(object), "\n")
if(!is.null(slingParams(object)$embedding) &&
slingParams(object)$embedding){
cat('Embedding of slingshot trajectory\n')
}
df <- data.frame(Samples = nrow(reducedDim(object)),
Dimensions = ncol(reducedDim(object)))
cat('\n')
print(df, row.names = FALSE)
cat('\nlineages:', length(slingLineages(object)), "\n")
for(i in seq_len(length(slingLineages(object)))){
cat('Lineage',i,": ", paste(slingLineages(object)[[i]],' '), "\n",
sep='')
}
cat('\ncurves:', length(slingCurves(object)), "\n")
for(i in seq_len(length(slingCurves(object)))){
cat('Curve',i,": ", "Length: ",
signif(max(slingCurves(object)[[i]]$lambda), digits = 5),
"\tSamples: ", round(sum(slingCurves(object)[[i]]$w),
digits = 2),
"\n", sep='')
}
}
)
# accessor methods
#' @describeIn SlingshotDataSet returns the matrix representing the reduced
#' dimensional dataset.
#' @param x a \code{SlingshotDataSet} object.
#' @importFrom SingleCellExperiment reducedDim
#' @export
setMethod(
f = "reducedDim",
signature = c("SlingshotDataSet", "ANY"),
definition = function(x) x@reducedDim
)
#' @rdname SlingshotDataSet-class
#' @importFrom SingleCellExperiment reducedDims
#' @export
setMethod(
f = "reducedDims",
signature = "SlingshotDataSet",
definition = function(x) x@reducedDim
)
#' @rdname slingReducedDim
#' @export
setMethod(
f = "slingReducedDim",
signature = "PseudotimeOrdering",
definition = function(x) cellData(x)$reducedDim
)
#' @rdname slingReducedDim
#' @export
setMethod(
f = "slingReducedDim",
signature = "SlingshotDataSet",
definition = function(x) x@reducedDim
)
#' @rdname slingReducedDim
#' @export
setMethod(
f = "slingReducedDim",
signature = "SingleCellExperiment",
definition = function(x) slingReducedDim(as.PseudotimeOrdering(x))
)
#' @rdname slingClusterLabels
#' @export
setMethod(
f = "slingClusterLabels",
signature = signature(x="PseudotimeOrdering"),
definition = function(x){
return(cellData(x)$clusterLabels)
}
)
#' @rdname slingClusterLabels
#' @export
setMethod(
f = "slingClusterLabels",
signature = signature(x="SlingshotDataSet"),
definition = function(x){
return(x@clusterLabels)
}
)
#' @rdname slingClusterLabels
#' @importClassesFrom SingleCellExperiment SingleCellExperiment
#' @export
setMethod(
f = "slingClusterLabels",
signature = "SingleCellExperiment",
definition = function(x) slingClusterLabels(as.PseudotimeOrdering(x))
)
#' @rdname slingMST
#' @importFrom S4Vectors metadata metadata<-
#' @param as.df logical, whether to format the output as a \code{data.frame},
#' suitable for plotting with \code{ggplot}.
#' @importFrom igraph V
#' @export
setMethod(
f = "slingMST",
signature = "PseudotimeOrdering",
definition = function(x, as.df = FALSE){
if(!as.df){
return(metadata(x)$mst)
}else{
dfs <- lapply(seq_along(metadata(x)$lineages), function(l){
lin <- metadata(x)$lineages[[l]]
mst <- metadata(x)$mst
centers <- do.call(rbind, V(mst)$coordinates)
rownames(centers) <- V(mst)$name
return(data.frame(centers[lin,], Order = seq_along(lin),
Lineage = l, Cluster = lin))
})
return(do.call(rbind, dfs))
}
}
)
#' @rdname slingMST
#' @export
setMethod(
f = "slingMST",
signature = "SingleCellExperiment",
definition = function(x, ...) slingMST(colData(x)$slingshot, ...)
)
#' @rdname slingMST
#' @export
setMethod(
f = "slingMST",
signature = "SlingshotDataSet",
definition = function(x, as.df = FALSE){
if(!as.df){
return(x@adjacency)
}else{
pto <- as.PseudotimeOrdering(x)
return(slingMST(pto, as.df = TRUE))
}
}
)
#' @rdname slingLineages
#' @export
setMethod(
f = "slingLineages",
signature = "PseudotimeOrdering",
definition = function(x) metadata(x)$lineages
)
#' @rdname slingLineages
#' @export
setMethod(
f = "slingLineages",
signature = "SingleCellExperiment",
definition = function(x) slingLineages(colData(x)$slingshot)
)
#' @rdname slingLineages
#' @export
setMethod(
f = "slingLineages",
signature = "SlingshotDataSet",
definition = function(x) x@lineages
)
#' @rdname slingCurves
#' @param as.df logical, whether to format the output as a \code{data.frame},
#' suitable for plotting with \code{ggplot}.
#' @export
setMethod(
f = "slingCurves",
signature = "PseudotimeOrdering",
definition = function(x, as.df = FALSE){
if(!as.df){
return(metadata(x)$curves)
}else{
dfs <- lapply(seq_along(metadata(x)$curves), function(l){
pc <- metadata(x)$curves[[l]]
data.frame(pc$s, Order = order(pc$ord), Lineage = l)
})
return(do.call(rbind, dfs))
}
}
)
#' @rdname slingCurves
#' @export
setMethod(
f = "slingCurves",
signature = "SingleCellExperiment",
definition = function(x, ...) slingCurves(colData(x)$slingshot, ...)
)
#' @rdname slingCurves
#' @export
setMethod(
f = "slingCurves",
signature = "SlingshotDataSet",
definition = function(x, as.df = FALSE){
if(!as.df){
return(x@curves)
}else{
dfs <- lapply(seq_along(x@curves), function(l){
pc <- x@curves[[l]]
data.frame(pc$s, Order = order(pc$ord), Lineage = l)
})
return(do.call(rbind, dfs))
}
}
)
#' @rdname slingParams
#' @export
setMethod(
f = "slingParams",
signature = "PseudotimeOrdering",
definition = function(x) metadata(x)$slingParams
)
#' @rdname slingParams
#' @export
setMethod(
f = "slingParams",
signature = "SingleCellExperiment",
definition = function(x) slingParams(colData(x)$slingshot)
)
#' @rdname slingParams
#' @export
setMethod(
f = "slingParams",
signature = "SlingshotDataSet",
definition = function(x) x@slingParams
)
#' @rdname slingPseudotime
#' @param na logical. If \code{TRUE} (default), cells that are not assigned to a
#' lineage will have a pseudotime value of \code{NA}. Otherwise, their
#' arclength along each curve will be returned.
#' @importFrom SummarizedExperiment assay assay<-
#' @export
setMethod(
f = "slingPseudotime",
signature = "PseudotimeOrdering",
definition = function(x, na = TRUE){
if(length(slingCurves(x))==0){
stop('No curves detected.')
}
if(na){
return(assay(x, 'pseudotime'))
}else{
pst <- vapply(slingCurves(x), function(pc) {
t <- pc$lambda
return(t)
}, rep(0,nrow(x)))
rownames(pst) <- rownames(x)
colnames(pst) <- names(slingCurves(x))
return(pst)
}
}
)
#' @rdname slingPseudotime
#' @export
setMethod(
f = "slingPseudotime",
signature = "SingleCellExperiment",
definition = function(x, na = TRUE){
return(slingPseudotime(colData(x)$slingshot, na = na))
}
)
#' @rdname slingPseudotime
#' @export
setMethod(
f = "slingPseudotime",
signature = "SlingshotDataSet",
definition = function(x, na = TRUE){
if(length(slingCurves(x))==0){
stop('No curves detected.')
}
pst <- vapply(slingCurves(x), function(pc) {
t <- pc$lambda
if(na){
t[pc$w == 0] <- NA
}
return(t)
}, rep(0,nrow(reducedDim(x))))
rownames(pst) <- rownames(reducedDim(x))
colnames(pst) <- names(slingCurves(x))
return(pst)
}
)
#' @rdname slingPseudotime
#' @param as.probs logical. If \code{FALSE} (default), output will be the
#' weights used to construct the curves, appropriate for downstream analysis
#' of individual lineages (ie. a cell shared between two lineages can have two
#' weights of \code{1}). If \code{TRUE}, output will be scaled to represent
#' probabilistic assignment of cells to lineages (ie. a cell shared between
#' two lineages will have two weights of \code{0.5}).
#' @export
setMethod(
f = "slingCurveWeights",
signature = "PseudotimeOrdering",
definition = function(x, as.probs = FALSE){
if(length(slingCurves(x))==0){
stop('No curves detected.')
}
weights <- assay(x, 'weights')
if(as.probs){
weights <- weights / rowSums(weights)
}
return(weights)
}
)
#' @rdname slingPseudotime
#' @export
setMethod(
f = "slingCurveWeights",
signature = "SingleCellExperiment",
definition = function(x){
return(slingCurveWeights(colData(x)$slingshot))
}
)
#' @rdname slingPseudotime
#' @export
setMethod(
f = "slingCurveWeights",
signature = "SlingshotDataSet",
definition = function(x, as.probs = FALSE){
if(length(slingCurves(x))==0){
stop('No curves detected.')
}
weights <- vapply(slingCurves(x), function(pc) { pc$w },
rep(0, nrow(reducedDim(x))))
rownames(weights) <- rownames(reducedDim(x))
colnames(weights) <- names(slingCurves(x))
if(as.probs){
weights <- weights / rowSums(weights)
}
return(weights)
}
)
#' @rdname SlingshotDataSet
#' @export
setMethod(
f = "SlingshotDataSet",
signature = "SingleCellExperiment",
definition = function(data){
if("slingshot" %in% names(colData(data))){
return(as.SlingshotDataSet(colData(data)$slingshot))
}
if("slingshot" %in% names(data@int_metadata)){
return(data@int_metadata$slingshot)
}
stop('No slingshot results found.')
}
)
#' @rdname SlingshotDataSet
#' @export
setMethod(
f = "SlingshotDataSet",
signature = "SlingshotDataSet",
definition = function(data){
return(data)
}
)
#' @rdname SlingshotDataSet
#' @export
setMethod(
f = "SlingshotDataSet",
signature = "PseudotimeOrdering",
definition = function(data){
return(as.SlingshotDataSet(data))
}
)
##########################
### Internal functions ###
##########################
#' @import stats
#' @import matrixStats
#' @importFrom S4Vectors metadata metadata<-
# Internal replacement function: store the slingshot parameter list in the
# object's metadata() and return the modified object.
`.slingParams<-` <- function(x, value) {
    metadata(x)$slingParams <- value
    x
}
# Internal replacement function: store the fitted principal curves list in
# the object's metadata() and return the modified object.
`.slingCurves<-` <- function(x, value) {
    metadata(x)$curves <- value
    x
}
# to avoid confusion between the clusterLabels argument and function
# Internal accessor: pull the cluster label matrix out of cellData().
.getClusterLabels <- function(x){
    cellData(x)$clusterLabels
}
# Linearly rescale a numeric vector onto the interval [a, b]. NAs are ignored
# when locating the range (and propagate through to the output).
.scaleAB <- function(x,a=0,b=1){
    lo <- min(x, na.rm = TRUE)
    hi <- max(x, na.rm = TRUE)
    unit <- (x - lo) / (hi - lo)
    unit * (b - a) + a
}
# Average a list of principal curves (princurve-style fits with $s, $lambda,
# $ord, $w) into a single curve, then re-project the data X onto it.
# Each curve is first resampled onto a common grid of n pseudotime values
# spanning [0, shortest curve length]; the resampled coordinates are averaged
# dimension-wise. `approx_points` > 0 requests a fixed-size representation of
# the result (approx_points rows) instead of one row per data point.
.avg_curves <- function(pcurves, X, stretch = 2, approx_points = FALSE){
    n <- nrow(pcurves[[1]]$s)
    p <- ncol(pcurves[[1]]$s)
    # Only average over the pseudotime range shared by every input curve.
    max.shared.lambda <- min(vapply(pcurves, function(pcv){max(pcv$lambda)},0))
    lambdas.combine <- seq(0, max.shared.lambda, length.out = n)
    # Resample each curve's coordinates onto the common pseudotime grid.
    pcurves.dense <- lapply(pcurves,function(pcv){
        vapply(seq_len(p),function(jj){
            if(approx_points > 0){
                # Curve stored on an approx_points grid; reconstruct its
                # pseudotime axis before interpolating.
                xin_lambda <- seq(min(pcv$lambda), max(pcv$lambda),
                                  length.out = approx_points)
            }else{
                xin_lambda <- pcv$lambda
            }
            interpolated <- approx(xin_lambda[pcv$ord],
                                   pcv$s[pcv$ord, jj, drop = FALSE],
                                   xout = lambdas.combine, ties = 'ordered')$y
            return(interpolated)
        }, rep(0,n))
    })
    # Dimension-wise mean across curves at each grid point.
    avg <- vapply(seq_len(p),function(jj){
        dim.all <- vapply(seq_along(pcurves.dense),function(i){
            pcurves.dense[[i]][,jj]
        }, rep(0,n))
        return(rowMeans(dim.all))
    }, rep(0,n))
    # Project the data onto the averaged curve (princurve).
    avg.curve <- project_to_curve(X, avg, stretch=stretch)
    if(approx_points > 0){
        # Downsample the averaged curve back to approx_points rows.
        xout_lambda <- seq(min(avg.curve$lambda),
                           max(avg.curve$lambda),
                           length.out = approx_points)
        avg.curve$s <- apply(avg.curve$s, 2, function(sjj){
            return(approx(x = avg.curve$lambda[avg.curve$ord],
                          y = sjj[avg.curve$ord],
                          xout = xout_lambda, ties = 'ordered')$y)
        })
        avg.curve$ord <- seq_len(approx_points)
    }
    # A cell's weight on the average is the sum of its per-curve weights.
    avg.curve$w <- rowSums(vapply(pcurves, function(p){ p$w }, rep(0,nrow(X))))
    return(avg.curve)
}
# Running minimum of x with respect to the ordering given by time:
# element i is the smallest x-value among all points whose time is <= time[i].
.cumMin <- function(x,time){
    out <- numeric(length(x))
    for (i in seq_along(x)) {
        out[i] <- min(x[time <= time[i]])
    }
    out
}
# Compute, for every point on curve `crv`, the percentage by which it should
# be shrunk toward the average curve. `share.idx` marks the cells shared with
# the other branch; shrinkage is strongest where shared cells are dense (near
# the branch point) and decays along pseudotime according to `method`:
# either a survival function of a density() kernel, a tricube kernel, or a
# ratio of weighted pseudotime densities ('density').
# Returns a vector of shrinkage fractions aligned with the curve's pseudotime
# grid (length(pst) points, or approx_points points when approx_points > 0).
.percent_shrinkage <- function(crv, share.idx, approx_points = FALSE,
                               method = 'cosine'){
    pst <- crv$lambda
    if(approx_points > 0){
        # Evaluate the shrinkage weights on the fixed-size pseudotime grid.
        pts2wt <- seq(min(crv$lambda), max(crv$lambda),
                      length.out = approx_points)
    }else{
        pts2wt <- pst
    }
    # Kernel-based methods: use the kernel's survival function, rescaled so
    # that it spans the boxplot whiskers of the shared cells' pseudotimes.
    if(method %in% eval(formals(density.default)$kernel)){
        dens <- density(0, bw=1, kernel = method)
        surv <- list(x = dens$x, y = (sum(dens$y) - cumsum(dens$y))/sum(dens$y))
        box.vals <- graphics::boxplot(pst[share.idx], plot = FALSE)$stats
        surv$x <- .scaleAB(surv$x, a = box.vals[1], b = box.vals[5])
        if(box.vals[1]==box.vals[5]){
            # Degenerate case: all shared cells at one pseudotime; no shrinkage.
            pct.l <- rep(0, length(pst))
        }else{
            pct.l <- approx(surv$x, surv$y, pts2wt, rule = 2,
                            ties = 'ordered')$y
        }
    }
    # Tricube kernel is not offered by density(); build its survival function
    # on a fixed grid and proceed as above.
    if(method == 'tricube'){
        tc <- function(x){ ifelse(abs(x) <= 1, (70/81)*((1-abs(x)^3)^3), 0) }
        dens <- list(x = seq(-3,3,length.out = 512))
        dens$y <- tc(dens$x)
        surv <- list(x = dens$x, y = (sum(dens$y) - cumsum(dens$y))/sum(dens$y))
        box.vals <- graphics::boxplot(pst[share.idx], plot = FALSE)$stats
        surv$x <- .scaleAB(surv$x, a = box.vals[1], b = box.vals[5])
        if(box.vals[1]==box.vals[5]){
            pct.l <- rep(0, length(pst))
        }else{
            pct.l <- approx(surv$x, surv$y, pts2wt, rule = 2,
                            ties = 'ordered')$y
        }
    }
    # Density-ratio method: shrinkage is the weighted density of shared cells
    # relative to all cells, forced to be non-increasing in pseudotime.
    if(method == 'density'){
        bw1 <- bw.SJ(pst)
        bw2 <- bw.SJ(pst[share.idx])
        bw <- (bw1 + bw2) / 2
        d2 <- density(pst[share.idx], bw = bw,
                      weights = crv$w[share.idx]/sum(crv$w[share.idx]))
        d1 <- density(pst, bw = bw, weights = crv$w/sum(crv$w))
        scale <- sum(crv$w[share.idx]) / sum(crv$w)
        pct.l <- (approx(d2$x,d2$y,xout = pts2wt, yleft = 0,
                         yright = 0, ties = mean)$y * scale) /
            approx(d1$x,d1$y,xout = pts2wt, yleft = 0, yright = 0,
                   ties = mean)$y
        pct.l[is.na(pct.l)] <- 0
        # Enforce monotone decay of shrinkage along pseudotime.
        pct.l <- .cumMin(pct.l, pts2wt)
    }
    return(pct.l)
}
# Shrink a branch curve toward the average curve: each point of `pcurve` is
# replaced by a convex combination of itself and the corresponding point on
# `avg.curve` (matched by pseudotime), using the per-point fractions `pct`
# from .percent_shrinkage(). The data X are then re-projected onto the
# shrunken curve; the original weights are preserved.
.shrink_to_avg <- function(pcurve, avg.curve, pct, X, approx_points = FALSE,
                           stretch = 2){
    n <- nrow(pcurve$s)
    p <- ncol(pcurve$s)
    if(approx_points > 0){
        # Both curves live on fixed-size grids; reconstruct their pseudotime
        # axes so points can be matched by arclength.
        lam <- seq(min(pcurve$lambda), max(pcurve$lambda),
                   length.out = approx_points)
        avlam <- seq(min(avg.curve$lambda), max(avg.curve$lambda),
                     length.out = approx_points)
    }else{
        lam <- pcurve$lambda
        avlam <- avg.curve$lambda
    }
    # Dimension-wise convex combination: pct toward the average, (1-pct)
    # staying at the original curve.
    s <- vapply(seq_len(p),function(jj){
        orig.jj <- pcurve$s[,jj]
        avg.jj <- approx(x = avlam, y = avg.curve$s[,jj], xout = lam,
                         rule = 2, ties = mean)$y
        return(avg.jj * pct + orig.jj * (1-pct))
    }, rep(0,n))
    # project_to_curve overwrites $w; stash and restore the cell weights.
    w <- pcurve$w
    pcurve <- project_to_curve(X, as.matrix(s[pcurve$ord, ,drop = FALSE]),
                               stretch = stretch)
    pcurve$w <- w
    if(approx_points > 0){
        # Downsample the shrunken curve back to approx_points rows.
        xout_lambda <- seq(min(pcurve$lambda), max(pcurve$lambda),
                           length.out = approx_points)
        pcurve$s <- apply(pcurve$s, 2, function(sjj){
            return(approx(x = pcurve$lambda[pcurve$ord],
                          y = sjj[pcurve$ord],
                          xout = xout_lambda, ties = 'ordered')$y)
        })
        pcurve$ord <- seq_len(approx_points)
    }
    return(pcurve)
}
# Return the names in `nodes` that lie "under" node `n`: node names are
# comma-separated cluster lists, and a node is under `n` when every one of
# its clusters also appears in `n`'s list. `n` itself is excluded.
#
# n:     a single node name, e.g. "1,2,3".
# nodes: character vector of candidate node names.
# Returns a (possibly empty) character vector, a subset of `nodes`.
.under <- function(n, nodes){
    # Hoist the split of `n` out of the per-node check (loop invariant);
    # the previously computed-but-unused per-node lengths were dropped.
    n.clusters <- unlist(strsplit(n, split='[,]'))
    which.lin <- strsplit(nodes, split='[,]')
    is.under <- vapply(which.lin, function(wl){
        all(wl %in% n.clusters)
    }, FALSE)
    out <- nodes[is.under]
    return(out[out != n])
}
################
### Datasets ###
################
#' @title Bifurcating lineages data
#' @name slingshotExample
#'
#' @usage data("slingshotExample")
#'
#' @description This simulated dataset contains a low-dimensional representation
#' of two bifurcating lineages (\code{rd}) and a vector of cluster labels
#' generated by k-means with \code{K = 5} (\code{cl}).
#'
#' @format \code{rd} is a matrix of coordinates in two dimensions, representing
#' 140 cells. \code{cl} is a numeric vector of 140 corresponding cluster
#' labels for each cell.
#' @source Simulated data provided with the \code{slingshot} package.
#'
#' @examples
#' data("slingshotExample")
#' rd <- slingshotExample$rd
#' cl <- slingshotExample$cl
#' slingshot(rd, cl)
"slingshotExample"
| /R/AllHelperFunctions.R | no_license | tangbozeng/slingshot | R | false | false | 21,776 | r | #' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("data.frame","ANY"),
definition = function(reducedDim, clusterLabels, ...){
RD <- as.matrix(reducedDim)
rownames(RD) <- rownames(reducedDim)
if(missing(clusterLabels)){
message('Unclustered data detected.')
clusterLabels <- rep('1', nrow(reducedDim))
}
newSlingshotDataSet(RD, clusterLabels, ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix", "numeric"),
definition = function(reducedDim, clusterLabels, ...){
newSlingshotDataSet(reducedDim, as.character(clusterLabels), ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","factor"),
definition = function(reducedDim, clusterLabels, ...){
newSlingshotDataSet(reducedDim, as.character(clusterLabels), ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","ANY"),
definition = function(reducedDim, clusterLabels, ...){
if(missing(clusterLabels)){
message('Unclustered data detected.')
clusterLabels <- rep('1', nrow(reducedDim))
}
newSlingshotDataSet(reducedDim, as.character(clusterLabels), ...)
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","character"),
definition = function(reducedDim, clusterLabels, ...){
if(nrow(reducedDim) != length(clusterLabels)) {
stop('nrow(reducedDim) must equal length(clusterLabels).')
}
# something requires row and column names. Princurve?
if(is.null(rownames(reducedDim))){
rownames(reducedDim) <- paste('Cell',seq_len(nrow(reducedDim)),
sep='-')
}
if(is.null(colnames(reducedDim))){
colnames(reducedDim) <- paste('Dim',seq_len(ncol(reducedDim)),
sep='-')
}
if(is.null(names(clusterLabels))){
names(clusterLabels) <- rownames(reducedDim)
}
clusW <- table(rownames(reducedDim), clusterLabels)
clusW <- clusW[match(rownames(reducedDim),rownames(clusW)), ,drop=FALSE]
class(clusW) <- 'matrix'
return(newSlingshotDataSet(reducedDim, clusW, ...))
})
#' @rdname newSlingshotDataSet
#' @export
setMethod(
f = "newSlingshotDataSet",
signature = signature("matrix","matrix"),
definition = function(reducedDim, clusterLabels,
lineages=list(),
adjacency=matrix(NA,0,0),
curves=list(),
slingParams=list()
){
if(nrow(reducedDim) != nrow(clusterLabels)) {
stop('nrow(reducedDim) must equal nrow(clusterLabels).')
}
# something requires row and column names. Princurve?
if(is.null(rownames(reducedDim))){
rownames(reducedDim) <- paste('Cell',seq_len(nrow(reducedDim)),
sep='-')
}
if(is.null(colnames(reducedDim))){
colnames(reducedDim) <- paste('Dim',seq_len(ncol(reducedDim)),
sep='-')
}
if(is.null(rownames(clusterLabels))){
rownames(clusterLabels) <- rownames(reducedDim)
}
if(is.null(colnames(clusterLabels))){
colnames(clusterLabels) <- seq_len(ncol(clusterLabels))
}
out <- new("SlingshotDataSet",
reducedDim = reducedDim,
clusterLabels = clusterLabels,
lineages = lineages,
adjacency = adjacency,
curves = curves,
slingParams = slingParams
)
return(out)
})
#' @describeIn SlingshotDataSet a short summary of a \code{SlingshotDataSet}
#' object.
#'
#' @param object a \code{SlingshotDataSet} object.
#' @export
setMethod(
f = "show",
signature = "SlingshotDataSet",
definition = function(object) {
cat("class:", class(object), "\n")
if(!is.null(slingParams(object)$embedding) &&
slingParams(object)$embedding){
cat('Embedding of slingshot trajectory\n')
}
df <- data.frame(Samples = nrow(reducedDim(object)),
Dimensions = ncol(reducedDim(object)))
cat('\n')
print(df, row.names = FALSE)
cat('\nlineages:', length(slingLineages(object)), "\n")
for(i in seq_len(length(slingLineages(object)))){
cat('Lineage',i,": ", paste(slingLineages(object)[[i]],' '), "\n",
sep='')
}
cat('\ncurves:', length(slingCurves(object)), "\n")
for(i in seq_len(length(slingCurves(object)))){
cat('Curve',i,": ", "Length: ",
signif(max(slingCurves(object)[[i]]$lambda), digits = 5),
"\tSamples: ", round(sum(slingCurves(object)[[i]]$w),
digits = 2),
"\n", sep='')
}
}
)
# accessor methods
#' @describeIn SlingshotDataSet returns the matrix representing the reduced
#' dimensional dataset.
#' @param x a \code{SlingshotDataSet} object.
#' @importFrom SingleCellExperiment reducedDim
#' @export
setMethod(
f = "reducedDim",
signature = c("SlingshotDataSet", "ANY"),
definition = function(x) x@reducedDim
)
#' @rdname SlingshotDataSet-class
#' @importFrom SingleCellExperiment reducedDims
#' @export
setMethod(
f = "reducedDims",
signature = "SlingshotDataSet",
definition = function(x) x@reducedDim
)
#' @rdname slingReducedDim
#' @export
setMethod(
f = "slingReducedDim",
signature = "PseudotimeOrdering",
definition = function(x) cellData(x)$reducedDim
)
#' @rdname slingReducedDim
#' @export
setMethod(
f = "slingReducedDim",
signature = "SlingshotDataSet",
definition = function(x) x@reducedDim
)
#' @rdname slingReducedDim
#' @export
setMethod(
f = "slingReducedDim",
signature = "SingleCellExperiment",
definition = function(x) slingReducedDim(as.PseudotimeOrdering(x))
)
#' @rdname slingClusterLabels
#' @export
setMethod(
f = "slingClusterLabels",
signature = signature(x="PseudotimeOrdering"),
definition = function(x){
return(cellData(x)$clusterLabels)
}
)
#' @rdname slingClusterLabels
#' @export
setMethod(
f = "slingClusterLabels",
signature = signature(x="SlingshotDataSet"),
definition = function(x){
return(x@clusterLabels)
}
)
#' @rdname slingClusterLabels
#' @importClassesFrom SingleCellExperiment SingleCellExperiment
#' @export
setMethod(
f = "slingClusterLabels",
signature = "SingleCellExperiment",
definition = function(x) slingClusterLabels(as.PseudotimeOrdering(x))
)
#' @rdname slingMST
#' @importFrom S4Vectors metadata metadata<-
#' @param as.df logical, whether to format the output as a \code{data.frame},
#' suitable for plotting with \code{ggplot}.
#' @importFrom igraph V
#' @export
setMethod(
f = "slingMST",
signature = "PseudotimeOrdering",
definition = function(x, as.df = FALSE){
if(!as.df){
return(metadata(x)$mst)
}else{
dfs <- lapply(seq_along(metadata(x)$lineages), function(l){
lin <- metadata(x)$lineages[[l]]
mst <- metadata(x)$mst
centers <- do.call(rbind, V(mst)$coordinates)
rownames(centers) <- V(mst)$name
return(data.frame(centers[lin,], Order = seq_along(lin),
Lineage = l, Cluster = lin))
})
return(do.call(rbind, dfs))
}
}
)
#' @rdname slingMST
#' @export
setMethod(
f = "slingMST",
signature = "SingleCellExperiment",
definition = function(x, ...) slingMST(colData(x)$slingshot, ...)
)
#' @rdname slingMST
#' @export
setMethod(
f = "slingMST",
signature = "SlingshotDataSet",
definition = function(x, as.df = FALSE){
if(!as.df){
return(x@adjacency)
}else{
pto <- as.PseudotimeOrdering(x)
return(slingMST(pto, as.df = TRUE))
}
}
)
#' @rdname slingLineages
#' @export
setMethod(
f = "slingLineages",
signature = "PseudotimeOrdering",
definition = function(x) metadata(x)$lineages
)
#' @rdname slingLineages
#' @export
setMethod(
f = "slingLineages",
signature = "SingleCellExperiment",
definition = function(x) slingLineages(colData(x)$slingshot)
)
#' @rdname slingLineages
#' @export
setMethod(
f = "slingLineages",
signature = "SlingshotDataSet",
definition = function(x) x@lineages
)
#' @rdname slingCurves
#' @param as.df logical, whether to format the output as a \code{data.frame},
#'   suitable for plotting with \code{ggplot}.
#' @export
setMethod(
    f = "slingCurves",
    signature = "PseudotimeOrdering",
    definition = function(x, as.df = FALSE) {
        crvs <- metadata(x)$curves
        if (!as.df) {
            return(crvs)
        }
        # One data.frame per curve (coordinates, plotting order, lineage
        # index), stacked into a single long-format data.frame.
        dfs <- lapply(seq_along(crvs), function(l) {
            pc <- crvs[[l]]
            data.frame(pc$s, Order = order(pc$ord), Lineage = l)
        })
        do.call(rbind, dfs)
    }
)
#' @rdname slingCurves
#' @export
setMethod(
    f = "slingCurves",
    signature = "SingleCellExperiment",
    definition = function(x, ...) {
        # Delegate to the PseudotimeOrdering stored in the colData.
        slingCurves(colData(x)$slingshot, ...)
    }
)
#' @rdname slingCurves
#' @export
setMethod(
    f = "slingCurves",
    signature = "SlingshotDataSet",
    definition = function(x, as.df = FALSE) {
        if (!as.df) {
            return(x@curves)
        }
        # Same long-format layout as the PseudotimeOrdering method.
        dfs <- lapply(seq_along(x@curves), function(l) {
            pc <- x@curves[[l]]
            data.frame(pc$s, Order = order(pc$ord), Lineage = l)
        })
        do.call(rbind, dfs)
    }
)
#' @rdname slingParams
#' @export
setMethod(
    f = "slingParams",
    signature = "PseudotimeOrdering",
    definition = function(x) {
        # Fitting parameters are kept in the object's metadata.
        metadata(x)$slingParams
    }
)
#' @rdname slingParams
#' @export
setMethod(
    f = "slingParams",
    signature = "SingleCellExperiment",
    definition = function(x) {
        # Delegate to the PseudotimeOrdering stored in the colData.
        slingParams(colData(x)$slingshot)
    }
)
#' @rdname slingParams
#' @export
setMethod(
    f = "slingParams",
    signature = "SlingshotDataSet",
    definition = function(x) {
        x@slingParams
    }
)
#' @rdname slingPseudotime
#' @param na logical. If \code{TRUE} (default), cells that are not assigned to a
#'   lineage will have a pseudotime value of \code{NA}. Otherwise, their
#'   arclength along each curve will be returned.
#' @importFrom SummarizedExperiment assay assay<-
#' @export
setMethod(
    f = "slingPseudotime",
    signature = "PseudotimeOrdering",
    definition = function(x, na = TRUE) {
        crvs <- slingCurves(x)
        if (length(crvs) == 0) {
            stop('No curves detected.')
        }
        if (na) {
            # The stored pseudotime assay already has NAs for unassigned cells.
            return(assay(x, 'pseudotime'))
        }
        # Raw arclength along each curve for every cell.
        pst <- vapply(crvs, function(pc) pc$lambda, rep(0, nrow(x)))
        rownames(pst) <- rownames(x)
        colnames(pst) <- names(crvs)
        pst
    }
)
#' @rdname slingPseudotime
#' @export
setMethod(
    f = "slingPseudotime",
    signature = "SingleCellExperiment",
    definition = function(x, na = TRUE) {
        # Delegate to the PseudotimeOrdering stored in the colData.
        slingPseudotime(colData(x)$slingshot, na = na)
    }
)
#' @rdname slingPseudotime
#' @export
setMethod(
    f = "slingPseudotime",
    signature = "SlingshotDataSet",
    definition = function(x, na = TRUE) {
        crvs <- slingCurves(x)
        if (length(crvs) == 0) {
            stop('No curves detected.')
        }
        pst <- vapply(crvs, function(pc) {
            t <- pc$lambda
            if (na) {
                # Mask cells with zero weight on this curve.
                t[pc$w == 0] <- NA
            }
            t
        }, rep(0, nrow(reducedDim(x))))
        rownames(pst) <- rownames(reducedDim(x))
        colnames(pst) <- names(crvs)
        pst
    }
)
#' @rdname slingPseudotime
#' @param as.probs logical. If \code{FALSE} (default), output will be the
#'   weights used to construct the curves, appropriate for downstream analysis
#'   of individual lineages (ie. a cell shared between two lineages can have two
#'   weights of \code{1}). If \code{TRUE}, output will be scaled to represent
#'   probabilistic assignment of cells to lineages (ie. a cell shared between
#'   two lineages will have two weights of \code{0.5}).
#' @export
setMethod(
    f = "slingCurveWeights",
    signature = "PseudotimeOrdering",
    definition = function(x, as.probs = FALSE) {
        if (length(slingCurves(x)) == 0) {
            stop('No curves detected.')
        }
        weights <- assay(x, 'weights')
        if (as.probs) {
            # Rescale each row to sum to 1 (probabilistic assignment).
            weights <- weights / rowSums(weights)
        }
        weights
    }
)
#' @rdname slingPseudotime
#' @export
setMethod(
    f = "slingCurveWeights",
    signature = "SingleCellExperiment",
    definition = function(x, ...) {
        # Delegate to the PseudotimeOrdering stored in the colData.
        # Forwarding `...` makes this method honor `as.probs`, consistent with
        # the PseudotimeOrdering and SlingshotDataSet methods; previously any
        # extra argument was silently dropped.
        return(slingCurveWeights(colData(x)$slingshot, ...))
    }
)
#' @rdname slingPseudotime
#' @export
setMethod(
    f = "slingCurveWeights",
    signature = "SlingshotDataSet",
    definition = function(x, as.probs = FALSE) {
        crvs <- slingCurves(x)
        if (length(crvs) == 0) {
            stop('No curves detected.')
        }
        # One column of weights per curve, rows matching the reduced-dim data.
        weights <- vapply(crvs, function(pc) pc$w,
                          rep(0, nrow(reducedDim(x))))
        rownames(weights) <- rownames(reducedDim(x))
        colnames(weights) <- names(crvs)
        if (as.probs) {
            # Rescale each row to sum to 1 (probabilistic assignment).
            weights <- weights / rowSums(weights)
        }
        weights
    }
)
#' @rdname SlingshotDataSet
#' @export
setMethod(
    f = "SlingshotDataSet",
    signature = "SingleCellExperiment",
    definition = function(data) {
        # Look for results in the colData first, then fall back to the
        # int_metadata slot.
        if ("slingshot" %in% names(colData(data))) {
            return(as.SlingshotDataSet(colData(data)$slingshot))
        }
        if ("slingshot" %in% names(data@int_metadata)) {
            return(data@int_metadata$slingshot)
        }
        stop('No slingshot results found.')
    }
)
#' @rdname SlingshotDataSet
#' @export
setMethod(
    f = "SlingshotDataSet",
    signature = "SlingshotDataSet",
    definition = function(data) {
        # Already the requested class; return unchanged.
        data
    }
)
#' @rdname SlingshotDataSet
#' @export
setMethod(
    f = "SlingshotDataSet",
    signature = "PseudotimeOrdering",
    definition = function(data) {
        as.SlingshotDataSet(data)
    }
)
##########################
### Internal functions ###
##########################
#' @import stats
#' @import matrixStats
#' @importFrom S4Vectors metadata metadata<-
# Internal replacement function: store the slingshot fitting parameters in the
# object's metadata and return the modified object.
`.slingParams<-` <- function(x, value) {
    metadata(x)$slingParams <- value
    x
}
# Internal replacement function: store the list of fitted curves in the
# object's metadata and return the modified object.
`.slingCurves<-` <- function(x, value) {
    metadata(x)$curves <- value
    x
}
# to avoid confusion between the clusterLabels argument and function:
# internal accessor for the per-cell cluster labels stored in cellData(x).
.getClusterLabels <- function(x){
    cellData(x)$clusterLabels
}
# Linearly rescale a numeric vector so its minimum maps to `a` and its
# maximum maps to `b` (defaults: [0, 1]). NAs are ignored when computing
# the range but propagate through the output.
.scaleAB <- function(x, a = 0, b = 1) {
    rng <- range(x, na.rm = TRUE)
    ((x - rng[1]) / (rng[2] - rng[1])) * (b - a) + a
}
# Average several principal curves into a single consensus curve.
#
# pcurves: list of princurve-style curve objects (each with $s coordinates,
#   $lambda arclengths, $ord ordering, $w cell weights).
# X: the data matrix the curves were fit to (cells x dimensions).
# stretch: passed through to project_to_curve().
# approx_points: number of evenly spaced points used to represent each curve,
#   or FALSE for no approximation (note FALSE > 0 evaluates to FALSE, so the
#   default takes the "no approximation" branch).
#
# Each curve is interpolated onto a common arclength grid (restricted to the
# range shared by all curves), coordinates are averaged dimension-wise, and
# the data are re-projected onto the averaged curve.
.avg_curves <- function(pcurves, X, stretch = 2, approx_points = FALSE){
    n <- nrow(pcurves[[1]]$s)
    p <- ncol(pcurves[[1]]$s)
    # The common grid cannot extend past the shortest curve's arclength.
    max.shared.lambda <- min(vapply(pcurves, function(pcv){max(pcv$lambda)},0))
    lambdas.combine <- seq(0, max.shared.lambda, length.out = n)
    # Interpolate each curve's coordinates onto the common arclength grid.
    pcurves.dense <- lapply(pcurves,function(pcv){
        vapply(seq_len(p),function(jj){
            if(approx_points > 0){
                # Reconstruct the evenly spaced arclength grid used when
                # curves are stored with approx_points samples.
                xin_lambda <- seq(min(pcv$lambda), max(pcv$lambda),
                                  length.out = approx_points)
            }else{
                xin_lambda <- pcv$lambda
            }
            interpolated <- approx(xin_lambda[pcv$ord],
                                   pcv$s[pcv$ord, jj, drop = FALSE],
                                   xout = lambdas.combine, ties = 'ordered')$y
            return(interpolated)
        }, rep(0,n))
    })
    # Dimension-wise mean across the densified curves.
    avg <- vapply(seq_len(p),function(jj){
        dim.all <- vapply(seq_along(pcurves.dense),function(i){
            pcurves.dense[[i]][,jj]
        }, rep(0,n))
        return(rowMeans(dim.all))
    }, rep(0,n))
    # Re-project the data onto the averaged curve.
    avg.curve <- project_to_curve(X, avg, stretch=stretch)
    if(approx_points > 0){
        # Re-sample the averaged curve at approx_points evenly spaced
        # arclengths so its representation matches the inputs'.
        xout_lambda <- seq(min(avg.curve$lambda),
                           max(avg.curve$lambda),
                           length.out = approx_points)
        avg.curve$s <- apply(avg.curve$s, 2, function(sjj){
            return(approx(x = avg.curve$lambda[avg.curve$ord],
                          y = sjj[avg.curve$ord],
                          xout = xout_lambda, ties = 'ordered')$y)
        })
        avg.curve$ord <- seq_len(approx_points)
    }
    # A cell's weight on the average is the sum of its weights on the
    # contributing curves.
    avg.curve$w <- rowSums(vapply(pcurves, function(p){ p$w }, rep(0,nrow(X))))
    return(avg.curve)
}
# For each element of x, return the minimum of all x values whose `time`
# is less than or equal to that element's time (a running minimum when
# `time` is sorted; works for unsorted `time` as well).
.cumMin <- function(x, time) {
    vapply(
        seq_along(x),
        function(i) min(x[time <= time[i]]),
        numeric(1)
    )
}
# Compute per-point shrinkage percentages along a curve.
#
# crv: a princurve-style curve object ($lambda arclengths, $w cell weights).
# share.idx: indices of the cells shared between lineages.
# approx_points: number of evenly spaced arclength points, or FALSE
#   (FALSE > 0 is FALSE, so the default uses the raw arclengths).
# method: either a kernel name accepted by density() (e.g. 'cosine'),
#   'tricube', or 'density'.
#
# Returns a numeric vector (one value per arclength point in pts2wt) giving
# the fraction by which each point should be shrunk toward the average curve.
.percent_shrinkage <- function(crv, share.idx, approx_points = FALSE,
                               method = 'cosine'){
    pst <- crv$lambda
    if(approx_points > 0){
        # Weights are evaluated on the evenly spaced arclength grid.
        pts2wt <- seq(min(crv$lambda), max(crv$lambda),
                      length.out = approx_points)
    }else{
        pts2wt <- pst
    }
    if(method %in% eval(formals(density.default)$kernel)){
        # Survival-type curve of a unit kernel at 0: starts near 1 and
        # decreases to 0 across the kernel's support.
        dens <- density(0, bw=1, kernel = method)
        surv <- list(x = dens$x, y = (sum(dens$y) - cumsum(dens$y))/sum(dens$y))
        # Rescale the kernel's support to the whiskers of the shared cells'
        # pseudotime distribution.
        box.vals <- graphics::boxplot(pst[share.idx], plot = FALSE)$stats
        surv$x <- .scaleAB(surv$x, a = box.vals[1], b = box.vals[5])
        if(box.vals[1]==box.vals[5]){
            # Degenerate case: no spread in shared pseudotimes -> no shrinkage.
            pct.l <- rep(0, length(pst))
        }else{
            pct.l <- approx(surv$x, surv$y, pts2wt, rule = 2,
                            ties = 'ordered')$y
        }
    }
    if(method == 'tricube'){
        # Tricube is not among density()'s built-in kernels, so build its
        # survival curve by hand on a fixed grid.
        tc <- function(x){ ifelse(abs(x) <= 1, (70/81)*((1-abs(x)^3)^3), 0) }
        dens <- list(x = seq(-3,3,length.out = 512))
        dens$y <- tc(dens$x)
        surv <- list(x = dens$x, y = (sum(dens$y) - cumsum(dens$y))/sum(dens$y))
        box.vals <- graphics::boxplot(pst[share.idx], plot = FALSE)$stats
        surv$x <- .scaleAB(surv$x, a = box.vals[1], b = box.vals[5])
        if(box.vals[1]==box.vals[5]){
            pct.l <- rep(0, length(pst))
        }else{
            pct.l <- approx(surv$x, surv$y, pts2wt, rule = 2,
                            ties = 'ordered')$y
        }
    }
    if(method == 'density'){
        # Ratio of the (weighted) density of shared cells to the density of
        # all cells, scaled by the shared cells' share of total weight.
        # A common bandwidth (average of the two SJ bandwidths) keeps the
        # densities comparable.
        bw1 <- bw.SJ(pst)
        bw2 <- bw.SJ(pst[share.idx])
        bw <- (bw1 + bw2) / 2
        d2 <- density(pst[share.idx], bw = bw,
                      weights = crv$w[share.idx]/sum(crv$w[share.idx]))
        d1 <- density(pst, bw = bw, weights = crv$w/sum(crv$w))
        scale <- sum(crv$w[share.idx]) / sum(crv$w)
        pct.l <- (approx(d2$x,d2$y,xout = pts2wt, yleft = 0,
                         yright = 0, ties = mean)$y * scale) /
            approx(d1$x,d1$y,xout = pts2wt, yleft = 0, yright = 0,
                   ties = mean)$y
        # 0/0 outside the data range produces NaN -> treat as no shrinkage.
        pct.l[is.na(pct.l)] <- 0
        # Force the shrinkage profile to be nonincreasing along the curve.
        pct.l <- .cumMin(pct.l, pts2wt)
    }
    return(pct.l)
}
# Shrink an individual curve toward the average curve.
#
# pcurve: the curve to shrink (princurve-style object).
# avg.curve: the consensus curve produced by .avg_curves().
# pct: per-point shrinkage fractions from .percent_shrinkage()
#   (0 = keep original point, 1 = move fully to the average).
# X: the data matrix, re-projected onto the blended curve.
# approx_points / stretch: as in .avg_curves().
#
# Returns the blended, re-projected curve with the original weights restored.
.shrink_to_avg <- function(pcurve, avg.curve, pct, X, approx_points = FALSE,
                           stretch = 2){
    n <- nrow(pcurve$s)
    p <- ncol(pcurve$s)
    if(approx_points > 0){
        # Reconstruct the evenly spaced arclength grids both curves use
        # when stored with approx_points samples.
        lam <- seq(min(pcurve$lambda), max(pcurve$lambda),
                   length.out = approx_points)
        avlam <- seq(min(avg.curve$lambda), max(avg.curve$lambda),
                     length.out = approx_points)
    }else{
        lam <- pcurve$lambda
        avlam <- avg.curve$lambda
    }
    # Dimension-wise convex combination of the curve and the (interpolated)
    # average at matching arclengths.
    s <- vapply(seq_len(p),function(jj){
        orig.jj <- pcurve$s[,jj]
        avg.jj <- approx(x = avlam, y = avg.curve$s[,jj], xout = lam,
                         rule = 2, ties = mean)$y
        return(avg.jj * pct + orig.jj * (1-pct))
    }, rep(0,n))
    # project_to_curve() overwrites $w, so preserve the original weights.
    w <- pcurve$w
    pcurve <- project_to_curve(X, as.matrix(s[pcurve$ord, ,drop = FALSE]),
                               stretch = stretch)
    pcurve$w <- w
    if(approx_points > 0){
        # Re-sample the blended curve back onto an evenly spaced grid.
        xout_lambda <- seq(min(pcurve$lambda), max(pcurve$lambda),
                           length.out = approx_points)
        pcurve$s <- apply(pcurve$s, 2, function(sjj){
            return(approx(x = pcurve$lambda[pcurve$ord],
                          y = sjj[pcurve$ord],
                          xout = xout_lambda, ties = 'ordered')$y)
        })
        pcurve$ord <- seq_len(approx_points)
    }
    return(pcurve)
}
# Given a node label `n` (comma-separated cluster names) and a vector of node
# labels `nodes`, return the labels in `nodes` whose clusters are all
# contained in n's clusters, excluding n itself.
#
# Changes from the original: the unused local `nlins` was removed, and the
# loop-invariant split of `n` is hoisted out of the per-node check.
.under <- function(n, nodes){
    n.clusters <- unlist(strsplit(n, split='[,]'))
    which.lin <- strsplit(nodes, split='[,]')
    out <- nodes[vapply(which.lin, function(wl){
        all(wl %in% n.clusters)
    }, FALSE)]
    return(out[out != n])
}
################
### Datasets ###
################
#' @title Bifurcating lineages data
#' @name slingshotExample
#'
#' @usage data("slingshotExample")
#'
#' @description This simulated dataset contains a low-dimensional representation
#' of two bifurcating lineages (\code{rd}) and a vector of cluster labels
#' generated by k-means with \code{K = 5} (\code{cl}).
#'
#' @format \code{rd} is a matrix of coordinates in two dimensions, representing
#' 140 cells. \code{cl} is a numeric vector of 140 corresponding cluster
#' labels for each cell.
#' @source Simulated data provided with the \code{slingshot} package.
#'
#' @examples
#' data("slingshotExample")
#' rd <- slingshotExample$rd
#' cl <- slingshotExample$cl
#' slingshot(rd, cl)
"slingshotExample"
|
# -------------------------------------
# Post Processing for the TZA 2010
# data. Here we make some changes to
# the data to make it possible to
# perform further analysis. This file
# is separate from the raw data processing
# file in order to make transparent any
# changes that were made to the data
# -------------------------------------
# Pick the data directory based on the current user; the second branch points
# at a shared network drive. NOTE(review): paths are hard-coded per machine.
if(Sys.info()["user"] == "Tomas"){
    path2Data <- "C:/Users/Tomas/Documents/LEI/pro-gap/TZA/"
} else {
    path2Data <- "N:/Internationaal Beleid (IB)/Projecten/2285000066 Africa Maize Yield Gap/SurveyData/Code/TZA"
}
# source the data (the sourced script is assumed to create the TZA2010 data
# frame used below)
suppressMessages(source(file.path(path2Data, "/TZA_2010.R")))
# -------------------------------------
# For some questions respondents answered
# NA, it is not certain how these responses
# should be treated. Often we assume that
# an NA is equivalent to NO/0
# -------------------------------------
# Each recode collapses the variable to a 0/1 indicator; %in% never returns
# NA, so NAs fall into the 0 ("no") category by construction.
TZA2010$SACCO <- ifelse(TZA2010$SACCO %in% 1, 1, 0) # assume NA -> no SACCO
TZA2010$death <- ifelse(TZA2010$death %in% 1, 1, 0) # assume NA -> no death
TZA2010$one_crop <- ifelse(TZA2010$one_crop %in% 1, 1, 0) # assume NA -> no crops
TZA2010$inter_crop <- ifelse(TZA2010$inter_crop %in% 1, 1, 0) # assume NA -> no intercropping
# NOTE(review): unlike every other recode here, hybrd tests %in% 2 rather
# than %in% 1 -- confirm against the survey codebook that code 2 means
# "hybrid seeds" for this variable.
TZA2010$hybrd <- ifelse(TZA2010$hybrd %in% 2, 1, 0) # assume NA -> no hybrid seeds
TZA2010$title <- ifelse(TZA2010$title %in% 1, 1, 0) # assume NA -> no title
TZA2010$irrig <- ifelse(TZA2010$irrig %in% 1, 1, 0) # assume NA -> no irrigation
TZA2010$manure <- ifelse(TZA2010$manure %in% 1, 1, 0) # assume NA -> no manure
TZA2010$N <- ifelse(is.na(TZA2010$N), 0, TZA2010$N) # assume NA -> no nitrogen
TZA2010$P <- ifelse(is.na(TZA2010$P), 0, TZA2010$P) # assume NA -> no Phosphorous
TZA2010$pest <- ifelse(TZA2010$pest %in% 1, 1, 0) # assume NA -> no pesticide
TZA2010$trans <- ifelse(TZA2010$trans %in% 1, 1, 0) # assume NA -> no transportation for crop
# Drop the helper path variable so it does not linger in the workspace
rm("path2Data")
| /TZA/TZA_2010PP.R | no_license | tom13878/pro-gap | R | false | false | 1,881 | r | # -------------------------------------
# Post Processing for the TZA 2010
# data. Here we make some changes to
# the data to make it possible to
# perform further analysis. This file
# is separate from the raw data processing
# file in order to make transparent any
# changes that were made to the data
# -------------------------------------
if(Sys.info()["user"] == "Tomas"){
path2Data <- "C:/Users/Tomas/Documents/LEI/pro-gap/TZA/"
} else {
path2Data <- "N:/Internationaal Beleid (IB)/Projecten/2285000066 Africa Maize Yield Gap/SurveyData/Code/TZA"
}
# source the data
suppressMessages(source(file.path(path2Data, "/TZA_2010.R")))
# -------------------------------------
# For some questions respondents answered
# NA, it is not certain how these responses
# should be treated. Often we assume that
# an NA is equivalent to NO/0
# -------------------------------------
TZA2010$SACCO <- ifelse(TZA2010$SACCO %in% 1, 1, 0) # assume NA -> no SACCO
TZA2010$death <- ifelse(TZA2010$death %in% 1, 1, 0) # assume NA -> no death
TZA2010$one_crop <- ifelse(TZA2010$one_crop %in% 1, 1, 0) # assume NA -> no crops
TZA2010$inter_crop <- ifelse(TZA2010$inter_crop %in% 1, 1, 0) # assume NA -> no intercropping
TZA2010$hybrd <- ifelse(TZA2010$hybrd %in% 2, 1, 0) # assume NA -> no hybrid seeds
TZA2010$title <- ifelse(TZA2010$title %in% 1, 1, 0) # assume NA -> no title
TZA2010$irrig <- ifelse(TZA2010$irrig %in% 1, 1, 0) # assume NA -> no irrigation
TZA2010$manure <- ifelse(TZA2010$manure %in% 1, 1, 0) # assume NA -> no manure
TZA2010$N <- ifelse(is.na(TZA2010$N), 0, TZA2010$N) # assume NA -> no nitrogen
TZA2010$P <- ifelse(is.na(TZA2010$P), 0, TZA2010$P) # assume NA -> no Phosphorous
TZA2010$pest <- ifelse(TZA2010$pest %in% 1, 1, 0) # assume NA -> no pesticide
TZA2010$trans <- ifelse(TZA2010$trans %in% 1, 1, 0) # assume NA -> no transportation for crop
rm("path2Data")
|
library(tsne) # For 2D spatial arrangement of points
library(rjson) # For saving/loading xy coordinates in json
# using the development version of ggvis from github.
# install with devtools package and `devtools::install_github("rstudio/ggvis")`
library(ggvis)
# Data import -------------------------------------------------------------
# Main document table; one row per document, with LDA topic-weight columns.
full_tab = read.table(
    "data/table.tsv",
    header = TRUE,
    sep = "\t",
    stringsAsFactors = FALSE
)
# Load broad categories
raw_categories = read.csv(
    "data/16topics.csv",
    header = TRUE,
    stringsAsFactors = FALSE
)
# Named vector: values from the file's second column, names from the first.
categories = structure(raw_categories[[2]], names = raw_categories[[1]])
# Load narrow categories (only the label column is kept)
subcategories = read.csv(
    "data/87topics.csv",
    header = FALSE,
    stringsAsFactors = FALSE
)[,2]
# X-Y coordinates ---------------------------------------------------------
# The commented-out block below documents how data/xy.json was generated;
# it is kept for provenance and re-run only when coordinates must be rebuilt.
# # Hellinger distance is Euclidean distance of sqrt(p).
# # Taking the square root makes the difference between .001 and .002 matter more
# # than the difference between .501 and .502
# # Distances are calculated using the lda100 columns.
# distances = dist(sqrt(full_tab[, grep("^lda100", colnames(full_tab))]))
#
# # Compute and save t-SNE for 2-D position/layout.
# # t-SNE tries to ensure that similar documents are close together, but doesn't
# # care about precisely how far apart dissimilar documents are.
# # Makes nice clusters.
# xy = tsne(distances, whiten = TRUE, min_cost = 1.5)
#
# # Save coordinates in json for later javascript extraction
# write(
#   toJSON(
#     structure(as.data.frame(t(xy)), names = as.character(full_tab$primaryKey))
#   ),
#   file = "data/xy.json"
# )
#
# Load xy coordinates and convert from json (list of 2-vectors -> 2-col matrix)
xy = do.call(rbind, fromJSON(file = "data/xy.json"))
# Combine data sources ----------------------------------------------------
# One row per document: coordinates, all original columns, a display title,
# and a row id used as the hover key.
dat = cbind(
    structure(as.data.frame(xy), names = c("x", "y")),
    full_tab,
    title = paste0(full_tab$TI, full_tab$Title_for_TM),
    id = 1:nrow(full_tab)
)
# Make topic ID columns into factors (used for fill/shape scales below)
dat$maxtopic100selected_id = as.factor(dat$maxtopic100selected_id)
dat$maxtopic20selected_id = as.factor(dat$maxtopic20selected_id)
# Build the tooltips ------------------------------------------------------
# Based loosely on koundy's Stack Overflow answer at
# http://stackoverflow.com/a/24528087/783153
# Render the HTML tooltip for a hovered point; `x` is the hovered point's
# data (NULL when the cursor is not over a point).
tooltip <- function(x) {
  if (is.null(x)) {
    return(NULL)
  }
  # Recover the full row of `dat` for this point via its `id` key.
  row <- dat[dat$id == x$id, ]
  group_id <- row$maxtopic20selected_id
  subgroup_id <- row$maxtopic100selected_id
  # Assemble the HTML string the tooltip will render.
  paste0(
    "<i><b>",
    row$title,
    "</b></i><br>Group ",
    group_id,
    ": <b>",
    names(categories)[as.integer(as.character(group_id))],
    "</b><br>Subgroup ",
    subgroup_id,
    ": <b>",
    subcategories[subgroup_id],
    '</b><br><a href = "www.google.com">link.</a>'
  )
}
# Build the plot ----------------------------------------------------------
avg_size = 10 # The average point should be this large
floor_size = 2 # Points should all be at least this large
# The %>% ("pipe") operator lets us chain a bunch of commands together.
# Start with the data, hand it to ggvis(), then hand results to a function that
# sets up the points, then hand results to a function that removes the legend,
# then to a function that sets up the tooltips on hover.
# Note: in ggvis, `=` maps a property to data (scaled); `:=` sets a raw,
# unscaled value.
dat %>%
  ggvis(
    x = ~x,
    y = ~y,
    fill = ~maxtopic20selected_id,
    shape = ~maxtopic20selected_id,
    key := ~id,
    stroke := "white",
    strokeWidth := .5
  ) %>%
  layer_points(
    size := input_checkboxgroup(
      choices = categories,
      map = function(x){
        # Mapping from checkbox to point size
        # `x` is the vector of checked category codes; with nothing checked,
        # every point gets the average size.
        if(length(x) == 0){
          return(avg_size)
        }
        # Determine how closely affiliated each point is with the selected boxes
        affinities = lapply(
          x,
          function(check){
            # Pull out the relevant columns
            columns = grep(
              paste0("lda020selected_topicWeights_\\.?", check, "$"),
              colnames(dat)
            )
            dat[ , columns]
          }
        )
        # Multiply affinities together to identify points that are affiliated
        # with all the checked boxes.
        products = apply(do.call(cbind, affinities), 1, prod)
        # Return a vector of sizes (areas) for all the points: the mean
        # product maps to avg_size and every point gets at least floor_size.
        floor_size + products / mean(products) * (avg_size - floor_size)
      }
    )
  ) %>%
  hide_legend(scales = c("shape", "fill")) %>%
  add_tooltip(tooltip, on = "hover") # On hover, call the `tooltip` fn
| /R/ggvis.R | permissive | lwasser/TopicViz | R | false | false | 4,721 | r | library(tsne) # For 2D spatial arrangement of points
library(rjson) # For saving/loading xy coordinates in json
# using the development version of ggvis from github.
# install with devtools package and `devtools::install_github("rstudio/ggvis")`
library(ggvis)
# Data import -------------------------------------------------------------
full_tab = read.table(
"data/table.tsv",
header = TRUE,
sep = "\t",
stringsAsFactors = FALSE
)
# Load broad categories
raw_categories = read.csv(
"data/16topics.csv",
header = TRUE,
stringsAsFactors = FALSE
)
categories = structure(raw_categories[[2]], names = raw_categories[[1]])
# Load narrow categories
subcategories = read.csv(
"data/87topics.csv",
header = FALSE,
stringsAsFactors = FALSE
)[,2]
# X-Y coordinates ---------------------------------------------------------
# # Hellinger distance is Euclidean distance of sqrt(p).
# # Taking the square root makes the difference between .001 and .002 matter more
# # than the difference between .501 and .502
# # Distances are calculated using the lda100 columns.
# distances = dist(sqrt(full_tab[, grep("^lda100", colnames(full_tab))]))
#
# # Compute and save t-SNE for 2-D position/layout.
# # t-SNE tries to ensure that similar documents are close together, but doesn't
# # care about precisely how far apart dissimilar documents are.
# # Makes nice clusters.
# xy = tsne(distances, whiten = TRUE, min_cost = 1.5)
#
# # Save coordinates in json for later javascript extraction
# write(
# toJSON(
# structure(as.data.frame(t(xy)), names = as.character(full_tab$primaryKey))
# ),
# file = "data/xy.json"
# )
#
# Load xy coordinates and convert from json
xy = do.call(rbind, fromJSON(file = "data/xy.json"))
# Combine data sources ----------------------------------------------------
dat = cbind(
structure(as.data.frame(xy), names = c("x", "y")),
full_tab,
title = paste0(full_tab$TI, full_tab$Title_for_TM),
id = 1:nrow(full_tab)
)
# Make topic ID columns into factors
dat$maxtopic100selected_id = as.factor(dat$maxtopic100selected_id)
dat$maxtopic20selected_id = as.factor(dat$maxtopic20selected_id)
# Build the tooltips ------------------------------------------------------
# Based loosly on koundy's Stack Overflow answer at
# http://stackoverflow.com/a/24528087/783153
tooltip <- function(x) {
if(is.null(x)){
return(NULL)}
else{
# Identify the row of `dat` corresponding to the user-selected point using
# the`id` column.
row <- dat[dat$id == x$id, ]
# Paste together an HTML string for the tooltip to render.
paste0(
"<i><b>",
row$title,
"</b></i><br>Group ",
row$maxtopic20selected_id,
": <b>",
names(categories)[as.integer(as.character(row$maxtopic20selected_id))],
"</b><br>Subgroup ",
row$maxtopic100selected_id,
": <b>",
subcategories[row$maxtopic100selected_id],
'</b><br><a href = "www.google.com">link.</a>'
)
}
}
# Build the plot ----------------------------------------------------------
avg_size = 10 # The average point should be this large
floor_size = 2 # Points should all be at least this large
# The %>% ("pipe") operator lets us chain a bunch of commands together.
# Start with the data, hand it to ggvis(), then hand results to a function that
# sets up the points, then hand results to a function that removes the legend,
# then to a function that sets up the tooltips on hover.
dat %>%
ggvis(
x = ~x,
y = ~y,
fill = ~maxtopic20selected_id,
shape = ~maxtopic20selected_id,
key := ~id,
stroke := "white",
strokeWidth := .5
) %>%
layer_points(
size := input_checkboxgroup(
choices = categories,
map = function(x){
# Mapping from checkbox to point size
if(length(x) == 0){
return(avg_size)
}
# Determine how closely affiliated each point is with the selected boxes
affinities = lapply(
x,
function(check){
# Pull out the relevant columns
columns = grep(
paste0("lda020selected_topicWeights_\\.?", check, "$"),
colnames(dat)
)
dat[ , columns]
}
)
# Multiply affinities together to identify points that are affiliated
# with all the checked boxes.
products = apply(do.call(cbind, affinities), 1, prod)
# Return a vector of sizes (areas) for all the points
floor_size + products / mean(products) * (avg_size - floor_size)
}
)
) %>%
hide_legend(scales = c("shape", "fill")) %>%
add_tooltip(tooltip, on = "hover") # On hover, call the `tooltip` fn
|
# Install packages if we need it (conditional-install idiom: require()
# returns FALSE when the package is missing)
if (!require("corrplot")) install.packages("corrplot")
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("dplyr")) install.packages("dplyr")
if (!require("stringr")) install.packages("stringr")
if (!require("tidyr")) install.packages("tidyr")
if (!require("gridExtra"))install.packages("gridExtra")
if (!require("caret")) install.packages("caret")
# Load libraries
library (corrplot)
library (ggplot2)
library (dplyr)
library (stringr)
library (tidyr)
library (gridExtra)
library (caret)
# Clear global environment
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; prefer
# running the script in a fresh session instead.
rm(list = ls())
# Receiving names of files in dir "data_Q3_2016"
# NOTE(review): `pattern` is a regular expression, not a glob; "*.csv"
# happens to match but "\\.csv$" is the intended pattern.
file_names <- list.files(path="data_Q3_2016",pattern="*.csv")
# Set up current dir
# NOTE(review): setwd() changes the working directory for the rest of the
# session and is never restored; all relative paths below depend on it.
setwd("data_Q3_2016")
# Read every quarterly CSV and stack them into one data frame.
# Reading into a list and binding once avoids the O(n^2) cost of growing
# `dataset` with rbind.data.frame() on every loop iteration; the result and
# its row order are identical to the original loop.
dataset <- do.call(rbind.data.frame, lapply(file_names, read.csv))
# Write the combined data to a single file
write.csv(dataset, "hard_drive_short.csv", quote = FALSE, row.names = FALSE)
# Free the combined data frame; it is re-read from the CSV below
remove(dataset)
# Read database (the combined file written above)
dataset <- read.csv("hard_drive_short.csv")
# NOTE(review): View() is interactive (RStudio data viewer); it is a no-op
# or an error in non-interactive runs.
View(dataset)
# Choose columns for work
hard_drive <- dataset[c("date","serial_number","model","capacity_bytes",
                        "failure","smart_9_raw","smart_9_normalized")]
remove(dataset)
View(hard_drive)
# Temporary work database for work
test_hard_drive <- hard_drive[c("date","model","failure")]
# Capacity in terabytes (1e12 bytes)
test_hard_drive$T_capacity_bytes <- hard_drive$capacity_bytes/ 1e12
# For sure that model is character type
test_hard_drive$model = as.character(test_hard_drive$model)
# Splitting "model" = "firm" + "model_number":
# insert a space before the FIRST digit in each model string, so the
# alphabetic prefix (manufacturer) can be split off below.
t1 <- str_replace(test_hard_drive$model, pattern = "[0-9]",
                  paste(" ",str_extract(test_hard_drive$model, pattern = "[0-9]") ))
# Split each model into two parts at the first " "
t2 <- str_split_fixed(t1,pattern=" ",n=2)
# Add two new colums to test_hard_drive
# firm is the manufacturer prefix
# model_number is the remainder of the model string
test_hard_drive <- mutate(test_hard_drive,firm = t2[,1],
                          model_number = t2[,2])
# Delete the extra " " left over from the insertion above
test_hard_drive$model_number <- str_replace(test_hard_drive$model_number,pattern = " ","")
View(test_hard_drive)
# Form factor "firm" and "failure" (categorical axes for the plots below)
test_hard_drive$firm <- as.factor(test_hard_drive$firm)
test_hard_drive$failure <- as.factor(test_hard_drive$failure)
# Structure
str(test_hard_drive)
# Dependence capacity (in TB) on firm
# Facet - failure (0 - work, 1 - not) (Picture)
ggplot(test_hard_drive, aes(x=firm,y=T_capacity_bytes,color=firm))+geom_jitter()+facet_wrap(~failure,nrow=2,scales = "free")
# Dependence number of record on each firm +
# Facet on failure (0 - work, 1 - not) (Picture)
ggplot(test_hard_drive, aes(firm, fill = firm ) ) + geom_bar()+facet_wrap(~failure,nrow=2,scales = "free")
# Number of record for each firm (table)
firms <- count (test_hard_drive,firm,failure)
View(firms)
| /hard_drive.R | no_license | MiG-Kharkov/Hard_Drive | R | false | false | 3,226 | r | # Install packages if we need it
if (!require("corrplot")) install.packages("corrplot")
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("dplyr")) install.packages("dplyr")
if (!require("stringr")) install.packages("stringr")
if (!require("tidyr")) install.packages("tidyr")
if (!require("gridExtra"))install.packages("gridExtra")
if (!require("caret")) install.packages("caret")
# Load librarys
library (corrplot)
library (ggplot2)
library (dplyr)
library (stringr)
library (tidyr)
library (gridExtra)
library (caret)
# Clear global environment
rm(list = ls())
# Receving names of file in dir "data_Q3_2016"
file_names <- list.files(path="data_Q3_2016",pattern="*.csv")
# Set up current dir
setwd("data_Q3_2016")
# Clear data
dataset <- NULL
# Read all other files
for(file_name in file_names){
# Read next file
data <- read.csv(file_name)
# Merger data
dataset <- rbind.data.frame(dataset,data)
}
# Write appended files
write.csv(dataset,"hard_drive_short.csv",quote=FALSE,row.names=FALSE)
# Remove dataset,data
remove(dataset)
remove(data)
# Read database
dataset <- read.csv("hard_drive_short.csv")
View(dataset)
# Choose columns for work
hard_drive <- dataset[c("date","serial_number","model","capacity_bytes",
"failure","smart_9_raw","smart_9_normalized")]
remove(dataset)
View(hard_drive)
# Temporary work database for work
test_hard_drive <- hard_drive[c("date","model","failure")]
# Normalized capacity
test_hard_drive$T_capacity_bytes <- hard_drive$capacity_bytes/ 1e12
# For sure that model is character type
test_hard_drive$model = as.character(test_hard_drive$model)
# Spliting "model"="firm"+"model_number"
# Insert " " before first digit in a test_hard_drive$model
t1 <- str_replace(test_hard_drive$model, pattern = "[0-9]",
paste(" ",str_extract(test_hard_drive$model, pattern = "[0-9]") ))
# Spliting each model on two parts
# Spliting symbol is first " "
t2 <- str_split_fixed(t1,pattern=" ",n=2)
# Add two new colums to test_hard_drive
# Firm is firm name
# model_number is the number of hard drive model
test_hard_drive <- mutate(test_hard_drive,firm = t2[,1],
model_number = t2[,2])
# Deleting extra " " in model_number
test_hard_drive$model_number <- str_replace(test_hard_drive$model_number,pattern = " ","")
View(test_hard_drive)
# Form factor "firm" and "failure"
test_hard_drive$firm <- as.factor(test_hard_drive$firm)
test_hard_drive$failure <- as.factor(test_hard_drive$failure)
# Structure
str(test_hard_drive)
# Dependence capacity (in TB) on firm
# Facet - failure (0 - work, 1 - not) (Picture)
ggplot(test_hard_drive, aes(x=firm,y=T_capacity_bytes,color=firm))+geom_jitter()+facet_wrap(~failure,nrow=2,scales = "free")
# Dependence number of record on each firm +
# Facet on failure (0 - work, 1 - not) (Picture)
ggplot(test_hard_drive, aes(firm, fill = firm ) ) + geom_bar()+facet_wrap(~failure,nrow=2,scales = "free")
# Number of record for each firm (table)
firms <- count (test_hard_drive,firm,failure)
View(firms)
|
# Copyright (C) 2009
# Sebastien Dejean, Institut de Mathematiques, Universite de Toulouse et CNRS (UMR 5219), France
# Ignacio Gonzalez, Genopole Toulouse Midi-Pyrenees, France
# Kim-Anh Le Cao, French National Institute for Agricultural Research and
# ARC Centre of Excellence ins Bioinformatics, Institute for Molecular Bioscience, University of Queensland, Australia
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# NIPALS (Nonlinear Iterative Partial Least Squares) principal components.
#
# Extracts `ncomp` components of X by alternating regressions, which allows
# missing values: NA entries are zeroed in the cross-products and the
# denominators count only the observed entries. X is used as-is (no centering
# or scaling is performed here).
#
# Arguments:
#   X        numeric matrix (a data frame is coerced); may contain NAs,
#            must not contain infinite values.
#   ncomp    number of components to extract (rounded to an integer).
#   reconst  if TRUE, also return the rank-`ncomp` reconstruction of X.
#   max.iter maximum iterations per component.
#   tol      convergence tolerance on the squared change of the loadings.
#
# Value (returned invisibly): a list with
#   eig  component norms (the singular values of the fitted components),
#   p    loadings matrix (variables x components),
#   t    scores matrix, scaled so each column is divided by its eig,
#   rec  (only when reconst = TRUE) the reconstructed matrix.
#
# NOTE(review): X.names and ind.names are computed below but never used --
# presumably they were intended as dimnames for p and t.
nipals <-
function (X, ncomp = 1, reconst = FALSE, max.iter = 500, tol = 1e-09)
{
    #-- X matrix
    if (is.data.frame(X)) X = as.matrix(X)
    if (!is.matrix(X) || is.character(X))
        stop("'X' must be a numeric matrix.", call. = FALSE)
    if (any(apply(X, 1, is.infinite)))
        stop("infinite values in 'X'.", call. = FALSE)
    nc = ncol(X)
    nr = nrow(X)
    #-- put a names on the rows and columns of X --#
    X.names = colnames(X)
    if (is.null(X.names)) X.names = paste("V", 1:ncol(X), sep = "")
    ind.names = rownames(X)
    if (is.null(ind.names)) ind.names = 1:nrow(X)
    #-- ncomp
    if (is.null(ncomp) || !is.numeric(ncomp) || ncomp < 1 || !is.finite(ncomp))
        stop("invalid value for 'ncomp'.", call. = FALSE)
    ncomp = round(ncomp)
    #-- reconst
    if (!is.logical(reconst))
        stop("'reconst' must be a logical constant (TRUE or FALSE).",
             call. = FALSE)
    #-- max.iter
    if (is.null(max.iter) || max.iter < 1 || !is.finite(max.iter))
        stop("invalid value for 'max.iter'.", call. = FALSE)
    max.iter = round(max.iter)
    #-- tol
    if (is.null(tol) || tol < 0 || !is.finite(tol))
        stop("invalid value for 'tol'.", call. = FALSE)
    #-- end checking --#
    #------------------#
    #-- pca approach -----------------------------------------------------------#
    #---------------------------------------------------------------------------#
    #-- initialisation of the result matrices --#
    p = matrix(nrow = nc, ncol = ncomp)
    t.mat = matrix(nrow = nr, ncol = ncomp)
    eig = vector("numeric", length = ncomp)
    nc.ones = rep(1, nc)
    nr.ones = rep(1, nr)
    is.na.X = is.na(X)
    na.X = FALSE
    if (any(is.na.X)) na.X = TRUE
    #-- loop over components h --#
    for (h in 1:ncomp) {
        # Initialize the score vector from the highest-variance column.
        th = X[, which.max(apply(X, 2, var, na.rm = TRUE))]
        if (any(is.na(th))) th[is.na(th)] = 0
        ph.old = rep(1 / sqrt(nc), nc)
        ph.new = vector("numeric", length = nc)
        iter = 1
        diff = 1
        if (na.X) {
            # X with NAs zeroed, used in the cross-products below.
            X.aux = X
            X.aux[is.na.X] = 0
        }
        # Alternate: regress columns on scores (loadings), normalize,
        # then regress rows on loadings (scores), until convergence.
        while (diff > tol & iter <= max.iter) {
            if (na.X) {
                # NA-aware regression: numerator uses zeroed X; denominator
                # counts th^2 only over observed entries of each column.
                ph.new = crossprod(X.aux, th)
                Th = drop(th) %o% nc.ones
                Th[is.na.X] = 0
                th.cross = crossprod(Th)
                ph.new = ph.new / diag(th.cross)
            }
            else {
                ph.new = crossprod(X, th) / drop(crossprod(th))
            }
            # Normalize loadings to unit length.
            ph.new = ph.new / drop(sqrt(crossprod(ph.new)))
            if (na.X) {
                # Same NA-aware scheme for the scores.
                th = X.aux %*% ph.new
                P = drop(ph.new) %o% nr.ones
                P[t(is.na.X)] = 0
                ph.cross = crossprod(P)
                th = th / diag(ph.cross)
            }
            else {
                th = X %*% ph.new / drop(crossprod(ph.new))
            }
            diff = drop(sum((ph.new - ph.old)^2, na.rm = TRUE))
            ph.old = ph.new
            iter = iter + 1
        }
        if (iter > max.iter)
            warning(paste("Maximum number of iterations reached for comp.", h))
        # Deflate: remove the fitted rank-1 component before the next pass.
        X = X - th %*% t(ph.new)
        p[, h] = ph.new
        t.mat[, h] = th
        eig[h] = sum(th * th, na.rm = TRUE)
    }
    eig = sqrt(eig)
    # Scale each score column by its singular value (unit-norm scores).
    t.mat = scale(t.mat, center = FALSE, scale = eig)
    attr(t.mat, "scaled:scale") = NULL
    result = list(eig = eig, p = p, t = t.mat)
    if (reconst) {
        # Rank-ncomp reconstruction: sum of eig_h * t_h %*% t(p_h).
        X.hat = matrix(0, nrow = nr, ncol = nc)
        for (h in 1:ncomp) {
            X.hat = X.hat + eig[h] * t.mat[, h] %*% t(p[, h])
        }
        colnames(X.hat) = colnames(X)
        rownames(X.hat) = rownames(X)
        result$rec = X.hat
    }
    return(invisible(result))
}
| /mixOmics/R/nipals.R | no_license | ingted/R-Examples | R | false | false | 4,887 | r | # Copyright (C) 2009
# Sebastien Dejean, Institut de Mathematiques, Universite de Toulouse et CNRS (UMR 5219), France
# Ignacio Gonzalez, Genopole Toulouse Midi-Pyrenees, France
# Kim-Anh Le Cao, French National Institute for Agricultural Research and
# ARC Centre of Excellence ins Bioinformatics, Institute for Molecular Bioscience, University of Queensland, Australia
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# NIPALS (Nonlinear Iterative PArtial Least Squares) principal component
# analysis, tolerant of missing values.
#
# For each of the `ncomp` components a power-iteration loop alternates
# between updating the loading vector and the score vector; when X contains
# NAs the cross products are formed on a zero-filled copy and each entry is
# renormalized by the sum of squares over the cells actually present, so the
# missing cells simply drop out of the regressions.
#
# Arguments:
#   X        - numeric matrix (a data frame is coerced); may contain NA,
#              but not infinite or character values.
#   ncomp    - number of components to extract (rounded to an integer).
#   reconst  - if TRUE, also return the rank-`ncomp` reconstruction of X.
#   max.iter - iteration cap for each component's power loop.
#   tol      - convergence threshold on the squared change of the loadings.
#
# Value (returned invisibly): a list with
#   eig - pseudo singular values (sqrt of each component's sum of squares),
#   p   - (ncol(X) x ncomp) matrix of loadings,
#   t   - (nrow(X) x ncomp) matrix of scores, each column divided by eig,
#   rec - reconstructed matrix (only when reconst = TRUE).
nipals <-
function (X, ncomp = 1, reconst = FALSE, max.iter = 500, tol = 1e-09)
{
    #-- X matrix: coerce and validate
    if (is.data.frame(X)) X = as.matrix(X)
    if (!is.matrix(X) || is.character(X))
        stop("'X' must be a numeric matrix.", call. = FALSE)
    if (any(apply(X, 1, is.infinite)))
        stop("infinite values in 'X'.", call. = FALSE)
    nc = ncol(X)
    nr = nrow(X)
    #-- put a names on the rows and columns of X --#
    # (defaults are computed here but note they are not attached to the
    # matrices returned below)
    X.names = colnames(X)
    if (is.null(X.names)) X.names = paste("V", 1:ncol(X), sep = "")
    ind.names = rownames(X)
    if (is.null(ind.names)) ind.names = 1:nrow(X)
    #-- ncomp: positive finite number, rounded to an integer
    if (is.null(ncomp) || !is.numeric(ncomp) || ncomp < 1 || !is.finite(ncomp))
        stop("invalid value for 'ncomp'.", call. = FALSE)
    ncomp = round(ncomp)
    #-- reconst
    if (!is.logical(reconst))
        stop("'reconst' must be a logical constant (TRUE or FALSE).",
            call. = FALSE)
    #-- max.iter
    if (is.null(max.iter) || max.iter < 1 || !is.finite(max.iter))
        stop("invalid value for 'max.iter'.", call. = FALSE)
    max.iter = round(max.iter)
    #-- tol
    if (is.null(tol) || tol < 0 || !is.finite(tol))
        stop("invalid value for 'tol'.", call. = FALSE)
    #-- end checking --#
    #------------------#
    #-- pca approach -----------------------------------------------------------#
    #---------------------------------------------------------------------------#
    #-- matrix initialisation --#
    p = matrix(nrow = nc, ncol = ncomp)      # loadings, one column per comp.
    t.mat = matrix(nrow = nr, ncol = ncomp)  # scores, one column per comp.
    eig = vector("numeric", length = ncomp)  # sum of squares per component
    nc.ones = rep(1, nc)
    nr.ones = rep(1, nr)
    is.na.X = is.na(X)
    na.X = FALSE
    if (any(is.na.X)) na.X = TRUE            # flag selecting the NA code path
    #-- loop over components h --#
    for (h in 1:ncomp) {
        # Initialise the score vector with the column of the (deflated) X
        # that has the largest variance; zero out any NAs in it.
        th = X[, which.max(apply(X, 2, var, na.rm = TRUE))]
        if (any(is.na(th))) th[is.na(th)] = 0
        ph.old = rep(1 / sqrt(nc), nc)
        ph.new = vector("numeric", length = nc)
        iter = 1
        diff = 1
        if (na.X) {
            X.aux = X
            X.aux[is.na.X] = 0   # zero-filled copy used in cross products
        }
        while (diff > tol & iter <= max.iter) {
            # Loadings update: regress the columns of X on the score th.
            if (na.X) {
                ph.new = crossprod(X.aux, th)
                # Per-column normaliser: sum of th^2 over non-missing rows only.
                Th = drop(th) %o% nc.ones
                Th[is.na.X] = 0
                th.cross = crossprod(Th)
                ph.new = ph.new / diag(th.cross)
            }
            else {
                ph.new = crossprod(X, th) / drop(crossprod(th))
            }
            # Normalise the loadings to unit length.
            ph.new = ph.new / drop(sqrt(crossprod(ph.new)))
            # Scores update: regress the rows of X on the loadings.
            if (na.X) {
                th = X.aux %*% ph.new
                # Per-row normaliser: sum of ph^2 over non-missing cells only.
                P = drop(ph.new) %o% nr.ones
                P[t(is.na.X)] = 0
                ph.cross = crossprod(P)
                th = th / diag(ph.cross)
            }
            else {
                th = X %*% ph.new / drop(crossprod(ph.new))
            }
            # Convergence is measured on the change of the loading vector.
            diff = drop(sum((ph.new - ph.old)^2, na.rm = TRUE))
            ph.old = ph.new
            iter = iter + 1
        }
        if (iter > max.iter)
            warning(paste("Maximum number of iterations reached for comp.", h))
        # Deflate: remove the fitted rank-1 layer before the next component.
        X = X - th %*% t(ph.new)
        p[, h] = ph.new
        t.mat[, h] = th
        eig[h] = sum(th * th, na.rm = TRUE)
    }
    eig = sqrt(eig)   # pseudo singular values
    # Divide each score column by its singular value; drop the attribute
    # that scale() leaves behind.
    t.mat = scale(t.mat, center = FALSE, scale = eig)
    attr(t.mat, "scaled:scale") = NULL
    result = list(eig = eig, p = p, t = t.mat)
    if (reconst) {
        # Rank-ncomp reconstruction: sum over h of eig[h] * t_h %*% p_h'.
        X.hat = matrix(0, nrow = nr, ncol = nc)
        for (h in 1:ncomp) {
            X.hat = X.hat + eig[h] * t.mat[, h] %*% t(p[, h])
        }
        colnames(X.hat) = colnames(X)
        rownames(X.hat) = rownames(X)
        result$rec = X.hat
    }
    return(invisible(result))
}
|
# install.packages('vars')  # one-time install; left commented out
# Packages for the forecasting analysis. The original listed several
# packages twice (fpp2, tseries, dynlm, urca, corrplot, quadprog,
# forecast); library() is idempotent, so each is loaded once here.
library("DataCombine") # PercChange()
library("ggplot2")
library("corrplot")    # For plotting correlation matrices
library("tidyverse")
library("dplyr")       # (also attached by tidyverse; kept as in original)
library("openxlsx")    # read.xlsx()
library("tseries")     # To estimate ARMA models
library("fpp2")        # For forecasting
library("dynlm")       # To estimate ARDL models
library("urca")        # For the Dickey Fuller test
library("quadprog")    # For quadratic optimization
library("forecast")    # Arima(), forecast()
library("readxl")      # To read Excel files
library("vars")        # VARs
library("zoo")         # zoo objects, na.approx()
library("lubridate")   # ymd(), decimal_date()
# Autocorrelation diagnostics of the S&P 500 52-week change and the CCI.
# NOTE(review): these calls reference `data_1$sp500_52week_change` and
# `data_1$CCIw`, which are only created further down (read.xlsx and the
# CCI interpolation). On a fresh top-to-bottom run this section errors;
# it only works interactively when data_1 already exists. Consider moving
# these four lines below the data construction.
acf(data_1$sp500_52week_change)
acf(data_1$CCIw)
pacf(data_1$CCIw)
pacf(data_1$sp500_52week_change)
# --- Data construction ----------------------------------------------------
# Two copies of the WEI sheet are read: `data` accumulates intermediate
# change columns, `data_1` holds the series actually used for modelling.
data_1 <- read.xlsx("WEI.xlsx", sheet = 2, detectDates = TRUE)
data <- read.xlsx("WEI.xlsx", sheet = 2, detectDates = TRUE)
sp500data <- read.csv("GSPC.csv")
sp500_newdata <- read.csv("sp500newdata.csv")
# Mid-range price proxies for the S&P 500.
sp500data <- sp500data %>%
mutate(average_high_low = (High + Low) / 2)
sp500data <- sp500data %>%
mutate(average_open_close = (Open + Close) / 2)
sp500_newdata <- sp500_newdata %>%
mutate(average_open_close = (Open + Close) / 2)
# Append the S&P column to `data`; column 11 is assumed to be the newly
# added one -- TODO confirm the sheet has exactly 10 columns before this.
data <- data %>% cbind(sp500data$average_open_close)
colnames(data)[11] <- "average_open_close"
# Period-on-period percentage changes (PercChange from DataCombine).
BBchange <- PercChange(data = data, Var = "BB", NewVar = "BBchange")
BBchange <- BBchange$BBchange
data$BBchange <- BBchange
M1change <- PercChange(data = data, Var = "M1", NewVar = "M1change")
M1change <- M1change$M1change
data$M1change <- M1change
WEIchange <- PercChange(data = data, Var = "WEI", NewVar = 'WEIchange')
WEIchange <- WEIchange$WEIchange
data$WEIchange <- WEIchange
# 52-week percentage change of the S&P 500 (slideBy = -52 compares with the
# value one year earlier). Dropping the leading NAs shortens the vector to
# line up with data_1's rows -- presumably sp500_newdata carries 52 extra
# leading weeks; verify against the raw files.
sp500_52week_change <- PercChange(data = sp500_newdata, Var = "average_open_close", NewVar = "sp500_52week_change", slideBy = -52)
sp500_52week_change <- sp500_52week_change$sp500_52week_change
sp500_52week_change <- sp500_52week_change[!is.na(sp500_52week_change)]
data_1$sp500_52week_change <- sp500_52week_change
# 52-week level difference of the same series.
sp_500_52week_diff <- diff(sp500_newdata$average_open_close, lag = 52)
data_1$sp_500_52week_diff <- sp_500_52week_diff
WEI <- ts(data_1$WEI, start = 2008, frequency = 52)
#read CSV file and obtain data from 2007-2020, with values around 0
CCI <- read.csv('CCI.csv')
CCI_data = CCI %>% slice(3:nrow(CCI)) %>% mutate(percentage = Value - 100)
CCI_2007 <- ts(CCI_data[,9],start = 2007,frequency=12)
#Take difference with respect to the value of last year
diff_CCI = diff(CCI_2007, 12)
diff_CCI = ts(as.vector(diff_CCI), start = 2008, frequency = 12)
# Merge low and high freq time series
lowfreq <- zoo(diff_CCI,time(diff_CCI))
highfreq <- zoo(WEI,time(WEI))
merged <- merge(lowfreq,highfreq)
# Approximate the NAs and output at the dates of the WEI
# (linear interpolation of the monthly CCI onto the weekly WEI grid;
# rule = 2 extends the boundary values instead of returning NA).
CCIw <- na.approx(merged$lowfreq, xout = time(WEI),rule=2)
CCIw <- ts(CCIw,start = 2008,frequency=52)
data_1$CCIw =as.vector(CCIw)
# Corrplot of all the relevant variables
# NOTE(review): `na.remove` is not a documented corrplot() argument --
# confirm it has any effect; NA handling belongs in cor() via `use=`.
correlation <- cor(select(data_1, 4:8, 11:13))
corrplot(correlation, method = "color", na.remove = TRUE)
#preparing all time series
# Weekly series indexed in decimal years (365.25/7 periods per year),
# all starting 2008-01-05.
WEI_365 <- ts(data_1$WEI, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
CCIw_365 <- ts(data_1$CCIw, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
sp500_52week_change_365 <- ts(data_1$sp500_52week_change, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
sp_500_52week_diff_365 <- ts(data_1$sp_500_52week_diff, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
# Gaussian measurement noise added to the interpolated CCI.
# NOTE(review): no set.seed(), so CCIn differs on every run; the scale
# sqrt(sd(CCIw_365 / 100)) (a square root of a standard deviation) looks
# unusual -- confirm the intended noise variance.
noise<-ts(rnorm(length(CCIw_365))*sqrt(sd((CCIw_365)/100)),decimal_date(ymd("2008-01-05")),frequency=365.25/7)
CCIn <- CCIw_365+noise
#WEI <- ts(data_1$WEI, decimal_date(ymd("2008-01-05")), frequency = 52)
#CCIw <- ts(data_1$CCIw, decimal_date(ymd("2008-01-05")), frequency = 52)
#sp500_52week_change <- ts(data_1$sp500_52week_change, decimal_date(ymd("2008-01-05")), frequency = 52)
#sp_500_52week_diff <- ts(data_1$sp_500_52week_diff, decimal_date(ymd("2008-01-05")), frequency = 52)
#forecasting with arma
# Three candidate ARMA specifications for the WEI; h = 208 weeks = 4 years.
fit_1 <- Arima(WEI_365, order = c(2,0,3))
fARMA_1 <- forecast(fit_1,h=208)
autoplot(fARMA_1)
fit_2 <- Arima(WEI, order = c(5,0,4)) #figure 5
fARMA_2 <- forecast(fit_2,h=208)
autoplot(fARMA_2)
# AR(52) with most intermediate AR coefficients pinned to zero via `fixed`
# (NA = free, 0 = fixed): free are the first two AR lags, the lags near the
# 52-week seasonal, the 3 MA terms and the mean.
# TODO confirm the fixed vector has exactly 52 + 3 + 1 entries.
fit_3 = Arima(WEI, order = c(52,0,3), fixed=c(NA,NA,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,NA,NA,NA,NA,NA,NA))
fARMA_3 <- forecast(fit_3,h=208)
autoplot(fARMA_3)
#forecasting with VAR
# Trivariate VAR(3) in WEI, interpolated CCI and S&P 52-week change.
Y <- cbind(WEI_365, CCIw_365 , sp500_52week_change_365 )
VAR3 <- VAR(Y,p=3,type = c('const'))
# $WEI resolves via partial matching of `$` against the element named
# "WEI_365" (no colnames were assigned to Y).
fVAR3 <- forecast(VAR3, h=208)
autoplot(fVAR3$forecast$WEI)
VAR3$varresult$WEI$coefficients
#comparing forecasts
# NOTE(review): fVAR4 is only created further down the script, so this
# line errors on a fresh top-to-bottom run. Also the series label says
# "VAR(4)" although VAR4 below is estimated with p = 3 -- confirm which
# lag order was intended.
autoplot(fARMA_1$mean,series="ARMA(2,3)")+ autolayer(fVAR4$forecast$WEI,series="VAR(4)")+labs(y="WEI")
#+L(CCIw_365,(1:4))
#ARDL model
# ARDL(4,4,4): WEI on 4 lags of itself, the S&P change and the CCI.
ARDL4 <- dynlm(WEI_365 ~L(WEI_365,(1:4)) +L(sp500_52week_change_365 ,(1:4)) + L(CCIw_365,(1:4)))
summ<-summary(ARDL4)
print(summ$coefficients,digits=1)
Y <- cbind(WEI_365, CCIw_365, sp500_52week_change_365 )
# NOTE(review): named VAR4 and labelled "VAR(4)" below, but p = 3.
VAR4 <- VAR(Y,p=3,type = c('const'))
# Align VAR and ARDL coefficients by name for a side-by-side table.
corder1 <- order(names(VAR4$varresult$WEI$coefficients))
corder2 <- order(names(summ$coefficients[,1]))
coefVAR <- cbind(VAR4$varresult$WEI$coefficients[corder1],
summ$coefficients[corder2])
colnames(coefVAR)<- c("VAR(4)","ARDL(4,4,4)")
print(coefVAR,digits=3)
#in and out of sample ARMA
# Pseudo-out-of-sample set-up: estimate up to each forecast origin and
# compare h-step forecasts against the realized WEI.
es <- as.Date("2008/1/5") # Estimation start
fs <- as.Date("2016/1/2") # First forecast
fe <- as.Date("2020/2/1")# Final forecast
# NOTE(review): maxARp is defined but never used below (model orders come
# from the `parameters` data frame instead).
maxARp <- 6 # Consider AR(p) models with p=1,...,maxARlag
# Helper function to get dates into helpful format c(year, week)
convert_date <- function(date) {
  # Translate a Date into the c(year, week-of-year) pair expected by
  # ts()/window() for the weekly series used in this script.
  # %W numbers weeks 00-53 with Monday as the first day of the week.
  year_part <- as.numeric(format(date, "%Y"))
  week_part <- ceiling(as.numeric(format(date, "%W")))
  c(year_part, week_part)
}
#MSE of the ARMA models
# Re-declare the evaluation window; compared with the block above, the
# final forecast date is extended to 2020-03-21.
es <- as.Date("2008/1/5") # Estimation start
fs <- as.Date("2016/1/2") # First forecast
fe <- as.Date("2020/03/21")# Final forecast
convert_date <- function(date) {
  # (Identical redefinition of the helper above.)
  # Translate a Date into the c(year, week-of-year) pair expected by
  # ts()/window(); %W numbers weeks 00-53 with Monday as first weekday.
  year_part <- as.numeric(format(date, "%Y"))
  week_part <- ceiling(as.numeric(format(date, "%W")))
  c(year_part, week_part)
}
# Weekly forecast origins and the realized WEI over the evaluation window.
dates <- seq(fs,fe,by="week") # (or "week"...)
n <- length(dates) # number of forecasts
qF <- convert_date(fs)
qL <- convert_date(fe)
target <- window(WEI_365,start=qF,end=qL)
# Rolling pseudo-out-of-sample evaluation of an ARMA(p, q) model for the WEI.
#
# For every forecast origin in the global vector `dates`, the model is
# re-estimated on data ending `hor` weeks before that origin, and an
# `hor`-step-ahead forecast is compared with the realized value in `target`.
#
# Relies on objects defined above in the script: dates, n, qF, target,
# WEI_365 and convert_date(), plus Arima()/forecast() from the forecast
# package.
#
# Returns a list with:
#   fc     - ts of point forecasts (one per forecast origin)
#   fce    - ts of forecast errors (forecast minus realized)
#   target - the realized series, passed through for convenience
in_out_ARMA <- function(hor, p, q) {
    fc <- ts(data = matrix(NA, n, 1), start = qF, frequency = 365.25 / 7)
    fce <- ts(data = matrix(NA, n, 1), start = qF, frequency = 365.25 / 7)
    for (origin in seq_len(n)) {
        # The estimation sample ends `hor` weeks before this forecast date.
        sample_end <- seq(dates[origin], length = hor + 1, by = "-1 week")[hor + 1]
        y_est <- window(WEI_365, end = convert_date(sample_end))
        arma_fit <- Arima(y_est, order = c(p, 0, q))
        fc[origin, 1] <- forecast(arma_fit, h = hor)$mean[hor]
        fce[origin, 1] <- fc[origin, 1] - target[origin]
    }
    list(fc = fc, fce = fce, target = target)
}
# Rolling-window forecast evaluation of the three candidate ARMA models.
# MSE and mean absolute error are filled in a single pass over
# (horizon, model) pairs: each in_out_ARMA() call refits one ARMA per
# forecast origin, so computing both error measures from the same call
# (instead of repeating the whole exercise, as the original code did)
# halves the computation while producing identical matrices.
h_all <- c(26, 52, 104)        # Which horizons (weeks ahead) to consider
lh <- length(h_all)
mseARMA <- matrix(NA, lh, 3)   # Mean squared forecast errors
abeARMA <- matrix(NA, lh, 3)   # Mean absolute forecast errors
p <- c(2, 3, 5)                # AR orders of the candidate models
q <- c(3, 0, 4)                # MA orders of the candidate models
parameters <- as.data.frame(cbind(p, q))
for (p in 1:3) {  # NOTE: loop index reuses (and overwrites) `p`, as before
  for (i in seq(1, lh)) {
    fcARMA <- in_out_ARMA(h_all[i], parameters[p, 1], parameters[p, 2])
    mseARMA[i, p] <- colMeans(fcARMA$fce^2, na.rm = TRUE)
    abeARMA[i, p] <- colMeans(abs(fcARMA$fce), na.rm = TRUE)
  }
}
rownames(mseARMA) <- c("26-step", "52-step", "104-step")
colnames(mseARMA) <- c('ARMA(2,3)', 'ARMA(3,0)', 'ARMA(5,4)')
mseARMA
# Absolute error
rownames(abeARMA) <- c("26-step", "52-step", "104-step")
colnames(abeARMA) <- c('ARMA(2,3)', 'ARMA(3,0)', 'ARMA(5,4)')
abeARMA
#IRF analysis
Y <- cbind(sp500_52week_change_365 , CCIw_365 , WEI_365)
# NOTE(review): these labels do not match the cbind order above -- column 1
# holds the S&P series but is named 'CCI', and column 2 holds the CCI but
# is named 'SP500'. Because irf() selects impulse/response by name, the
# "SP500" shocks below actually hit the mislabelled column; compare the
# correctly ordered cbind a few lines down. Cholesky ordering also depends
# on the column order -- confirm the intended ordering before interpreting
# these orthogonalized IRFs.
colnames(Y) <- c('CCI','SP500', 'WEI' )
VARmodel <- VAR(Y,p=3,type=c("const"))
roots(VARmodel) # computes eigenvalues of companion matrix
# Orthogonalized impulse responses, 208 weeks ahead.
irf_WEI <- irf(VARmodel,impulse=c("SP500"),
response=c("WEI"),ortho=T, n.ahead = 208)
plot(irf_WEI,plot.type=c("single"))
irf_CCI <- irf(VARmodel,impulse=c("SP500"),
response=c("CCI"),ortho=T, n.ahead = 208)
plot(irf_CCI,plot.type=c("single"))
irf_WEI_CCI <- irf(VARmodel,impulse=c("CCI"),
response=c("WEI"),ortho=T, n.ahead = 208)
plot(irf_WEI_CCI,plot.type=c("single"))
# Lag-order selection by information criteria (up to 8 lags).
Y <- cbind(CCIw_365 , sp500_52week_change_365 , WEI_365)
colnames(Y) <- c('CCI', 'SP500', 'WEI')
VARmodel_ic <- VARselect(Y,type=c("const"),lag.max=8)
ic <- as.data.frame(t(VARmodel_ic$criteria))
ic
ggplot(data=ic, aes(x=seq(1,8),y=`SC(n)`))+geom_line()+ylab("BIC")+xlab("VAR(p)")
ggplot(data=ic, aes(x=seq(1,8),y=`AIC(n)`))+geom_line()+ylab("AIC")+xlab("VAR(p)")
#restricted VAR
# VAR(6) where the even lags (2, 4, 6) are restricted to zero for every
# equation: the restriction matrix concatenates one 3x3 indicator block per
# lag (1 = keep the coefficient, 0 = restrict it) plus a column for the
# constant.
p1 <- 6;
VARr <- VAR( Y,p=p1,type=c("const"))
nseries <- 3;
#mones <- matrix(1,nrow = nseries,ncol=nseries)
#mzero <- matrix(0,nrow = nseries,ncol=nseries)
vones <- matrix(1,nrow = nseries,ncol=1)
lag1mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE) # lag matrix cols = cci, sp500 and WEI. Rows are the same but indicate the equation. E.g. if [1,3] = 1 then the CCI equation will include lag 1 of the WEI
lag2mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag3mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag4mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag5mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag6mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
# NOTE(review): lag7mat, lag8mat and lag9mat below are never used (only
# lags 1-6 enter `restrict`); leftovers from an earlier p = 9 experiment?
lag7mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag8mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag9mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
# NOTE(review): the object `restrict` shadows vars::restrict(); the call on
# the next line still works because R looks up a function when a name is
# used in call position, but renaming the matrix would be clearer.
restrict <- matrix(cbind(lag1mat, lag2mat, lag3mat, lag4mat, lag5mat, lag6mat, vones), nrow = 3, ncol = p1*3+1) # order is: lag 1, ..., lag p and then the constant
VARr <- restrict(VARr, method = "man", resmat = restrict)
# Somehow BIC has to be calculated by hand
# BIC = log|Sigma_hat| + (log T / T) * k, with k = number of free
# coefficients = sum(restrict).
# NOTE(review): `T` here masks the TRUE shorthand for the rest of the
# session -- prefer a different name (e.g. nobs).
resid <- residuals(VARr)
T <- length(resid[,1])
BIC <- log(det(t(resid)%*%resid/T)) + (log(T)/T)*sum(restrict)
BIC
# Forecast and diagnostics from the restricted VAR.
fVARr <- forecast(VARr, h=200)
autoplot(fVARr$forecast$WEI)
VARr$varresult$WEI$coefficients
# You can check that the restricted (even) lags are now omitted by typing
summary(VARr)
roots(VARr)
# Orthogonalized IRFs of the restricted VAR, 300 weeks ahead.
# NOTE(review): same label/order caveat as for the unrestricted model --
# impulses are selected by the colnames assigned to Y above.
irf_WEI <- irf(VARr,impulse=c("SP500"),
response=c("WEI"),ortho=T, n.ahead = 300)
plot(irf_WEI,plot.type=c("single"))
irf_CCI <- irf(VARr,impulse=c("SP500"),
response=c("CCI"),ortho=T, n.ahead = 300)
plot(irf_CCI,plot.type=c("single"))
irf_WEI_CCI <- irf(VARr,impulse=c("CCI"),
response=c("WEI"),ortho=T, n.ahead = 300)
plot(irf_WEI_CCI,plot.type=c("single"))
# Dead code below: a Granger-style F-test loop copied from another
# exercise; it references objects (ardl.list, dgnp_T, ddef_T, ffr_T) that
# are not defined anywhere in this script.
#Ftest <- matrix(NA,4,2)
#lags <- 4 # number of lags
#nvar <- 3 # number of variables
#for (i in seq(4)){
#  y <- ardl.list[[i]]$residuals
#  T <- length(y)
#  # Fit ARDL models with and without lags of y
#  fit1 <- dynlm(y ~ L(y,(1:lags)) + L(dgnp_T,(1:i)) + L(ddef_T,(1:i)) + L(ffr_T,(1:i)))
#  fit2 <- dynlm(y ~ L(dgnp_T,(1:i)) + L(ddef_T,(1:i)) + L(ffr_T,(1:i)))
#  SSR1 <- sum(fit1$residuals^2)
#  SSR0 <- sum(fit2$residuals^2)
#  Ftest[i,1] <- ((SSR0-SSR1)/lags)/(SSR1/(T-lags-nvar*i))
#  Ftest[i,2] <- qf(0.95,lags,T-lags-nvar*i)
#}
#print(Ftest)
#
# Re-estimate the two preferred models and build an equal-weight (50/50)
# forecast combination of the ARMA(2,3) and the VAR(3).
fit_1 <- Arima(WEI_365, order = c(2,0,3))
fARMA_1 <- forecast(fit_1,h=208)
autoplot(fARMA_1)
Y <- cbind(WEI_365, CCIw_365 , sp500_52week_change_365 )
VAR4 <- VAR(Y,p=3,type = c('const'))
fVAR4 <- forecast(VAR4, h=208)
# $WEI resolves via partial matching against the element named "WEI_365".
autoplot(fVAR4$forecast$WEI)
VAR4$varresult$WEI$coefficients
# Columns of fcombined: 2 = combined mean, 3 = lower 80%, 4 = lower 95%,
# 5 = upper 80%, 6 = upper 95% (forecast's default levels); column 1 stays
# unused. NOTE(review): averaging the two models' interval bounds is only a
# heuristic -- it is not a valid prediction interval for the combination.
fcombined = matrix(0,length(fARMA_1$mean),6)
for (i in 1:208){
fcombined[i,2] = 0.5*as.numeric(fVAR4$forecast$WEI_365$mean[i])+0.5*as.numeric(fARMA_1$mean[i])
fcombined[i,3] = 0.5*as.numeric(fVAR4$forecast$WEI_365$lower[i,1])+0.5*as.numeric(fARMA_1$lower[i,1])
fcombined[i,4] = 0.5*as.numeric(fVAR4$forecast$WEI_365$lower[i,2])+0.5*as.numeric(fARMA_1$lower[i,2])
fcombined[i,5] = 0.5*as.numeric(fVAR4$forecast$WEI_365$upper[i,1])+0.5*as.numeric(fARMA_1$upper[i,1])
fcombined[i,6] = 0.5*as.numeric(fVAR4$forecast$WEI_365$upper[i,2])+0.5*as.numeric(fARMA_1$upper[i,2])
}
# Prepend the observed history so each combined path plots as one series.
combinedForecast_1 = ts( c(as.vector(WEI_365),fcombined[,2]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_low1 = ts( c(as.vector(WEI_365),fcombined[,3]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_low2 = ts( c(as.vector(WEI_365),fcombined[,4]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_high1 = ts( c(as.vector(WEI_365),fcombined[,5]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_high2 = ts( c(as.vector(WEI_365),fcombined[,6]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
# Plot the combined forecast together with the (averaged) 80%/95% bands.
# Fixed user-facing typos: "froecasts" -> "forecasts", "Var(3)" -> "VAR(3)",
# and the legend entry "80 low" -> "80% low".
ts.plot(combinedForecast_low1, combinedForecast_low2, combinedForecast_high1, combinedForecast_high2, combinedForecast_1,
        col = c('#4842f5', '#00b5af', '#4842f5', '#00b5af', '#000000'), ylab = 'WEI',
        main = 'Combined VAR(3) and ARMA(2,3) forecasts')
legend('bottomleft', legend = c('95% low', '80% low', '95% high', '80% high', 'forecast'),
       col = c('#4842f5', '#00b5af', '#4842f5', '#00b5af', '#000000'), lty = 1)
# In-sample comparison: combine the fitted values of the VAR(3) (which
# begin at observation 4, after the p = 3 initial lags) with the ARMA
# fitted values, then compare sums of squared residuals.
# NOTE(review): the sample sizes 639 and 636 (= 639 - 3) are hard-coded --
# confirm they match length(WEI_365). residuals_combined is also grown
# element-by-element; preallocating would be cleaner.
fcombined2 = matrix(0,636,2)
for (i in 4:639){
fcombined2[i-3,2] = 0.5*as.numeric(VAR4$varresult$WEI_365$fitted.values[i-3])+0.5*as.numeric(fit_1$fitted[i])
}
residuals_combined = c()
for(i in 4:639){
residuals_combined[i-3] = as.vector(WEI_365)[i] - fcombined2[i-3,2]
}
# Sum of squared residuals for the combination and for each model alone
# (ARMA residuals truncated to the same 636-observation sample).
SSR_c = sum(residuals_combined^2)
SSR_VAR = sum(as.numeric(VAR4$varresult$WEI_365$residuals)^2)
SSR_ARMA = sum(as.numeric(fit_1$residuals)[4:639]^2)
SSR = matrix(c(SSR_c, SSR_VAR, SSR_ARMA),1,3)
rownames(SSR) <- c("SSR")
colnames(SSR) <- c('Combined','VAR(3)','ARMA(2,3)')
SSR
| /forecasting.R | no_license | yannickpichardo/dynamicecon-report | R | false | false | 15,353 | r |
# install.packages('vars')  # one-time install; left commented out
# Packages for the forecasting analysis. The original listed several
# packages twice (fpp2, tseries, dynlm, urca, corrplot, quadprog,
# forecast); library() is idempotent, so each is loaded once here.
library("DataCombine") # PercChange()
library("ggplot2")
library("corrplot")    # For plotting correlation matrices
library("tidyverse")
library("dplyr")       # (also attached by tidyverse; kept as in original)
library("openxlsx")    # read.xlsx()
library("tseries")     # To estimate ARMA models
library("fpp2")        # For forecasting
library("dynlm")       # To estimate ARDL models
library("urca")        # For the Dickey Fuller test
library("quadprog")    # For quadratic optimization
library("forecast")    # Arima(), forecast()
library("readxl")      # To read Excel files
library("vars")        # VARs
library("zoo")         # zoo objects, na.approx()
library("lubridate")   # ymd(), decimal_date()
# Autocorrelation diagnostics of the S&P 500 52-week change and the CCI.
# NOTE(review): these calls reference `data_1$sp500_52week_change` and
# `data_1$CCIw`, which are only created further down (read.xlsx and the
# CCI interpolation). On a fresh top-to-bottom run this section errors;
# it only works interactively when data_1 already exists. Consider moving
# these four lines below the data construction.
acf(data_1$sp500_52week_change)
acf(data_1$CCIw)
pacf(data_1$CCIw)
pacf(data_1$sp500_52week_change)
data_1 <- read.xlsx("WEI.xlsx", sheet = 2, detectDates = TRUE)
data <- read.xlsx("WEI.xlsx", sheet = 2, detectDates = TRUE)
sp500data <- read.csv("GSPC.csv")
sp500_newdata <- read.csv("sp500newdata.csv")
sp500data <- sp500data %>%
mutate(average_high_low = (High + Low) / 2)
sp500data <- sp500data %>%
mutate(average_open_close = (Open + Close) / 2)
sp500_newdata <- sp500_newdata %>%
mutate(average_open_close = (Open + Close) / 2)
data <- data %>% cbind(sp500data$average_open_close)
colnames(data)[11] <- "average_open_close"
BBchange <- PercChange(data = data, Var = "BB", NewVar = "BBchange")
BBchange <- BBchange$BBchange
data$BBchange <- BBchange
M1change <- PercChange(data = data, Var = "M1", NewVar = "M1change")
M1change <- M1change$M1change
data$M1change <- M1change
WEIchange <- PercChange(data = data, Var = "WEI", NewVar = 'WEIchange')
WEIchange <- WEIchange$WEIchange
data$WEIchange <- WEIchange
sp500_52week_change <- PercChange(data = sp500_newdata, Var = "average_open_close", NewVar = "sp500_52week_change", slideBy = -52)
sp500_52week_change <- sp500_52week_change$sp500_52week_change
sp500_52week_change <- sp500_52week_change[!is.na(sp500_52week_change)]
data_1$sp500_52week_change <- sp500_52week_change
sp_500_52week_diff <- diff(sp500_newdata$average_open_close, lag = 52)
data_1$sp_500_52week_diff <- sp_500_52week_diff
WEI <- ts(data_1$WEI, start = 2008, frequency = 52)
#read CSV file and obtain data from 2007-2020, with values around 0
CCI <- read.csv('CCI.csv')
CCI_data = CCI %>% slice(3:nrow(CCI)) %>% mutate(percentage = Value - 100)
CCI_2007 <- ts(CCI_data[,9],start = 2007,frequency=12)
#Take difference with respect to the value of last year
diff_CCI = diff(CCI_2007, 12)
diff_CCI = ts(as.vector(diff_CCI), start = 2008, frequency = 12)
# Merge low and high freq time series
lowfreq <- zoo(diff_CCI,time(diff_CCI))
highfreq <- zoo(WEI,time(WEI))
merged <- merge(lowfreq,highfreq)
# Approximate the NAs and output at the dates of the WEI
CCIw <- na.approx(merged$lowfreq, xout = time(WEI),rule=2)
CCIw <- ts(CCIw,start = 2008,frequency=52)
data_1$CCIw =as.vector(CCIw)
# Corrplot of all the relevant variables
correlation <- cor(select(data_1, 4:8, 11:13))
corrplot(correlation, method = "color", na.remove = TRUE)
#preparing all time series
WEI_365 <- ts(data_1$WEI, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
CCIw_365 <- ts(data_1$CCIw, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
sp500_52week_change_365 <- ts(data_1$sp500_52week_change, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
sp_500_52week_diff_365 <- ts(data_1$sp_500_52week_diff, decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
noise<-ts(rnorm(length(CCIw_365))*sqrt(sd((CCIw_365)/100)),decimal_date(ymd("2008-01-05")),frequency=365.25/7)
CCIn <- CCIw_365+noise
#WEI <- ts(data_1$WEI, decimal_date(ymd("2008-01-05")), frequency = 52)
#CCIw <- ts(data_1$CCIw, decimal_date(ymd("2008-01-05")), frequency = 52)
#sp500_52week_change <- ts(data_1$sp500_52week_change, decimal_date(ymd("2008-01-05")), frequency = 52)
#sp_500_52week_diff <- ts(data_1$sp_500_52week_diff, decimal_date(ymd("2008-01-05")), frequency = 52)
#forecasting with arma
fit_1 <- Arima(WEI_365, order = c(2,0,3))
fARMA_1 <- forecast(fit_1,h=208)
autoplot(fARMA_1)
fit_2 <- Arima(WEI, order = c(5,0,4)) #figure 5
fARMA_2 <- forecast(fit_2,h=208)
autoplot(fARMA_2)
fit_3 = Arima(WEI, order = c(52,0,3), fixed=c(NA,NA,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,NA,NA,NA,NA,NA,NA))
fARMA_3 <- forecast(fit_3,h=208)
autoplot(fARMA_3)
#forecasting with VAR
Y <- cbind(WEI_365, CCIw_365 , sp500_52week_change_365 )
VAR3 <- VAR(Y,p=3,type = c('const'))
fVAR3 <- forecast(VAR3, h=208)
autoplot(fVAR3$forecast$WEI)
VAR3$varresult$WEI$coefficients
#comparing forecasts
autoplot(fARMA_1$mean,series="ARMA(2,3)")+ autolayer(fVAR4$forecast$WEI,series="VAR(4)")+labs(y="WEI")
#+L(CCIw_365,(1:4))
#ARDL model
ARDL4 <- dynlm(WEI_365 ~L(WEI_365,(1:4)) +L(sp500_52week_change_365 ,(1:4)) + L(CCIw_365,(1:4)))
summ<-summary(ARDL4)
print(summ$coefficients,digits=1)
Y <- cbind(WEI_365, CCIw_365, sp500_52week_change_365 )
VAR4 <- VAR(Y,p=3,type = c('const'))
corder1 <- order(names(VAR4$varresult$WEI$coefficients))
corder2 <- order(names(summ$coefficients[,1]))
coefVAR <- cbind(VAR4$varresult$WEI$coefficients[corder1],
summ$coefficients[corder2])
colnames(coefVAR)<- c("VAR(4)","ARDL(4,4,4)")
print(coefVAR,digits=3)
#in and out of sample ARMA
es <- as.Date("2008/1/5") # Estimation start
fs <- as.Date("2016/1/2") # First forecast
fe <- as.Date("2020/2/1")# Final forecast
maxARp <- 6 # Consider AR(p) models with p=1,...,maxARlag
# Helper function to get dates into helpful format c(yr,qtr)
convert_date <- function(date) {
  # Translate a Date into the c(year, week-of-year) pair expected by
  # ts()/window() for the weekly series used in this script.
  # %W numbers weeks 00-53 with Monday as the first day of the week.
  year_part <- as.numeric(format(date, "%Y"))
  week_part <- ceiling(as.numeric(format(date, "%W")))
  c(year_part, week_part)
}
#MSE of the ARMA models
es <- as.Date("2008/1/5") # Estimation start
fs <- as.Date("2016/1/2") # First forecast
fe <- as.Date("2020/03/21")# Final forecast
convert_date <- function(date) {
  # (Identical redefinition of the helper above.)
  # Translate a Date into the c(year, week-of-year) pair expected by
  # ts()/window(); %W numbers weeks 00-53 with Monday as first weekday.
  year_part <- as.numeric(format(date, "%Y"))
  week_part <- ceiling(as.numeric(format(date, "%W")))
  c(year_part, week_part)
}
dates <- seq(fs,fe,by="week") # (or "week"...)
n <- length(dates) # number of forecasts
qF <- convert_date(fs)
qL <- convert_date(fe)
target <- window(WEI_365,start=qF,end=qL)
# Rolling pseudo-out-of-sample evaluation of an ARMA(p, q) model for the WEI.
#
# For every forecast origin in the global vector `dates`, the model is
# re-estimated on data ending `hor` weeks before that origin, and an
# `hor`-step-ahead forecast is compared with the realized value in `target`.
#
# Relies on objects defined above in the script: dates, n, qF, target,
# WEI_365 and convert_date(), plus Arima()/forecast() from the forecast
# package.
#
# Returns a list with:
#   fc     - ts of point forecasts (one per forecast origin)
#   fce    - ts of forecast errors (forecast minus realized)
#   target - the realized series, passed through for convenience
in_out_ARMA <- function(hor, p, q) {
    fc <- ts(data = matrix(NA, n, 1), start = qF, frequency = 365.25 / 7)
    fce <- ts(data = matrix(NA, n, 1), start = qF, frequency = 365.25 / 7)
    for (origin in seq_len(n)) {
        # The estimation sample ends `hor` weeks before this forecast date.
        sample_end <- seq(dates[origin], length = hor + 1, by = "-1 week")[hor + 1]
        y_est <- window(WEI_365, end = convert_date(sample_end))
        arma_fit <- Arima(y_est, order = c(p, 0, q))
        fc[origin, 1] <- forecast(arma_fit, h = hor)$mean[hor]
        fce[origin, 1] <- fc[origin, 1] - target[origin]
    }
    list(fc = fc, fce = fce, target = target)
}
# Rolling-window forecast evaluation of the three candidate ARMA models.
# MSE and mean absolute error are filled in a single pass over
# (horizon, model) pairs: each in_out_ARMA() call refits one ARMA per
# forecast origin, so computing both error measures from the same call
# (instead of repeating the whole exercise, as the original code did)
# halves the computation while producing identical matrices.
h_all <- c(26, 52, 104)        # Which horizons (weeks ahead) to consider
lh <- length(h_all)
mseARMA <- matrix(NA, lh, 3)   # Mean squared forecast errors
abeARMA <- matrix(NA, lh, 3)   # Mean absolute forecast errors
p <- c(2, 3, 5)                # AR orders of the candidate models
q <- c(3, 0, 4)                # MA orders of the candidate models
parameters <- as.data.frame(cbind(p, q))
for (p in 1:3) {  # NOTE: loop index reuses (and overwrites) `p`, as before
  for (i in seq(1, lh)) {
    fcARMA <- in_out_ARMA(h_all[i], parameters[p, 1], parameters[p, 2])
    mseARMA[i, p] <- colMeans(fcARMA$fce^2, na.rm = TRUE)
    abeARMA[i, p] <- colMeans(abs(fcARMA$fce), na.rm = TRUE)
  }
}
rownames(mseARMA) <- c("26-step", "52-step", "104-step")
colnames(mseARMA) <- c('ARMA(2,3)', 'ARMA(3,0)', 'ARMA(5,4)')
mseARMA
# Absolute error
rownames(abeARMA) <- c("26-step", "52-step", "104-step")
colnames(abeARMA) <- c('ARMA(2,3)', 'ARMA(3,0)', 'ARMA(5,4)')
abeARMA
#IRF analysis
Y <- cbind(sp500_52week_change_365 , CCIw_365 , WEI_365)
# NOTE(review): these labels do not match the cbind order above -- column 1
# holds the S&P series but is named 'CCI', and column 2 holds the CCI but
# is named 'SP500'. Because irf() selects impulse/response by name, the
# "SP500" shocks below actually hit the mislabelled column; compare the
# correctly ordered cbind a few lines down. Cholesky ordering also depends
# on the column order -- confirm the intended ordering before interpreting
# these orthogonalized IRFs.
colnames(Y) <- c('CCI','SP500', 'WEI' )
VARmodel <- VAR(Y,p=3,type=c("const"))
roots(VARmodel) # computes eigenvalues of companion matrix
# Orthogonalized impulse responses, 208 weeks ahead.
irf_WEI <- irf(VARmodel,impulse=c("SP500"),
response=c("WEI"),ortho=T, n.ahead = 208)
plot(irf_WEI,plot.type=c("single"))
irf_CCI <- irf(VARmodel,impulse=c("SP500"),
response=c("CCI"),ortho=T, n.ahead = 208)
plot(irf_CCI,plot.type=c("single"))
irf_WEI_CCI <- irf(VARmodel,impulse=c("CCI"),
response=c("WEI"),ortho=T, n.ahead = 208)
plot(irf_WEI_CCI,plot.type=c("single"))
# Lag-order selection by information criteria (up to 8 lags).
Y <- cbind(CCIw_365 , sp500_52week_change_365 , WEI_365)
colnames(Y) <- c('CCI', 'SP500', 'WEI')
VARmodel_ic <- VARselect(Y,type=c("const"),lag.max=8)
ic <- as.data.frame(t(VARmodel_ic$criteria))
ic
ggplot(data=ic, aes(x=seq(1,8),y=`SC(n)`))+geom_line()+ylab("BIC")+xlab("VAR(p)")
ggplot(data=ic, aes(x=seq(1,8),y=`AIC(n)`))+geom_line()+ylab("AIC")+xlab("VAR(p)")
#restricted VAR
p1 <- 6;
VARr <- VAR( Y,p=p1,type=c("const"))
nseries <- 3;
#mones <- matrix(1,nrow = nseries,ncol=nseries)
#mzero <- matrix(0,nrow = nseries,ncol=nseries)
vones <- matrix(1,nrow = nseries,ncol=1)
lag1mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE) # lag matrix cols = cci, sp500 and WEI. Rows are the same but indicate the equation. E.g. if [1,3] = 1 then the CCI equation will include lag 1 of the WEI
lag2mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag3mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag4mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag5mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag6mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag7mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag8mat <- matrix(c(0, 0, 0,
0, 0, 0,
0, 0, 0)
,nrow = nseries,ncol=nseries, byrow = TRUE)
lag9mat <- matrix(c(1, 1, 1,
1, 1, 1,
1, 1, 1)
,nrow = nseries,ncol=nseries, byrow = TRUE)
restrict <- matrix(cbind(lag1mat, lag2mat, lag3mat, lag4mat, lag5mat, lag6mat, vones), nrow = 3, ncol = p1*3+1) # order is: lag 1, ..., lag p and then the constant
VARr <- restrict(VARr, method = "man", resmat = restrict)
# Somehow BIC has to be calculated by hand
resid <- residuals(VARr)
T <- length(resid[,1])
BIC <- log(det(t(resid)%*%resid/T)) + (log(T)/T)*sum(restrict)
BIC
fVARr <- forecast(VARr, h=200)
autoplot(fVARr$forecast$WEI)
VARr$varresult$WEI$coefficients
# You can check that now the third lag is omitted by typing
summary(VARr)
roots(VARr)
irf_WEI <- irf(VARr,impulse=c("SP500"),
response=c("WEI"),ortho=T, n.ahead = 300)
plot(irf_WEI,plot.type=c("single"))
irf_CCI <- irf(VARr,impulse=c("SP500"),
response=c("CCI"),ortho=T, n.ahead = 300)
plot(irf_CCI,plot.type=c("single"))
irf_WEI_CCI <- irf(VARr,impulse=c("CCI"),
response=c("WEI"),ortho=T, n.ahead = 300)
plot(irf_WEI_CCI,plot.type=c("single"))
#Ftest <- matrix(NA,4,2)
#lags <- 4 # number of lags
#nvar <- 3 # number of variables
#for (i in seq(4)){
# y <- ardl.list[[i]]$residuals
# T <- length(y)
# # Fit ARDL models with and without lags of y
# fit1 <- dynlm(y ~ L(y,(1:lags)) + L(dgnp_T,(1:i)) + L(ddef_T,(1:i)) + L(ffr_T,(1:i)))
# fit2 <- dynlm(y ~ L(dgnp_T,(1:i)) + L(ddef_T,(1:i)) + L(ffr_T,(1:i)))
# SSR1 <- sum(fit1$residuals^2)
# SSR0 <- sum(fit2$residuals^2)
# Ftest[i,1] <- ((SSR0-SSR1)/lags)/(SSR1/(T-lags-nvar*i))
# Ftest[i,2] <- qf(0.95,lags,T-lags-nvar*i)
#}
#print(Ftest)
#
# Benchmark models: an ARMA(2,3) on the WEI alone and a VAR(3) on
# (WEI, CCI, S&P500 52-week change).
fit_1 <- Arima(WEI_365, order = c(2,0,3))
fARMA_1 <- forecast(fit_1,h=208)
autoplot(fARMA_1)
Y <- cbind(WEI_365, CCIw_365 , sp500_52week_change_365 )
VAR4 <- VAR(Y,p=3,type = c('const'))
fVAR4 <- forecast(VAR4, h=208)
autoplot(fVAR4$forecast$WEI)
VAR4$varresult$WEI$coefficients
# Equal-weight combination of the two forecasts. Columns hold:
# 2 = mean, 3/4 = 80%/95% lower bounds, 5/6 = 80%/95% upper bounds
# (column 1 is unused). Loop bound tied to the matrix size rather than a
# hard-coded 208 so it stays in sync with the forecast horizon.
fcombined = matrix(0,length(fARMA_1$mean),6)
for (i in seq_len(nrow(fcombined))){
  fcombined[i,2] = 0.5*as.numeric(fVAR4$forecast$WEI_365$mean[i])+0.5*as.numeric(fARMA_1$mean[i])
  fcombined[i,3] = 0.5*as.numeric(fVAR4$forecast$WEI_365$lower[i,1])+0.5*as.numeric(fARMA_1$lower[i,1])
  fcombined[i,4] = 0.5*as.numeric(fVAR4$forecast$WEI_365$lower[i,2])+0.5*as.numeric(fARMA_1$lower[i,2])
  fcombined[i,5] = 0.5*as.numeric(fVAR4$forecast$WEI_365$upper[i,1])+0.5*as.numeric(fARMA_1$upper[i,1])
  fcombined[i,6] = 0.5*as.numeric(fVAR4$forecast$WEI_365$upper[i,2])+0.5*as.numeric(fARMA_1$upper[i,2])
}
# Append the combined paths to the observed series for plotting.
combinedForecast_1 = ts( c(as.vector(WEI_365),fcombined[,2]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_low1 = ts( c(as.vector(WEI_365),fcombined[,3]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_low2 = ts( c(as.vector(WEI_365),fcombined[,4]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_high1 = ts( c(as.vector(WEI_365),fcombined[,5]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
combinedForecast_high2 = ts( c(as.vector(WEI_365),fcombined[,6]), decimal_date(ymd("2008-01-05")), frequency = 365.25/7)
# Typos fixed in the title ('froecasts', 'Var(3)') and legend ('80 low').
ts.plot(combinedForecast_low1, combinedForecast_low2, combinedForecast_high1, combinedForecast_high2, combinedForecast_1,
        col= c('#4842f5','#00b5af','#4842f5', '#00b5af','#000000'), ylab = 'WEI', main = 'Combined VAR(3) and ARMA(2,3) forecasts')
legend('bottomleft', legend = c('95% low', '80% low', '95% high' ,'80% high','forecast'), col = c('#4842f5','#00b5af','#4842f5', '#00b5af','#000000'), lty=1)
# In-sample fit of the equal-weight combination. The VAR(3) loses its first
# 3 observations to the lags, so sample observation i (i = 4..639) lines up
# with VAR fitted value i-3 and ARMA fitted value i. Vectorized version of
# the original element-wise loops; values are identical.
fcombined2 <- matrix(0, 636, 2)
fcombined2[, 2] <- 0.5 * as.numeric(VAR4$varresult$WEI_365$fitted.values[1:636]) +
  0.5 * as.numeric(fit_1$fitted[4:639])
residuals_combined <- as.vector(WEI_365)[4:639] - fcombined2[, 2]
# Sum of squared residuals of each model over the common sample.
SSR_c    <- sum(residuals_combined^2)
SSR_VAR  <- sum(as.numeric(VAR4$varresult$WEI_365$residuals)^2)
SSR_ARMA <- sum(as.numeric(fit_1$residuals)[4:639]^2)
SSR <- matrix(c(SSR_c, SSR_VAR, SSR_ARMA), 1, 3,
              dimnames = list("SSR", c('Combined','VAR(3)','ARMA(2,3)')))
SSR
|
## set the working directory and read the household data
setwd("~/GitHub/datasciencecoursera/datasciencecoursera/ExData_Plotting1")
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
data <- read.table(unz(temp, "household_power_consumption.txt"),sep=";", header=TRUE, na.strings="?")
unlink(temp)
## convert the character date into a proper date class
data$DateTime <- paste(data$Date, data$Time, sep=" ")
data$DateTime <- strptime(data$DateTime, format="%d/%m/%Y %H:%M:%S", tz="GMT")
## now select only the 2 days in Feb. 2007 we're looking for:
## >= Feb 1 00:00 and strictly before Feb 3 00:00, i.e. Feb 1 and Feb 2
begDate <- strptime(c("2007-02-01 00:00:00 GMT"), format=c("%Y-%m-%d %H:%M:%S"), tz="GMT")
endDate <- strptime(c("2007-02-03 00:00:00 GMT"), format=c("%Y-%m-%d %H:%M:%S"), tz="GMT")
data <- subset(data, DateTime >= begDate & DateTime < endDate)
##
## create plot 1
## (axis-label typo fixed: "kilowats" -> "kilowatts")
##
hist(data$Global_active_power, ylab = "Frequency",
     main = "Global Active Power", xlab="Global Active Power (kilowatts)",
     col="red")
## copy my plot to a PNG file
dev.copy(png, file = "plot1.png", width=480, height=480)
dev.off() ## close the PNG device! | /plot1.R | no_license | candjmail/ExData_Plotting1 | R | false | false | 1,181 | r | ## set the working directory and read the household data
## set the working directory and read the household data
setwd("~/GitHub/datasciencecoursera/datasciencecoursera/ExData_Plotting1")
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
data <- read.table(unz(temp, "household_power_consumption.txt"),sep=";", header=TRUE, na.strings="?")
unlink(temp)
## convert the character date into a proper date class
data$DateTime <- paste(data$Date, data$Time, sep=" ")
data$DateTime <- strptime(data$DateTime, format="%d/%m/%Y %H:%M:%S", tz="GMT")
## now select only the 2 days in Feb. 2007 we're looking for:
## >= Feb 1 00:00 and strictly before Feb 3 00:00, i.e. Feb 1 and Feb 2
begDate <- strptime(c("2007-02-01 00:00:00 GMT"), format=c("%Y-%m-%d %H:%M:%S"), tz="GMT")
endDate <- strptime(c("2007-02-03 00:00:00 GMT"), format=c("%Y-%m-%d %H:%M:%S"), tz="GMT")
data <- subset(data, DateTime >= begDate & DateTime < endDate)
##
## create plot 1
## (axis-label typo fixed: "kilowats" -> "kilowatts")
##
hist(data$Global_active_power, ylab = "Frequency",
     main = "Global Active Power", xlab="Global Active Power (kilowatts)",
     col="red")
## copy my plot to a PNG file
dev.copy(png, file = "plot1.png", width=480, height=480)
dev.off() ## close the PNG device! |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{stri_opts_collator}
\alias{stri_opts_collator}
\title{Generate a List with Collator Settings}
\usage{
stri_opts_collator(locale = NULL, strength = 3L,
alternate_shifted = FALSE, french = FALSE, uppercase_first = NA,
case_level = FALSE, normalization = FALSE, numeric = FALSE, ...)
}
\arguments{
\item{locale}{single string, \code{NULL} or
\code{""} for default locale}
\item{strength}{single integer in \{1,2,3,4\}, which defines collation strength;
\code{1} for the most permissive collation rules, \code{4} for the most
strict ones}
\item{alternate_shifted}{single logical value; \code{FALSE}
treats all the code points with non-ignorable primary weights in the same way,
\code{TRUE} causes code points with primary weights that are equal or below
the variable top value to be ignored on primary level and moved to the quaternary level}
\item{french}{single logical value; used in Canadian French;
\code{TRUE} results in secondary weights being considered backwards}
\item{uppercase_first}{single logical value; \code{NA}
orders upper and lower case letters in accordance to their tertiary weights,
\code{TRUE} forces upper case letters to sort before lower case letters,
\code{FALSE} does the opposite}
\item{case_level}{single logical value;
controls whether an extra case level (positioned before the third level) is generated or not}
\item{normalization}{single logical value; if \code{TRUE},
then incremental check is performed to see whether the input data is in
the FCD form. If the data is not in the FCD form, incremental NFD
normalization is performed}
\item{numeric}{single logical value;
when turned on, this attribute generates a collation key for
the numeric value of substrings of digits;
this is a way to get '100' to sort AFTER '2'}
\item{...}{any other arguments to this function are purposely ignored}
}
\value{
Returns a named list object; missing settings are left with default values.
}
\description{
A convenience function to tune the \pkg{ICU} Collator's behavior,
e.g. in \code{\link{stri_compare}}, \code{\link{stri_order}},
\code{\link{stri_unique}}, \code{\link{stri_duplicated}},
as well as \code{\link{stri_detect_coll}}
and other \link{stringi-search-coll} functions.
}
\details{
\pkg{ICU}'s \emph{collator} performs a locale-aware, natural-language
alike string comparison.
This is a more reliable way of establishing relationships between
string than that provided by base \R, and definitely
one that is more complex and appropriate than ordinary byte-comparison.
A note on collation \code{strength}:
generally, \code{strength} set to 4 is
the least permissive.
Set to 2 to ignore case differences.
Set to 1 to also ignore diacritical differences.
The strings are Unicode-normalized before the comparison.
}
\examples{
stri_cmp("number100", "number2")
stri_cmp("number100", "number2", opts_collator=stri_opts_collator(numeric=TRUE))
stri_cmp("number100", "number2", numeric=TRUE) # equivalent
stri_cmp("above mentioned", "above-mentioned")
stri_cmp("above mentioned", "above-mentioned", alternate_shifted=TRUE)
}
\references{
\emph{Collation} -- ICU User Guide,
\url{http://userguide.icu-project.org/collation}
\emph{ICU Collation Service Architecture} -- ICU User Guide,
\url{http://userguide.icu-project.org/collation/architecture}
\emph{\code{icu::Collator} Class Reference} -- ICU4C API Documentation,
\url{http://www.icu-project.org/apiref/icu4c/classicu_1_1Collator.html}
}
\seealso{
Other locale_sensitive: \code{\link{\%s!==\%}},
\code{\link{\%s!=\%}}, \code{\link{\%s<=\%}},
\code{\link{\%s<\%}}, \code{\link{\%s===\%}},
\code{\link{\%s==\%}}, \code{\link{\%s>=\%}},
\code{\link{\%s>\%}}, \code{\link{\%stri!==\%}},
\code{\link{\%stri!=\%}}, \code{\link{\%stri<=\%}},
\code{\link{\%stri<\%}}, \code{\link{\%stri===\%}},
\code{\link{\%stri==\%}}, \code{\link{\%stri>=\%}},
\code{\link{\%stri>\%}}; \code{\link{stri_cmp}},
\code{\link{stri_cmp_eq}}, \code{\link{stri_cmp_equiv}},
\code{\link{stri_cmp_ge}}, \code{\link{stri_cmp_gt}},
\code{\link{stri_cmp_le}}, \code{\link{stri_cmp_lt}},
\code{\link{stri_cmp_neq}},
\code{\link{stri_cmp_nequiv}},
\code{\link{stri_compare}};
\code{\link{stri_count_boundaries}},
\code{\link{stri_count_words}};
\code{\link{stri_duplicated}},
\code{\link{stri_duplicated_any}};
\code{\link{stri_enc_detect2}};
\code{\link{stri_extract_all_words}},
\code{\link{stri_extract_first_words}},
\code{\link{stri_extract_last_words}};
\code{\link{stri_locate_all_boundaries}},
\code{\link{stri_locate_all_words}},
\code{\link{stri_locate_first_boundaries}},
\code{\link{stri_locate_first_words}},
\code{\link{stri_locate_last_boundaries}},
\code{\link{stri_locate_last_words}};
\code{\link{stri_order}}, \code{\link{stri_sort}};
\code{\link{stri_split_boundaries}};
\code{\link{stri_trans_tolower}},
\code{\link{stri_trans_totitle}},
\code{\link{stri_trans_toupper}};
\code{\link{stri_unique}}; \code{\link{stri_wrap}};
\code{\link{stringi-locale}};
\code{\link{stringi-search-boundaries}};
\code{\link{stringi-search-coll}}
Other search_coll: \code{\link{stringi-search-coll}};
\code{\link{stringi-search}}
}
| /stringi/man/stri_opts_collator.Rd | permissive | jackieli123723/clearlinux | R | false | false | 5,229 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{stri_opts_collator}
\alias{stri_opts_collator}
\title{Generate a List with Collator Settings}
\usage{
stri_opts_collator(locale = NULL, strength = 3L,
alternate_shifted = FALSE, french = FALSE, uppercase_first = NA,
case_level = FALSE, normalization = FALSE, numeric = FALSE, ...)
}
\arguments{
\item{locale}{single string, \code{NULL} or
\code{""} for default locale}
\item{strength}{single integer in \{1,2,3,4\}, which defines collation strength;
\code{1} for the most permissive collation rules, \code{4} for the most
strict ones}
\item{alternate_shifted}{single logical value; \code{FALSE}
treats all the code points with non-ignorable primary weights in the same way,
\code{TRUE} causes code points with primary weights that are equal or below
the variable top value to be ignored on primary level and moved to the quaternary level}
\item{french}{single logical value; used in Canadian French;
\code{TRUE} results in secondary weights being considered backwards}
\item{uppercase_first}{single logical value; \code{NA}
orders upper and lower case letters in accordance to their tertiary weights,
\code{TRUE} forces upper case letters to sort before lower case letters,
\code{FALSE} does the opposite}
\item{case_level}{single logical value;
controls whether an extra case level (positioned before the third level) is generated or not}
\item{normalization}{single logical value; if \code{TRUE},
then incremental check is performed to see whether the input data is in
the FCD form. If the data is not in the FCD form, incremental NFD
normalization is performed}
\item{numeric}{single logical value;
when turned on, this attribute generates a collation key for
the numeric value of substrings of digits;
this is a way to get '100' to sort AFTER '2'}
\item{...}{any other arguments to this function are purposely ignored}
}
\value{
Returns a named list object; missing settings are left with default values.
}
\description{
A convenience function to tune the \pkg{ICU} Collator's behavior,
e.g. in \code{\link{stri_compare}}, \code{\link{stri_order}},
\code{\link{stri_unique}}, \code{\link{stri_duplicated}},
as well as \code{\link{stri_detect_coll}}
and other \link{stringi-search-coll} functions.
}
\details{
\pkg{ICU}'s \emph{collator} performs a locale-aware, natural-language
alike string comparison.
This is a more reliable way of establishing relationships between
string than that provided by base \R, and definitely
one that is more complex and appropriate than ordinary byte-comparison.
A note on collation \code{strength}:
generally, \code{strength} set to 4 is
the least permissive.
Set to 2 to ignore case differences.
Set to 1 to also ignore diacritical differences.
The strings are Unicode-normalized before the comparison.
}
\examples{
stri_cmp("number100", "number2")
stri_cmp("number100", "number2", opts_collator=stri_opts_collator(numeric=TRUE))
stri_cmp("number100", "number2", numeric=TRUE) # equivalent
stri_cmp("above mentioned", "above-mentioned")
stri_cmp("above mentioned", "above-mentioned", alternate_shifted=TRUE)
}
\references{
\emph{Collation} -- ICU User Guide,
\url{http://userguide.icu-project.org/collation}
\emph{ICU Collation Service Architecture} -- ICU User Guide,
\url{http://userguide.icu-project.org/collation/architecture}
\emph{\code{icu::Collator} Class Reference} -- ICU4C API Documentation,
\url{http://www.icu-project.org/apiref/icu4c/classicu_1_1Collator.html}
}
\seealso{
Other locale_sensitive: \code{\link{\%s!==\%}},
\code{\link{\%s!=\%}}, \code{\link{\%s<=\%}},
\code{\link{\%s<\%}}, \code{\link{\%s===\%}},
\code{\link{\%s==\%}}, \code{\link{\%s>=\%}},
\code{\link{\%s>\%}}, \code{\link{\%stri!==\%}},
\code{\link{\%stri!=\%}}, \code{\link{\%stri<=\%}},
\code{\link{\%stri<\%}}, \code{\link{\%stri===\%}},
\code{\link{\%stri==\%}}, \code{\link{\%stri>=\%}},
\code{\link{\%stri>\%}}; \code{\link{stri_cmp}},
\code{\link{stri_cmp_eq}}, \code{\link{stri_cmp_equiv}},
\code{\link{stri_cmp_ge}}, \code{\link{stri_cmp_gt}},
\code{\link{stri_cmp_le}}, \code{\link{stri_cmp_lt}},
\code{\link{stri_cmp_neq}},
\code{\link{stri_cmp_nequiv}},
\code{\link{stri_compare}};
\code{\link{stri_count_boundaries}},
\code{\link{stri_count_words}};
\code{\link{stri_duplicated}},
\code{\link{stri_duplicated_any}};
\code{\link{stri_enc_detect2}};
\code{\link{stri_extract_all_words}},
\code{\link{stri_extract_first_words}},
\code{\link{stri_extract_last_words}};
\code{\link{stri_locate_all_boundaries}},
\code{\link{stri_locate_all_words}},
\code{\link{stri_locate_first_boundaries}},
\code{\link{stri_locate_first_words}},
\code{\link{stri_locate_last_boundaries}},
\code{\link{stri_locate_last_words}};
\code{\link{stri_order}}, \code{\link{stri_sort}};
\code{\link{stri_split_boundaries}};
\code{\link{stri_trans_tolower}},
\code{\link{stri_trans_totitle}},
\code{\link{stri_trans_toupper}};
\code{\link{stri_unique}}; \code{\link{stri_wrap}};
\code{\link{stringi-locale}};
\code{\link{stringi-search-boundaries}};
\code{\link{stringi-search-coll}}
Other search_coll: \code{\link{stringi-search-coll}};
\code{\link{stringi-search}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doubleclickbidmanager_functions.R
\name{queries.listqueries}
\alias{queries.listqueries}
\title{Retrieves stored queries.}
\usage{
queries.listqueries()
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item
}
Set \code{options(googleAuthR.scopes.selected = c())}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
#' @importFrom googleAuthR gar_api_generator
}
\seealso{
\href{https://developers.google.com/bid-manager/}{Google Documentation}
}
| /googledoubleclickbidmanagerv1.auto/man/queries.listqueries.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 696 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doubleclickbidmanager_functions.R
\name{queries.listqueries}
\alias{queries.listqueries}
\title{Retrieves stored queries.}
\usage{
queries.listqueries()
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item
}
Set \code{options(googleAuthR.scopes.selected = c())}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
#' @importFrom googleAuthR gar_api_generator
}
\seealso{
\href{https://developers.google.com/bid-manager/}{Google Documentation}
}
|
# Copyright 2015-2015 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav
#
# This file is part of PDQutils.
#
# PDQutils is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PDQutils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PDQutils. If not, see <http://www.gnu.org/licenses/>.
# Created: 2015.02.07
# Copyright: Steven E. Pav, 2015
# Author: Steven E. Pav
# Comments: Steven E. Pav
# for the Hermite Polynomials
require(orthopolynom)
require(moments)
# suppose raw.moments[k] is the kth raw moment of X;
# here we will compute the kth raw moment of X+del.
# n.b.
# E[(x+del)^k] = E[x^k + del choose(k,1) x^{k-1} + ... del^k ]
.shift_moments <- function(raw.moments,del) {
    # raw.moments[k+1] = E[X^k], with raw.moments[1] = E[X^0] = 1.
    # Returns the raw moments of X + del via the binomial expansion
    #   E[(X+del)^k] = sum_{j=0}^{k} choose(k,j) del^(k-j) E[X^j].
    nmom <- length(raw.moments)-1
    shf.moments <- raw.moments
    # the first moment just shifts by del
    shf.moments[2] <- raw.moments[2] + del
    # binomial expansion, vectorized over j; seq_len()[-1] guards the
    # nmom < 2 case, where the original 2:nmom would count downwards
    # and index past the end of raw.moments.
    for (k in seq_len(nmom)[-1]) {
        jj <- 0:k
        shf.moments[k+1] <- sum(choose(k, jj) * del^(k-jj) * raw.moments[jj+1])
    }
    return(shf.moments)
}
# suppose raw.moments[k] is the kth raw moment of X;
# here we will compute the kth raw moment of a * X.
# n.b.
# E[(ax)^k] = a^k E[x^k]
.scale_moments <- function(raw.moments,k) {
    # raw.moments[j+1] = E[X^j]; returns the raw moments of k*X, using
    # E[(kX)^j] = k^j E[X^j].
    powers <- seq_along(raw.moments) - 1
    raw.moments * (k^powers)
}
# Standardize the problem for a Gram-Charlier expansion: shift/scale the
# evaluation points and raw moments onto the canonical domain of the chosen
# basis, infer base-distribution parameters when not given, and build the
# orthogonal polynomial system.
#
# Returns a list with:
#   x            standardized evaluation points
#   full_moments raw moments (0th..nth) of the standardized variable
#   support      standardized support interval
#   scalby       Jacobian factor mapping densities back to the input scale
#   order.max    highest moment order available
#   orders       0:order.max
#   wt           weight (base density) on the standardized domain
#   poly         orthogonal polynomials (orthopolynom objects)
#   hn           squared norms of the polynomials under wt
#   intpoly      antiderivatives of wt * poly, used by the CDF expansion
.gca_setup <- function(x,raw.moments,support=NULL, basis=c('normal','gamma','beta','arcsine','wigner'), basepar=NULL) {
    basis <- tolower(match.arg(basis))
    # prepend the zeroth moment, E[X^0] = 1
    raw.moments <- c(1,raw.moments)
    # guess the support from the basis when not given
    if (is.null(support)) {
        support <- switch(basis,
            "normal"=c(-Inf,Inf),
            "gamma"=c(0,Inf),
            "beta"=c(0,1),
            "arcsine"=c(-1,1),
            "wigner"=c(-1,1))
    }
    support <- sort(support)
    # arcsine and wigner are Beta(1/2,1/2) and Beta(3/2,3/2) on [-1,1]
    if (basis == 'arcsine') {
        basepar <- list(shape1=0.5,shape2=0.5)
        basis <- 'beta'
    } else if (basis == 'wigner') {
        basepar <- list(shape1=1.5,shape2=1.5)
        basis <- 'beta'
    }
    # shift and scale x onto the canonical domain of the basis, transform the
    # moments to match, and record the Jacobian 'scalby' for densities
    if (basis == 'normal') {
        mu <- raw.moments[2]
        sigma <- sqrt(raw.moments[3] - mu^2)
        x <- (x - mu) / sigma
        moments <- .shift_moments(raw.moments,-mu)
        moments <- .scale_moments(moments,1/sigma)
        scalby <- 1/sigma
        support <- (support - mu)/sigma
    } else if (basis == 'gamma') {
        llim <- min(support)
        x <- (x - llim)
        moments <- .shift_moments(raw.moments,-llim)
        support <- (support - llim)
        scalby <- 1
    } else if (basis == 'beta') {
        ulim <- max(support)
        llim <- min(support)
        mu <- 0.5 * (ulim + llim)
        sigma <- 0.5 * (ulim - llim)
        x <- (x - mu) / sigma
        moments <- .shift_moments(raw.moments,-mu)
        moments <- .scale_moments(moments,1/sigma)
        scalby <- 1/sigma
        support <- c(-1,1)
    } else { stop('badCode') }  # nocov
    # infer base-distribution parameters from the first two moments
    if (is.null(basepar)) {
        if (basis == 'gamma') {
            # gamma raw moments: m1 = k*theta, m2 = k*theta^2 + (k*theta)^2
            theta <- (moments[3]/moments[2]) - moments[2]
            k <- moments[2] / theta
            basepar <- list(shape=k,scale=theta)
        } else if (basis == 'beta') {
            mu <- moments[2]
            s2 <- moments[3] - moments[2]^2
            # map from [-1,1] back to the standard beta domain [0,1]
            mu <- (mu + 1) / 2
            s2 <- s2 / 4
            # second raw moment
            mu2 <- s2 + mu^2
            # method-of-moments estimates of the beta shapes
            b <- (mu - mu2) * (1 - mu) / s2
            a <- b * mu / (1-mu)
            # n.b. the reversed roles of shape1/shape2, matching wt() below
            basepar <- list(shape2=b,shape1=a)
        }
    }
    # fold the gamma scale into x and the moments so the base has scale 1
    if (basis == 'gamma') {
        x <- x / basepar$scale
        moments <- .scale_moments(moments,1/basepar$scale)
        scalby <- scalby / basepar$scale
        support <- support / basepar$scale
        basepar$scale <- 1
    }
    order.max <- length(moments)-1
    orders <- seq(0,order.max)
    if (basis == 'normal') {
        wt <- dnorm
        # probabilists' Hermite polynomials; squared norms are n!
        poly <- orthopolynom::hermite.he.polynomials(order.max, normalized=FALSE)
        hn <- factorial(orders)
        # antiderivative of dnorm * He_n is -dnorm * He_{n-1}
        # (standard Hermite recurrence)
        intpoly <- c(function(y) { as.numeric(poly[[1]]) * pnorm(y) },
                     lapply(poly[1:(order.max)],function(pol) { function(y) { -dnorm(y) * as.function(pol)(y) } }) )
    } else if (basis == 'gamma') {
        alpha <- basepar$shape - 1
        wt <- function(u) { dgamma(u,shape=alpha+1,scale=1) }
        # generalized Laguerre polynomials
        poly <- orthopolynom::glaguerre.polynomials(order.max, alpha, normalized=FALSE)
        hn <- exp(lgamma(alpha + 1 + orders) - lgamma(alpha+1) - lfactorial(orders))
        ipoly <- orthopolynom::glaguerre.polynomials(order.max-1, alpha+1, normalized=FALSE)
        intpoly <- c(function(y) { as.numeric(poly[[1]]) * pgamma(y,shape=alpha+1,scale=1) },
                     lapply(1:(order.max),
                            function(idx) { function(y) { ((alpha+1)/idx) * dgamma(y,shape=alpha+2,scale=1) * as.function(ipoly[[idx]])(y) } }) )
    } else if (basis == 'beta') {
        # palpha pairs with shape2 and pbet with shape1; renamed from `pbeta`
        # so the local numeric no longer shadows stats::pbeta called below
        palpha <- basepar$shape2 - 1
        pbet <- basepar$shape1 - 1
        wt <- function(u) { 0.5 * dbeta(0.5 * (u+1),shape2=palpha+1,shape1=pbet+1) }
        poly <- orthopolynom::jacobi.p.polynomials(order.max, alpha=palpha, beta=pbet, normalized=FALSE)
        hn <- exp(lgamma(orders + palpha + 1) + lgamma(orders + pbet + 1) - lfactorial(orders) - lgamma(orders + palpha + pbet + 1) -
                  lbeta(palpha+1,pbet+1) - log(2*orders+palpha+pbet+1))
        ipoly <- orthopolynom::jacobi.p.polynomials(order.max-1, alpha=palpha+1, beta=pbet+1, normalized=FALSE)
        intpoly <- c(function(y) { as.numeric(poly[[1]]) * pbeta(0.5 * (y+1),shape2=palpha+1,shape1=pbet+1) },
                     lapply(1:(order.max),
                            function(idx) { function(y) {
                                # BUGFIX: evaluate dbeta at the argument y, not at the
                                # captured setup vector x (they only coincide when the
                                # closure happens to be called on the setup points).
                                # NOTE(review): shape1/shape2 here are swapped relative
                                # to wt() above; harmless for the symmetric arcsine and
                                # wigner cases -- confirm for general beta bases.
                                (-2/idx) * exp(lbeta(palpha+2,pbet+2) - lbeta(palpha+1,pbet+1)) *
                                (0.5 * dbeta(0.5 * (y+1),shape1=palpha+2,shape2=pbet+2)) *
                                as.function(ipoly[[idx]])(y) } }))
    } else { stop(paste('badCode: distribution',basis,'unknown')) }  # nocov
    retval <- list(x=x,full_moments=moments,support=support,scalby=scalby,
                   order.max=order.max,orders=orders,
                   wt=wt,poly=poly,hn=hn,intpoly=intpoly)
    retval
}
#' @title Approximate density and distribution via Gram-Charlier A expansion.
#'
#' @description
#'
#' Approximate the probability density or cumulative distribution function of a distribution via its raw moments.
#'
#' @template details-gca
#'
#' @usage
#'
#' dapx_gca(x, raw.moments, support=NULL,
#' basis=c('normal','gamma','beta','arcsine','wigner'),
#' basepar=NULL, log=FALSE)
#'
#' papx_gca(q, raw.moments, support=NULL,
#' basis=c('normal','gamma','beta','arcsine','wigner'),
#' basepar=NULL, lower.tail=TRUE, log.p=FALSE)
#'
#' @param x where to evaluate the approximate density.
#' @param q where to evaluate the approximate distribution.
#' @param raw.moments an atomic array of the 1st through kth raw moments
#' of the probability distribution.
#' @param support the support of the density function. It is assumed
#' that the density is zero on the complement of this open interval.
#' This defaults to \code{c(-Inf,Inf)} for the normal basis,
#' \code{c(0,Inf)} for the gamma basis, and
#' \code{c(0,1)} for the Beta, and
#' \code{c(-1,1)} for the arcsine and wigner.
#' @param basis the basis under which to perform the approximation. \code{'normal'}
#' gives the classical 'A' series expansion around the PDF and CDF of the normal
#' distribution via Hermite polynomials. \code{'gamma'} expands around a
#' gamma distribution with parameters \code{basepar$shape} and
#' \code{basepar$scale}.
#' \code{'beta'} expands around a beta distribution with parameters
#' \code{basepar$shape1} and \code{basepar$shape2}.
#' @param basepar the parameters for the base distribution approximation.
#' If \code{NULL}, the shape and rate are inferred from the first two moments
#' and/or from the \code{support} as appropriate.
#' @param log logical; if TRUE, densities \eqn{f} are given
#' as \eqn{\mbox{log}(f)}{log(f)}.
#' @param log.p logical; if TRUE, probabilities p are given
#' as \eqn{\mbox{log}(p)}{log(p)}.
#' @param lower.tail whether to compute the lower tail. If false, we approximate the survival function.
#' @return The approximate density at \code{x}, or the approximate CDF at \code{q}.
#' @keywords distribution
#' @seealso \code{\link{qapx_cf}}
#' @export
#' @template ref-Jaschke
#' @template ref-Blinnikov
#' @aliases papx_gca
#' @note
#'
#' Monotonicity of the CDF is not guaranteed.
#'
#' @examples
#' # normal distribution:
#' xvals <- seq(-2,2,length.out=501)
#' d1 <- dapx_gca(xvals, c(0,1,0,3,0), basis='normal')
#' d2 <- dnorm(xvals)
#' # they should match:
#' d1 - d2
#'
#' qvals <- seq(-2,2,length.out=501)
#' p1 <- papx_gca(qvals, c(0,1,0,3,0))
#' p2 <- pnorm(qvals)
#' p1 - p2
#'
#' xvals <- seq(-6,6,length.out=501)
#' mu <- 2
#' sigma <- 3
#' raw.moments <- c(2,13,62,475,3182)
#' d1 <- dapx_gca(xvals, raw.moments, basis='normal')
#' d2 <- dnorm(xvals,mean=mu,sd=sigma)
#' \dontrun{
#' plot(xvals,d1)
#' lines(xvals,d2,col='red')
#' }
#' p1 <- papx_gca(xvals, raw.moments, basis='normal')
#' p2 <- pnorm(xvals,mean=mu,sd=sigma)
#' \dontrun{
#' plot(xvals,p1)
#' lines(xvals,p2,col='red')
#' }
#'
#' # for a one-sided distribution, like the chi-square
#' chidf <- 30
#' ords <- seq(1,9)
#' raw.moments <- exp(ords * log(2) + lgamma((chidf/2) + ords) - lgamma(chidf/2))
#' xvals <- seq(0.3,10,length.out=501)
#' d1g <- dapx_gca(xvals, raw.moments, support=c(0,Inf), basis='gamma')
#' d2 <- dchisq(xvals,df=chidf)
#' \dontrun{
#' plot(xvals,d1g)
#' lines(xvals,d2,col='red')
#' }
#'
#' p1g <- papx_gca(xvals, raw.moments, support=c(0,Inf), basis='gamma')
#' p2 <- pchisq(xvals,df=chidf)
#' \dontrun{
#' plot(xvals,p1g)
#' lines(xvals,p2,col='red')
#' }
#'
#' # for a one-sided distribution, like the log-normal
#' mu <- 2
#' sigma <- 1
#' ords <- seq(1,8)
#' raw.moments <- exp(ords * mu + 0.5 * (sigma*ords)^2)
#' xvals <- seq(0.5,10,length.out=501)
#' d1g <- dapx_gca(xvals, raw.moments, support=c(0,Inf), basis='gamma')
#' d2 <- dnorm(log(xvals),mean=mu,sd=sigma) / xvals
#' \dontrun{
#' plot(xvals,d1g)
#' lines(xvals,d2,col='red')
#' }
#' @template etc
dapx_gca <- function(x,raw.moments,support=NULL,basis=c('normal','gamma','beta','arcsine','wigner'),basepar=NULL,
                     log=FALSE) {#FOLDUP
    # Gram-Charlier density approximation: standardize the points and
    # moments, then sum the weighted orthogonal-polynomial series.
    basis <- tolower(match.arg(basis))
    gca <- .gca_setup(x,raw.moments,support,basis,basepar)
    # weight (base density) at the standardized points
    wx <- gca$wt(gca$x)
    # zeroth-order term of the expansion
    retval <- (as.numeric(gca$poly[[1]]) / gca$hn[1]) * wx
    # higher-order terms: c_i is the moment-weighted sum of the i-th
    # polynomial's coefficients, normalized by the squared norm h_i.
    # seq_len() is safe even when order.max is 0 (c(1:0) would iterate).
    for (iii in seq_len(gca$order.max)) {
        ci <- (sum(coef(gca$poly[[iii+1]]) * gca$full_moments[1:(iii+1)])) / gca$hn[iii+1]
        retval <- retval + ci * wx * (as.function(gca$poly[[iii+1]])(gca$x))
    }
    # undo the standardization (change of variables carries a Jacobian)
    retval <- retval * gca$scalby
    # the truncated expansion can dip negative; clamp at zero
    retval <- pmax(0,retval)
    # the density is identically zero outside the support
    if (is.finite(min(gca$support))) {
        retval[gca$x <= min(gca$support)] <- 0
    }
    if (is.finite(max(gca$support))) {
        retval[gca$x >= max(gca$support)] <- 0
    }
    if (log) {
        retval <- log(retval)
    }
    return(retval)
}#UNFOLD
#' @export
papx_gca <- function(q,raw.moments,support=NULL,basis=c('normal','gamma','beta','arcsine','wigner'),basepar=NULL,
                     lower.tail=TRUE,log.p=FALSE) {#FOLDUP
    # Gram-Charlier CDF approximation: standardize the points and moments,
    # then sum the integrated (weight * polynomial) series.
    basis <- tolower(match.arg(basis))
    gca <- .gca_setup(q,raw.moments,support,basis,basepar)
    cumval <- 0
    # each term is c_k times the antiderivative of the k-th basis function,
    # with c_k the moment-weighted polynomial coefficient sum over h_k
    for (ord in c(0:gca$order.max)) {
        ck <- (sum(coef(gca$poly[[ord+1]]) * gca$full_moments[1:(ord+1)])) / gca$hn[ord+1]
        cumval <- cumval + ck * gca$intpoly[[ord+1]](gca$x)
    }
    # clamp to [0,1]: the truncated expansion need not be a true CDF
    cumval <- pmin(1,pmax(0,cumval))
    # outside the support the CDF is exactly 0 (left) or 1 (right)
    if (is.finite(min(gca$support))) {
        cumval[gca$x <= min(gca$support)] <- 0
    }
    if (is.finite(max(gca$support))) {
        cumval[gca$x >= max(gca$support)] <- 1
    }
    if (!lower.tail) {
        cumval <- 1 - cumval
    }
    if (log.p) {
        cumval <- log(cumval)
    }
    return(cumval)
}#UNFOLD
#for vim modeline: (do not edit)
# vim:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r
| /R/gram_charlier.r | no_license | cran/PDQutils | R | false | false | 12,255 | r | # Copyright 2015-2015 Steven E. Pav. All Rights Reserved.
# Author: Steven E. Pav
#
# This file is part of PDQutils.
#
# PDQutils is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PDQutils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PDQutils. If not, see <http://www.gnu.org/licenses/>.
# Created: 2015.02.07
# Copyright: Steven E. Pav, 2015
# Author: Steven E. Pav
# Comments: Steven E. Pav
# for the Hermite Polynomials
require(orthopolynom)
require(moments)
# suppose raw.moments[k] is the kth raw moment of X;
# here we will compute the kth raw moment of X+del.
# n.b.
# E[(x+del)^k] = E[x^k + del choose(k,1) x^{k-1} + ... del^k ]
# Transform raw moments under a location shift: given raw.moments[k+1] =
# E[X^k] (zeroth moment first), return the raw moments of X + del via the
# binomial expansion E[(X+del)^k] = sum_j choose(k,j) del^(k-j) E[X^j].
#
# BUG FIX: the original looped `for (k in 2:nmom)`, which iterates the
# *descending* sequence c(2, 1) when nmom == 1 (a length-2 moment vector)
# and silently extended the result past its proper length.  seq_len()
# handles every nmom >= 0 correctly, and the uniform formula also covers
# the k = 1 case that was previously special-cased.
.shift_moments <- function(raw.moments, del) {
	nmom <- length(raw.moments) - 1
	shf.moments <- raw.moments
	for (k in seq_len(nmom)) {
		j <- 0:k
		shf.moments[k + 1] <- sum(choose(k, j) * del^(k - j) * raw.moments[j + 1])
	}
	return(shf.moments)
}
# suppose raw.moments[k] is the kth raw moment of X;
# here we will compute the kth raw moment of a * X.
# n.b.
# E[(ax)^k] = a^k E[x^k]
# Transform raw moments under a scale change: since E[(kX)^j] = k^j E[X^j],
# multiply the j-th raw moment by k^j.  Element 1 holds the zeroth moment,
# so the exponents run 0 .. length-1.
.scale_moments <- function(raw.moments, k) {
	powers <- seq_along(raw.moments) - 1L
	raw.moments * k^powers
}
# Shared setup for dapx_gca / papx_gca.
#
# Standardizes the evaluation points, raw moments and support onto the
# canonical domain of the chosen basis, estimates base-distribution
# parameters from the first two moments when not supplied, and returns:
#   x             standardized evaluation points
#   full_moments  raw moments of the standardized variable (zeroth first)
#   support       standardized support
#   scalby        Jacobian of the standardization (to undo it in dapx_gca)
#   order.max     highest polynomial order available
#   wt            the base density (weight function)
#   poly          orthogonal polynomials for that weight (orthopolynom)
#   hn            squared norms of those polynomials under the weight
#   intpoly       antiderivatives of wt * poly (used by the CDF series)
.gca_setup <- function(x,raw.moments,support=NULL, basis=c('normal','gamma','beta','arcsine','wigner'), basepar=NULL) {
	basis <- tolower(match.arg(basis))
	# prepend the zeroth moment, which is always 1
	raw.moments <- c(1,raw.moments)
	# default support per basis:#FOLDUP
	if (is.null(support)) {
		support <- switch(basis,
											"normal"=c(-Inf,Inf),
											"gamma"=c(0,Inf),
											"beta"=c(0,1),
											"arcsine"=c(-1,1),
											"wigner"=c(-1,1))
	}#UNFOLD
	support <- sort(support)
	# arcsine and wigner are beta distributions with fixed shapes:#FOLDUP
	if (basis == 'arcsine') {
		basepar = list(shape1=0.5,shape2=0.5)
		basis = 'beta'
	} else if (basis == 'wigner') {
		basepar = list(shape1=1.5,shape2=1.5)
		basis = 'beta'
	}#UNFOLD
	# shift and scale x, transform the moments to match, and record the
	# Jacobian 'scalby' so the density can be mapped back:#FOLDUP
	if (basis == 'normal') {
		# standardize to zero mean, unit variance
		mu <- raw.moments[2]
		sigma <- sqrt(raw.moments[3] - mu^2)
		x <- (x - mu) / sigma
		moments <- .shift_moments(raw.moments,-mu)
		moments <- .scale_moments(moments,1/sigma)
		scalby <- 1/sigma
		support <- (support - mu)/sigma
	} else if (basis == 'gamma') {
		# translate so the support starts at zero
		llim = min(support)
		x <- (x - llim)
		moments <- .shift_moments(raw.moments,-llim)
		support <- (support - llim)
		scalby <- 1
	} else if (basis == 'beta') {
		# map the support linearly onto [-1,1]
		ulim = max(support)
		llim = min(support)
		mu <- 0.5 * (ulim + llim)
		sigma <- 0.5 * (ulim - llim)
		x <- (x - mu) / sigma
		moments <- .shift_moments(raw.moments,-mu)
		moments <- .scale_moments(moments,1/sigma)
		scalby <- 1/sigma
		support <- c(-1,1)
	} else { stop('badCode') }#UNFOLD # nocov
	# method-of-moments estimates of the base distribution parameters:#FOLDUP
	if (is.null(basepar)) {
		if (basis == 'gamma') {
			# first two uncentered moments for gamma are k theta and k theta^2 + (k theta)^2,
			# so theta = var/mean and k = mean/theta
			theta <- (moments[3]/moments[2]) - moments[2]
			k <- moments[2] / theta
			basepar <- list(shape=k,scale=theta)
		} else if (basis == 'beta') {
			# compute a and b by moment matching
			mu <- moments[2]
			s2 <- moments[3] - moments[2]^2
			# shift back to [0,1]
			mu <- (mu + 1) / 2
			s2 <- s2 / 4
			# second moment
			mu2 <- s2 + mu^2
			# solve for b, a
			b <- (mu - mu2) * (1 - mu) / s2
			a <- b * mu / (1-mu)
			# n.b. the reverse order; shape1=a, shape2=b
			basepar <- list(shape2=b,shape1=a)
		}
	}#UNFOLD
	# rescale gammas so the scale parameter is exactly 1:#FOLDUP
	if (basis == 'gamma') {
		x <- x / basepar$scale
		moments <- .scale_moments(moments,1/basepar$scale)
		scalby <- scalby / basepar$scale
		support <- support / basepar$scale
		basepar$scale <- 1
	}#UNFOLD
	order.max <- length(moments)-1
	orders <- seq(0,order.max)
	if (basis == 'normal') {
		wt <- dnorm
		# probabilists' Hermite polynomials; hn = n! is the squared norm
		poly <- orthopolynom::hermite.he.polynomials(order.max, normalized=FALSE)
		hn <- factorial(orders)
		# d/dy [ -dnorm(y) He_{n-1}(y) ] = dnorm(y) He_n(y), hence:
		intpoly <- c(function(y) { as.numeric(poly[[1]]) * pnorm(y) },
								 lapply(poly[1:(order.max)],function(pol) { function(y) { -dnorm(y) * as.function(pol)(y) } }) )
	} else if (basis == 'gamma') {
		alpha <- basepar$shape - 1
		wt <- function(x) { dgamma(x,shape=alpha+1,scale=1) }
		# generalized Laguerre polynomials for the gamma weight
		poly <- orthopolynom::glaguerre.polynomials(order.max, alpha, normalized=FALSE)
		hn <- exp(lgamma(alpha + 1 + orders) - lgamma(alpha+1) - lfactorial(orders))
		# antiderivatives use the Laguerre polynomials at alpha+1
		ipoly <- orthopolynom::glaguerre.polynomials(order.max-1, alpha+1, normalized=FALSE)
		intpoly <- c(function(y) { as.numeric(poly[[1]]) * pgamma(y,shape=alpha+1,scale=1) },
								 lapply(1:(order.max),
												function(idx) { function(y) { ((alpha+1)/idx) * dgamma(y,shape=alpha+2,scale=1) * as.function(ipoly[[idx]])(y) } }) )
	} else if (basis == 'beta') {
		# nb: the local 'pbeta' below is a number; calls of the form pbeta(...)
		# still resolve to stats::pbeta because R skips non-function bindings
		# when looking up a function name.
		palpha <- basepar$shape2 - 1
		pbeta <- basepar$shape1 - 1
		wt <- function(x) { 0.5 * dbeta(0.5 * (x+1),shape2=palpha+1,shape1=pbeta+1) }
		poly <- orthopolynom::jacobi.p.polynomials(order.max, alpha=palpha, beta=pbeta, normalized=FALSE)
		hn <- exp(lgamma(orders + palpha + 1) + lgamma(orders + pbeta + 1) - lfactorial(orders) - lgamma(orders + palpha + pbeta + 1) -
							lbeta(palpha+1,pbeta+1) - log(2*orders+palpha+pbeta+1))
		ipoly <- orthopolynom::jacobi.p.polynomials(order.max-1, alpha=palpha+1, beta=pbeta+1, normalized=FALSE)
		# BUG FIX: the integrand below previously evaluated dbeta at the
		# captured outer 'x' instead of the closure argument 'y'.  The two
		# coincide for the calls papx_gca actually makes (it always passes
		# gca$x), but the closure was wrong for any other argument.
		# NOTE(review): the shape1/shape2 ordering here is swapped relative to
		# wt() above; confirm against the Jacobi derivative identity.
		intpoly <- c(function(y) { as.numeric(poly[[1]]) * pbeta(0.5 * (y+1),shape2=palpha+1,shape1=pbeta+1) },
								 lapply(1:(order.max),
												function(idx) { function(y) {
													(-2/idx) * exp(lbeta(palpha+2,pbeta+2) - lbeta(palpha+1,pbeta+1)) *
													(0.5 * dbeta(0.5 * (y+1),shape1=palpha+2,shape2=pbeta+2)) *
													as.function(ipoly[[idx]])(y) } }))
	} else { stop(paste('badCode: distribution',basis,'unknown')) } # nocov
	retval <- list(x=x,full_moments=moments,support=support,scalby=scalby,
								 order.max=order.max,orders=orders,
								 wt=wt,poly=poly,hn=hn,intpoly=intpoly)
	return(retval)
}
#' @title Approximate density and distribution via Gram-Charlier A expansion.
#'
#' @description
#'
#' Approximate the probability density or cumulative distribution function of a distribution via its raw moments.
#'
#' @template details-gca
#'
#' @usage
#'
#' dapx_gca(x, raw.moments, support=NULL,
#' basis=c('normal','gamma','beta','arcsine','wigner'),
#' basepar=NULL, log=FALSE)
#'
#' papx_gca(q, raw.moments, support=NULL,
#' basis=c('normal','gamma','beta','arcsine','wigner'),
#' basepar=NULL, lower.tail=TRUE, log.p=FALSE)
#'
#' @param x where to evaluate the approximate density.
#' @param q where to evaluate the approximate distribution.
#' @param raw.moments an atomic array of the 1st through kth raw moments
#' of the probability distribution.
#' @param support the support of the density function. It is assumed
#' that the density is zero on the complement of this open interval.
#' This defaults to \code{c(-Inf,Inf)} for the normal basis,
#' \code{c(0,Inf)} for the gamma basis, and
#' \code{c(0,1)} for the Beta, and
#' \code{c(-1,1)} for the arcsine and wigner.
#' @param basis the basis under which to perform the approximation. \code{'normal'}
#' gives the classical 'A' series expansion around the PDF and CDF of the normal
#' distribution via Hermite polynomials. \code{'gamma'} expands around a
#' gamma distribution with parameters \code{basepar$shape} and
#' \code{basepar$scale}.
#' \code{'beta'} expands around a beta distribution with parameters
#' \code{basepar$shape1} and \code{basepar$shape2}.
#' @param basepar the parameters for the base distribution approximation.
#' If \code{NULL}, the shape and rate are inferred from the first two moments
#' and/or from the \code{support} as appropriate.
#' @param log logical; if TRUE, densities \eqn{f} are given
#' as \eqn{\mbox{log}(f)}{log(f)}.
#' @param log.p logical; if TRUE, probabilities p are given
#' as \eqn{\mbox{log}(p)}{log(p)}.
#' @param lower.tail whether to compute the lower tail. If false, we approximate the survival function.
#' @return The approximate density at \code{x}, or the approximate CDF at \code{q}.
#' @keywords distribution
#' @seealso \code{\link{qapx_cf}}
#' @export
#' @template ref-Jaschke
#' @template ref-Blinnikov
#' @aliases papx_gca
#' @note
#'
#' Monotonicity of the CDF is not guaranteed.
#'
#' @examples
#' # normal distribution:
#' xvals <- seq(-2,2,length.out=501)
#' d1 <- dapx_gca(xvals, c(0,1,0,3,0), basis='normal')
#' d2 <- dnorm(xvals)
#' # they should match:
#' d1 - d2
#'
#' qvals <- seq(-2,2,length.out=501)
#' p1 <- papx_gca(qvals, c(0,1,0,3,0))
#' p2 <- pnorm(qvals)
#' p1 - p2
#'
#' xvals <- seq(-6,6,length.out=501)
#' mu <- 2
#' sigma <- 3
#' raw.moments <- c(2,13,62,475,3182)
#' d1 <- dapx_gca(xvals, raw.moments, basis='normal')
#' d2 <- dnorm(xvals,mean=mu,sd=sigma)
#' \dontrun{
#' plot(xvals,d1)
#' lines(xvals,d2,col='red')
#' }
#' p1 <- papx_gca(xvals, raw.moments, basis='normal')
#' p2 <- pnorm(xvals,mean=mu,sd=sigma)
#' \dontrun{
#' plot(xvals,p1)
#' lines(xvals,p2,col='red')
#' }
#'
#' # for a one-sided distribution, like the chi-square
#' chidf <- 30
#' ords <- seq(1,9)
#' raw.moments <- exp(ords * log(2) + lgamma((chidf/2) + ords) - lgamma(chidf/2))
#' xvals <- seq(0.3,10,length.out=501)
#' d1g <- dapx_gca(xvals, raw.moments, support=c(0,Inf), basis='gamma')
#' d2 <- dchisq(xvals,df=chidf)
#' \dontrun{
#' plot(xvals,d1g)
#' lines(xvals,d2,col='red')
#' }
#'
#' p1g <- papx_gca(xvals, raw.moments, support=c(0,Inf), basis='gamma')
#' p2 <- pchisq(xvals,df=chidf)
#' \dontrun{
#' plot(xvals,p1g)
#' lines(xvals,p2,col='red')
#' }
#'
#' # for a one-sided distribution, like the log-normal
#' mu <- 2
#' sigma <- 1
#' ords <- seq(1,8)
#' raw.moments <- exp(ords * mu + 0.5 * (sigma*ords)^2)
#' xvals <- seq(0.5,10,length.out=501)
#' d1g <- dapx_gca(xvals, raw.moments, support=c(0,Inf), basis='gamma')
#' d2 <- dnorm(log(xvals),mean=mu,sd=sigma) / xvals
#' \dontrun{
#' plot(xvals,d1g)
#' lines(xvals,d2,col='red')
#' }
#' @template etc
# Approximate a probability density via the Gram-Charlier 'A' expansion:
# f(x) ~ w(x) * sum_i c_i p_i(x), where w is the base density of the chosen
# basis, p_i its orthogonal polynomials, and c_i = E[p_i(X)] / h_i computed
# from the supplied raw moments.  See the roxygen block above for the
# user-facing contract.
dapx_gca <- function(x,raw.moments,support=NULL,basis=c('normal','gamma','beta','arcsine','wigner'),basepar=NULL,
								log=FALSE) {#FOLDUP
	basis <- tolower(match.arg(basis))
	# standardize x/moments/support; build weight fn, polynomials, norms
	gca <- .gca_setup(x,raw.moments,support,basis,basepar)
	wx <- gca$wt(gca$x)
	# zeroth-order term of the expansion
	retval <- (as.numeric(gca$poly[[1]]) / gca$hn[1]) * wx
	# higher-order terms; the expectation E[p_i(X)] expands as the
	# polynomial's coefficients dotted with the raw moments
	for (iii in seq_len(gca$order.max)) {
		ci <- sum(coef(gca$poly[[iii+1]]) * gca$full_moments[1:(iii+1)]) / gca$hn[iii+1]
		retval <- retval + ci * wx * as.function(gca$poly[[iii+1]])(gca$x)
	}
	# undo the standardization (Jacobian) applied in .gca_setup
	retval <- retval * gca$scalby
	# the truncated series may dip below zero; clamp rather than warn
	retval <- pmax(0,retval)
	# the density is zero outside the declared support
	if (is.finite(min(gca$support))) {
		retval[gca$x <= min(gca$support)] <- 0
	}
	if (is.finite(max(gca$support))) {
		retval[gca$x >= max(gca$support)] <- 0
	}
	# nb: clamped zeros become -Inf under log
	if (log) {
		retval <- log(retval)
	}
	return(retval)
}#UNFOLD
#' @export
# Approximate a CDF via the Gram-Charlier 'A' expansion.  Uses the same
# coefficients c_i as dapx_gca, but sums the precomputed antiderivatives
# gca$intpoly (integrals of weight * polynomial), so the series evaluates
# directly to an approximate CDF.  Monotonicity is not guaranteed.
papx_gca <- function(q,raw.moments,support=NULL,basis=c('normal','gamma','beta','arcsine','wigner'),basepar=NULL,
								lower.tail=TRUE,log.p=FALSE) {#FOLDUP
	basis <- tolower(match.arg(basis))
	gca <- .gca_setup(q,raw.moments,support,basis,basepar)
	retval <- 0
	# accumulate c_i * I_i(q), with I_i the antiderivative of wt * p_i
	for (iii in 0:gca$order.max) {
		ci <- sum(coef(gca$poly[[iii+1]]) * gca$full_moments[1:(iii+1)]) / gca$hn[iii+1]
		retval <- retval + ci * gca$intpoly[[iii+1]](gca$x)
	}
	# the truncated series need not be a valid CDF; clamp to [0,1]
	retval <- pmin(1,pmax(0,retval))
	# pin the CDF to 0 below / 1 above the declared support
	if (is.finite(min(gca$support))) {
		retval[gca$x <= min(gca$support)] <- 0
	}
	if (is.finite(max(gca$support))) {
		retval[gca$x >= max(gca$support)] <- 1
	}
	# survival function, if requested
	if (!lower.tail) {
		retval <- 1 - retval
	}
	if (log.p) {
		retval <- log(retval)
	}
	return(retval)
}#UNFOLD
#for vim modeline: (do not edit)
# vim:fdm=marker:fmr=FOLDUP,UNFOLD:cms=#%s:syn=r:ft=r
|
## 1. makeCacheMatrix: This function creates a special "matrix" object
## that can cache its inverse.
## 2. cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
## Wrap a matrix in a list of accessor closures so that its inverse can be
## memoized alongside it.  All four closures share this constructor's
## environment; replacing the matrix via set() discards any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  # Replace the wrapped matrix and invalidate the cache
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  # Retrieve the wrapped matrix
  get <- function() {
    x
  }
  # Store a computed inverse in the cache
  setInverse <- function(i) {
    cached <<- i
  }
  # Retrieve the cached inverse (NULL when not yet computed)
  getInverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special matrix object 'x' (as built by
## makeCacheMatrix): reuse the cached inverse when available, otherwise
## compute it with solve(), store it in the cache, and return it.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (is.null(hit)) {
    hit <- solve(x$get(), ...)
    x$setInverse(hit)
  } else {
    message("getting cached data")
  }
  hit
}
| /cachematrix.R | no_license | edtsech/ProgrammingAssignment2 | R | false | false | 1,315 | r | ## 1. makeCacheMatrix: This function creates a special "matrix" object
## that can cache its inverse.
## 2. cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
## Create and return special object for inverse matrix caching.
## Constructor for a cache-aware matrix container.  The returned closures
## close over this function's frame, so set()/setInverse() mutate state
## that get()/getInverse() later observe; storing a new matrix via set()
## invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    # swap in a new matrix and drop any stale cached inverse
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # the wrapped matrix
    get = function() x,
    # cache a computed inverse
    setInverse = function(i) inv <<- i,
    # the cached inverse, or NULL if none has been stored yet
    getInverse = function() inv
  )
}
## Compute-or-fetch the inverse for a makeCacheMatrix object.  On a cache
## miss the inverse is computed via solve() (extra arguments forwarded),
## cached on the object, and returned; on a hit the cached value is
## returned after announcing the reuse.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
## Simulate one browsing user's CPU demand trace.
##
## Draws a total instruction budget from a fitted GEV distribution
## (rounded up and clamped to the observed [400742607, 492361442] range),
## spreads it over 1337 time slots proportionally to draws from a fitted
## GPD, and converts each slot to whole millions of instructions, carrying
## the sub-million remainder into the next slot so truncation loses no
## work.  Requires rgev()/rgpd() from the fExtremes package (attached by
## generateUsersBehaviour()).
##
## Returns a numeric vector of length 1337: millions of instructions per slot.
generateUsersBehaviourBrowsing <- function() {
  # total instructions for the session, clamped to the fitted support
  gev_draw <- rgev(1, xi = -1.324516e+00, mu = 4.783323e+08, beta = 1.839221e+07)
  total_instructions <- pmax(400742607, pmin(492361442, ceiling(gev_draw)))
  # per-slot arrival weights (cpu percentage); negative GPD draws mean idle
  instruction_arrivals <- rgpd(1337, xi = 0.87049, mu = -0.02547, beta = 0.08006)
  instruction_arrivals <- pmax(instruction_arrivals, 0)
  # rescale the weights so they sum to the total instruction budget
  ratio <- total_instructions / sum(instruction_arrivals)
  reminder <- 0
  for (i in seq_along(instruction_arrivals)) {
    # carry the sub-million remainder from the previous slot forward
    instruction_arrivals[i] <- instruction_arrivals[i] * ratio + reminder
    reminder <- instruction_arrivals[i] %% 1000000
    # report the slot in whole millions of instructions
    instruction_arrivals[i] <- instruction_arrivals[i] %/% 1000000
  }
  return(instruction_arrivals)
}
## Simulate one bidding user's CPU demand trace.
##
## Identical pipeline to generateUsersBehaviourBrowsing(), but the total
## instruction budget is drawn from a fitted generalized lambda
## distribution (rgl, gldist package) and the per-slot weights use a GPD
## with bidding-specific parameters.  The budget is rounded up and clamped
## to [400742607, 492361442]; each slot is reported in whole millions of
## instructions with the sub-million remainder carried forward.
##
## Returns a numeric vector of length 1337: millions of instructions per slot.
generateUsersBehaviourBidding <- function() {
  # total instructions for the session, clamped to the fitted support
  rgl_draw <- rgl(1, med = 4.877112e+08, iqr = 1.735356e+07, chi = -9.483909e-01, xi = 9.777355e-01)
  total_instructions <- pmax(400742607, pmin(492361442, ceiling(rgl_draw)))
  # per-slot arrival weights (cpu percentage); negative GPD draws mean idle
  instruction_arrivals <- rgpd(1337, xi = 0.02413, mu = -0.02484, beta = 0.07879)
  instruction_arrivals <- pmax(instruction_arrivals, 0)
  # rescale the weights so they sum to the total instruction budget
  ratio <- total_instructions / sum(instruction_arrivals)
  reminder <- 0
  for (i in seq_along(instruction_arrivals)) {
    # carry the sub-million remainder from the previous slot forward
    instruction_arrivals[i] <- instruction_arrivals[i] * ratio + reminder
    reminder <- instruction_arrivals[i] %% 1000000
    # report the slot in whole millions of instructions
    instruction_arrivals[i] <- instruction_arrivals[i] %/% 1000000
  }
  return(instruction_arrivals)
}
## Generate a per-slot instruction arrival trace for the given user
## profile.  Dispatches on 'userProfile' ("Browsing" or "Bidding"); any
## other value falls through the switch and yields NULL, as before.  The
## distribution packages the generators rely on are attached here.
generateUsersBehaviour <- function(userProfile){
  library('fExtremes')
  library('gldist')
  rate <- switch(userProfile,
                 Browsing = generateUsersBehaviourBrowsing(),
                 Bidding = generateUsersBehaviourBidding())
  return(rate)
}
| /WebApp/rubisGenerateUsersBehavior.R | no_license | tabash7/ECommerceApp-1 | R | false | false | 2,317 | r | generateUsersBehaviourBrowsing <- function(){
gev = rgev(1, xi = -1.324516e+00, mu = 4.783323e+08, beta = 1.839221e+07)
gevcel = ceiling(gev)
gevmin = pmin(492361442, gevcel)
total_instructions = pmax(400742607, gevmin)
# instruction arrivals in cpu percentage
instruction_arrivals <- rgpd(1337, xi = 0.87049, mu = -0.02547, beta = 0.08006)
for(i in 1:length(instruction_arrivals)) {
if(instruction_arrivals[i] < 0){
instruction_arrivals[i] <- 0
}
}
sum = sum(instruction_arrivals)
ratio = total_instructions / sum
reminder = 0
for(i in 1:length(instruction_arrivals)) {
instruction_arrivals[i] = instruction_arrivals[i] * ratio + reminder
reminder = instruction_arrivals[i] %% 1000000
# instruction arrivals in million of instructions
instruction_arrivals[i] = instruction_arrivals[i] %/% 1000000
}
return(instruction_arrivals)
}
# Simulate a bidding user's CPU demand over 1337 time slots.  Same pipeline
# as generateUsersBehaviourBrowsing, but the total budget is drawn from a
# generalized lambda distribution (rgl, gldist package) with its own GPD
# parameters for the per-slot weights.
generateUsersBehaviourBidding <- function(){
  # draw the session's total instruction count, rounded up and clamped
  # to [400742607, 492361442]
  rgl = rgl(1, med = 4.877112e+08, iqr = 1.735356e+07, chi = -9.483909e-01, xi = 9.777355e-01)
  rglcel = ceiling(rgl)
  rglmin = pmin(492361442, rglcel)
  total_instructions = pmax(400742607, rglmin)
  # instruction arrivals in cpu percentage
  instruction_arrivals <- rgpd(1337, xi = 0.02413, mu = -0.02484, beta = 0.07879)
  # negative GPD draws are treated as idle slots
  for(i in 1:length(instruction_arrivals)) {
    if(instruction_arrivals[i] < 0){
      instruction_arrivals[i] <- 0
    }
  }
  # rescale the slot weights so they sum to the total budget
  sum = sum(instruction_arrivals)
  ratio = total_instructions / sum
  reminder = 0
  for(i in 1:length(instruction_arrivals)) {
    # carry the sub-million remainder into the next slot so the integer
    # truncation below loses no work
    instruction_arrivals[i] = instruction_arrivals[i] * ratio + reminder
    reminder = instruction_arrivals[i] %% 1000000
    # instruction arrivals in million of instructions
    instruction_arrivals[i] = instruction_arrivals[i] %/% 1000000
  }
  return(instruction_arrivals)
}
## Pick the workload generator matching 'userProfile' and run it.
## "Browsing" and "Bidding" map to their respective generators; any other
## profile yields NULL (the switch falls through), exactly as before.
generateUsersBehaviour <- function(userProfile){
  library('fExtremes')
  library('gldist')
  chosen_generator <- switch(userProfile,
    Browsing = generateUsersBehaviourBrowsing,
    Bidding = generateUsersBehaviourBidding)
  if (is.null(chosen_generator)) {
    return(NULL)
  }
  chosen_generator()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/np_oauth.R
\name{get_pngs_and_show}
\alias{get_pngs_and_show}
\title{Download PNG for the icons in a list from get_icon_by_term()}
\usage{
get_pngs_and_show(icon_lists)
}
\arguments{
\item{icon_lists}{List of details about icons from Noun Project supplied by
the function get_icon_by_term() (may be useful for other functions too)}
}
\value{
A group of images in PNG format.
}
\description{
Download PNG for the icons in a list from get_icon_by_term()
}
\examples{
elephants <- get_pngs_and_show(icon_lists) # download icons
magick::image_append(elephants) # show icons - two elephants
}
| /man/get_pngs_and_show.Rd | no_license | isabelletot/nounprojectR | R | false | true | 667 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/np_oauth.R
\name{get_pngs_and_show}
\alias{get_pngs_and_show}
\title{Download PNG for the icons in a list from get_icon_by_term()}
\usage{
get_pngs_and_show(icon_lists)
}
\arguments{
\item{icon_lists}{List of details about icons from Noun Project supplied by
the function get_icon_by_term() (may be useful for other functions too)}
}
\value{
group of images in a PNG format
}
\description{
Download PNG for the icons in a list from get_icon_by_term()
}
\examples{
elephants <- get_pngs_and_show(icon_lists) # download icons
magick::image_append(elephants) # show icons - two elephants
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/rearlist-utils.R
\name{linkedTo}
\alias{linkedTo}
\alias{linkedTo,GRanges-method}
\alias{linkedTo,Rearrangement-method}
\alias{linkedTo,RearrangementList-method}
\title{A region linked by improperly paired reads}
\usage{
linkedTo(x)
\S4method{linkedTo}{GRanges}(x)
\S4method{linkedTo}{Rearrangement}(x)
\S4method{linkedTo}{RearrangementList}(x)
}
\arguments{
\item{x}{a \code{GRanges} object with value `linked.to` in the `mcols`}
}
\description{
A region linked by improperly paired reads
}
| /man/linkedTo.Rd | no_license | cancer-genomics/trellis | R | false | true | 590 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/rearlist-utils.R
\name{linkedTo}
\alias{linkedTo}
\alias{linkedTo,GRanges-method}
\alias{linkedTo,Rearrangement-method}
\alias{linkedTo,RearrangementList-method}
\title{A region linked by improperly paired reads}
\usage{
linkedTo(x)
\S4method{linkedTo}{GRanges}(x)
\S4method{linkedTo}{Rearrangement}(x)
\S4method{linkedTo}{RearrangementList}(x)
}
\arguments{
\item{x}{a \code{GRanges} object with value `linked.to` in the `mcols`}
}
\description{
A region linked by improperly paired reads
}
|
# Assemble and cache one instrument's data frame.
#
# Pulls the element named 'inst' out of every subject in 'sub.est', binds
# the results via ML.dataframe() (defined elsewhere) together with the
# 'real' values, and save()s the result in the global directory new_df_dir
# under a file named "<inst>-<suffix>".  Acts as a cache: if that file
# already exists the function returns NULL (via bare return()) and does no
# work; otherwise it returns the assembled data frame.
perInst <- function(inst, sub.est, real, suffix) {
	save_file <- paste0(new_df_dir, "/", inst, "-", suffix)
	if (file.exists(save_file)) return()
	dat <- lapply(sub.est, function(sub) {
		sub[[inst]]
	})
	dat <- ML.dataframe(dat)
	dat <- cbind(dat, real)
	# bind 'dat' to a variable named after the instrument so that
	# save(list = inst) records it under that name in the .RData file
	assign(inst, dat)
	save(list = inst, file = save_file)
	return(dat)
}
# Process one estimates file: split it into per-instrument data frames via
# perInst().  'multiples' and new_df_dir are globals.
perFile <- function(fname) {
	# suffix = everything after the last '-' in the file name; it keys the
	# per-instrument cache files written by perInst()
	suffix <- sub(".*-", "", fname)
	insts <- paste0("HNG_", multiples)
	# Have we done all the things for this file? Then skip this iteration
	skipit <- lapply(insts, function(inst) {
		save_file <- paste0(new_df_dir, "/", inst, "-", suffix)
		file.exists(save_file)
	})
	# flatten the list of logicals into one vector
	skipit <- do.call(c, skipit)
	if (all(skipit)) return()
	cat("Instsplit for", fname, "\n")
	# NOTE(review): load() is assumed to create 'sub.est' and 'real' in this
	# frame -- only known at runtime from the file's contents; confirm.
	load(fname)
	out <- lapply(insts, perInst, sub.est = sub.est, real = real, suffix = suffix)
	names(out) <- insts
	return(out)
}
# Driver: scan new_est_dir for estimate files and split each one.
# NOTE(review): the loop index 'i' is never used -- 'fpat' is the constant
# "sub_est_new-1." on every pass, so all 20 iterations scan the same files
# (perInst's cache makes the repeats cheap).  If distinct batches were
# intended, fpat should presumably be paste0("sub_est_new-", i, ".");
# confirm against the file naming in new_est_dir.
rr <- c(1:20)
for (i in rr) {
	fpat <- paste0("sub_est_new-1.")
	files <- list.files(path = new_est_dir, pattern = fpat, full.names = T)
	# c.lapply is a project helper (defined elsewhere); result discarded
	null <- c.lapply(files, perFile)
	print(warnings())
}
| /addsim/instsplit.R | no_license | bamonroe/code-ch4 | R | false | false | 1,057 | r | perInst <- function(inst, sub.est, real, suffix) {
save_file <- paste0(new_df_dir, "/", inst, "-", suffix)
if (file.exists(save_file)) return()
dat <- lapply(sub.est, function(sub) {
sub[[inst]]
})
dat <- ML.dataframe(dat)
dat <- cbind(dat, real)
assign(inst, dat)
save(list = inst, file = save_file)
return(dat)
}
# Split one estimates file into per-instrument data frames via perInst().
# 'multiples' and new_df_dir are globals.
perFile <- function(fname) {
	# suffix = text after the last '-'; keys the per-instrument cache files
	suffix <- sub(".*-", "", fname)
	insts <- paste0("HNG_", multiples)
	# Have we done all the things for this file? Then skip this iteration
	skipit <- lapply(insts, function(inst) {
		save_file <- paste0(new_df_dir, "/", inst, "-", suffix)
		file.exists(save_file)
	})
	# flatten list of logicals into one vector
	skipit <- do.call(c, skipit)
	if (all(skipit)) return()
	cat("Instsplit for", fname, "\n")
	# NOTE(review): assumes load() brings 'sub.est' and 'real' into this
	# frame from the .RData file -- confirm against how the files are written.
	load(fname)
	out <- lapply(insts, perInst, sub.est = sub.est, real = real, suffix = suffix)
	names(out) <- insts
	return(out)
}
# Driver: scan new_est_dir and split each matching estimates file.
# NOTE(review): 'i' is unused inside the loop, so every one of the 20
# iterations uses the same constant pattern "sub_est_new-1." -- likely
# intended as paste0("sub_est_new-", i, "."); confirm before changing.
rr <- c(1:20)
for (i in rr) {
	fpat <- paste0("sub_est_new-1.")
	files <- list.files(path = new_est_dir, pattern = fpat, full.names = T)
	# c.lapply is a project helper (defined elsewhere); result discarded
	null <- c.lapply(files, perFile)
	print(warnings())
}
|
#' @title goggles data from textbook
#' @docType data
#' @keywords datasets
#' @name goggles
#' @usage data(goggles)
#' @format A data frame with 48 rows and 3 variables:
#' \describe{
#'   \item{gender}{Gender of participant}
#'   \item{alcohol}{Amount of alcohol consumed}
#'   \item{attractiveness}{Perceived attractiveness}
#' }
#' @description A data set from Field et al (2012)
#' @references Field, A., Miles, J., & Field, Z. (2012) Discovering Statistics Using R. Sage: Chicago.
#' @source \url{http://studysites.sagepub.com/dsur/study/}
"goggles"
| /R/data-goggles.R | no_license | pa0/apaTables | R | false | false | 560 | r | #' @title goggles data from textbook
#' @docType data
#' @keywords datasets
#' @name goggles
#' @usage data(goggles)
#' @format A data frame with 48 rows and 3 variables:
#' \describe{
#' \item{gender}{Gender of participant}
#' \item{alcohol}{Amount alcohol consumed }
#' \item{attractiveness}{Perceived attractiveness}
#' }
#' @description A data set from Field et al (2012)
#' @references Field, A., Miles, J., & Field, Z. (2012) Discovering Statistics Using R. Sage: Chicago.
#' @source \url{http://studysites.sagepub.com/dsur/study/}
"goggles"
NULL
|
context("prcomp")

library(h2o)

## Connect to an already-running H2O cluster identified by environment
## variables.
## BUG FIX: the original call closed as.integer() too late, so
## startH2O=FALSE was passed to (and silently ignored by) as.integer()
## instead of h2o.init(); h2o.init() therefore used its default startH2O
## behavior.
conn <- h2o.init(ip = Sys.getenv("H2O_IP"), port = as.integer(Sys.getenv("H2O_PORT")), startH2O = FALSE)

ausPath <- system.file("extdata", "australia.csv", package = "h2o")
australia.hex <- h2o.importFile(conn, path = ausPath)

## Fit PCA; capture any error so the test can report it instead of aborting.
model <- tryCatch({
  h2o.prcomp(data = australia.hex, standardize = TRUE)
}, error = function(err) {
  return(err)
})

if (is(model, "H2OPCAModel")) {
  ## australia.csv has 8 columns, so PCA should return 8 components
  test_that("Correct # components returned: ", {
    expect_equal(8, length(model@model$sdev))
  })
} else {
  test_that("Input permutation foo: ", fail(message = toString(model)))
}
| /h2o-r/src/test/R/acceptance/test-prcomp.R | permissive | mrgloom/h2o-3 | R | false | false | 578 | r | context("prcomp")
library(h2o)
conn <- h2o.init(ip=Sys.getenv("H2O_IP"), port=as.integer(Sys.getenv("H2O_PORT"), startH2O=FALSE))
ausPath <- system.file("extdata", "australia.csv", package="h2o")
australia.hex <- h2o.importFile(conn, path = ausPath)
model <- tryCatch({
h2o.prcomp(data = australia.hex, standardize = TRUE)
}, error = function(err) {
return(err)
})
if(is(model, "H2OPCAModel")) {
test_that("Correct # components returned: ", {
expect_equal(8, length(model@model$sdev))
})
} else {
test_that("Input permutation foo: ", fail(message=toString(model)))
}
|
library(PharmacoGx)
# Parse the CCLE NP24 raw drug-sensitivity CSV into dose/viability pairs.
#
# Each CSV row holds one cell-line/drug experiment with comma-separated
# dose and response strings.  result.type selects the output shape:
#   "array": a 4-d array [cell line, drug, {concentration,viability}, dose index]
#   "list":  one list per experiment (cell, drug, doses, responses)
# Both are returned as list(data=..., concentrations.no=...), where
# concentrations.no is the longest dose series observed.
#
# NOTE(review): 'path.data' is never used -- the CSV path is hard-coded.
# NOTE(review): result.type has no match.arg(), so callers must pass a
# single value; the length-2 default would fail the if() comparison.
getCCLErawData <-
  function(path.data=file.path("data", "CCLE"), result.type=c("array", "list")){
    ccle.raw.drug.sensitivity <- read.csv("/pfs/downloadCCLESensRaw/CCLE_NP24.2009_Drug_data_2015.02.24.csv", stringsAsFactors=FALSE)
    # one list element per CSV row (row-wise apply, then flatten)
    ccle.raw.drug.sensitivity.list <- do.call(c, apply(ccle.raw.drug.sensitivity, 1, list))
    # widest dose series across all experiments; shorter series are NA-padded
    concentrations.no <- max(unlist(lapply(ccle.raw.drug.sensitivity[ , "Doses..uM."], function(x){length(unlist(strsplit(x, split = ",")))})))
    if(result.type == "array"){
      ## create the ccle.drug.response object including information viablilities and concentrations for each cell/drug pair
      obj <- array(NA, dim=c(length(unique(ccle.raw.drug.sensitivity[ , "Primary.Cell.Line.Name"])), length(unique(ccle.raw.drug.sensitivity[ , "Compound"])), 2, concentrations.no), dimnames=list(unique(ccle.raw.drug.sensitivity[ , "Primary.Cell.Line.Name"]), unique(ccle.raw.drug.sensitivity[ , "Compound"]), c("concentration", "viability"), 1:concentrations.no))
    }
    # Per-row worker: parses the dose/response strings for one experiment.
    # In "array" mode it writes into 'obj' in the enclosing frame via <<-;
    # in "list" mode it returns the parsed record.
    fnexperiment <-
      function(values) {
        cellline <- values["Primary.Cell.Line.Name"]
        drug <- values["Compound"]
        #doses <- as.numeric(unlist(strsplit(input.matrix["Doses (uM)"], split=", "))) #nature paper raw data
        doses <- as.numeric(unlist(strsplit(values["Doses..uM."], split=","))) # micro molar
        # right-pad with NA up to the widest series
        if(concentrations.no > length(doses)) {doses <- c(doses, rep(NA, concentrations.no - length(doses)))}
        #responses <- as.numeric(unlist(strsplit(input.matrix["Activity Data\n(raw median data)"], split=","))) #nature paper raw data
        # +100 shifts the median-activity values onto a viability scale
        responses <- as.numeric(unlist(strsplit(values["Activity.Data..median."], split=","))) + 100
        if(concentrations.no > length(responses)) {responses <- c(responses, rep(NA, concentrations.no - length(responses)))}
        if(result.type == "array"){
          obj[cellline,drug, "concentration", 1:length(doses)] <<- doses
          obj[cellline,drug, "viability", 1:length(responses)] <<- responses
        }else{
          return(list(cell=cellline, drug=drug, doses=doses, responses=responses))#paste(doses, collapse = ","), responses=paste(responses, collapse = ",")))
        }
      }
    # NOTE(review): this recomputes the same row list built above; the first
    # computation is never used.
    ccle.raw.drug.sensitivity.list <- do.call(c, apply(ccle.raw.drug.sensitivity, 1, list))
    ccle.raw.drug.sensitivity.res <- mapply(fnexperiment, values=ccle.raw.drug.sensitivity.list)
    if(result.type == "array"){
      return(list("data"=obj, "concentrations.no"=concentrations.no))
    }else{
      return(list("data"=ccle.raw.drug.sensitivity.res, "concentrations.no"=concentrations.no))
    }
  }
# Post-process the parsed CCLE sensitivity data into a 3-d array
# (experiment x dose-index x {Dose, Viability}) and write it out.
raw.sensitivity <- getCCLErawData(result.type="list")
con_tested <- raw.sensitivity$concentrations.no
# transpose so each row is one experiment, then flatten the nested lists
# into plain columns (cell, drug, doses..., responses...)
raw.sensitivity <- t(raw.sensitivity$data)
raw.sensitivity <- t(apply(raw.sensitivity,1, function(x){unlist(x)}))
## manual curation of drug names
##########################################################################
#raw.sensitivity <- read.csv(file.path(inst("PharmacoGx"), "extdata", "ccle_sensitivity_detail.csv"))
#raw.sensitivity[raw.sensitivity[ ,2]=="PF2341066",2] <- "CRIZOTINIB"
raw.sensitivity[raw.sensitivity[ ,2]=="ZD-6474",2] <- "Vandetanib"
raw.sensitivity[raw.sensitivity[ ,2]=="PF2341066",2] <- "PF-2341066"
##########################################################################
#raw.sensitivity[ ,2] <- gsub(pattern=badchars, replacement="", raw.sensitivity[ ,2])
#raw.sensitivity[ ,2] <- paste("drugid", toupper(raw.sensitivity[ ,2]), sep="_")
# row names encode drug and cell line: "drugid_<drug>_<cell>"
rownames(raw.sensitivity) <- sprintf("drugid_%s_%s",as.character(raw.sensitivity[ ,2]),as.character(raw.sensitivity[ ,1]))
# drop the cell/drug id columns, leaving con_tested dose columns followed
# by con_tested viability columns
raw.sensitivity <- raw.sensitivity[ ,-c(1,2)]
# reshape into experiments x dose-index x {Dose, Viability}
raw.sensitivity <- array(c(as.matrix(as.numeric(raw.sensitivity[ ,1:con_tested])), as.matrix(as.numeric(raw.sensitivity[ ,(con_tested+1):(2*con_tested)]))), c(nrow(raw.sensitivity), con_tested, 2),
                        dimnames=list(rownames(raw.sensitivity), colnames(raw.sensitivity[ ,1:con_tested]), c("Dose", "Viability")))
save(raw.sensitivity, con_tested, file="/pfs/out/drug_norm_post.RData")
# slice the array into ~1000-experiment chunks for downstream consumers
raw.sensitivity.x <- parallel::splitIndices(nrow(raw.sensitivity), floor(nrow(raw.sensitivity)/1000))
dir.create("/pfs/out/slices/")
for(i in seq_along(raw.sensitivity.x)){
  slce <- raw.sensitivity[raw.sensitivity.x[[i]],,]
  saveRDS(slce, file=paste0("/pfs/out/slices/ccle_raw_sens_", i, ".rds"))
}
| /downloadSensData.R | no_license | BHKLAB-DataProcessing/downloadCCLESensRaw | R | false | false | 4,728 | r | library(PharmacoGx)
getCCLErawData <-
function(path.data=file.path("data", "CCLE"), result.type=c("array", "list")){
ccle.raw.drug.sensitivity <- read.csv("/pfs/downloadCCLESensRaw/CCLE_NP24.2009_Drug_data_2015.02.24.csv", stringsAsFactors=FALSE)
ccle.raw.drug.sensitivity.list <- do.call(c, apply(ccle.raw.drug.sensitivity, 1, list))
concentrations.no <- max(unlist(lapply(ccle.raw.drug.sensitivity[ , "Doses..uM."], function(x){length(unlist(strsplit(x, split = ",")))})))
if(result.type == "array"){
## create the ccle.drug.response object including information viablilities and concentrations for each cell/drug pair
obj <- array(NA, dim=c(length(unique(ccle.raw.drug.sensitivity[ , "Primary.Cell.Line.Name"])), length(unique(ccle.raw.drug.sensitivity[ , "Compound"])), 2, concentrations.no), dimnames=list(unique(ccle.raw.drug.sensitivity[ , "Primary.Cell.Line.Name"]), unique(ccle.raw.drug.sensitivity[ , "Compound"]), c("concentration", "viability"), 1:concentrations.no))
}
fnexperiment <-
function(values) {
cellline <- values["Primary.Cell.Line.Name"]
drug <- values["Compound"]
#doses <- as.numeric(unlist(strsplit(input.matrix["Doses (uM)"], split=", "))) #nature paper raw data
doses <- as.numeric(unlist(strsplit(values["Doses..uM."], split=","))) # micro molar
if(concentrations.no > length(doses)) {doses <- c(doses, rep(NA, concentrations.no - length(doses)))}
#responses <- as.numeric(unlist(strsplit(input.matrix["Activity Data\n(raw median data)"], split=","))) #nature paper raw data
responses <- as.numeric(unlist(strsplit(values["Activity.Data..median."], split=","))) + 100
if(concentrations.no > length(responses)) {responses <- c(responses, rep(NA, concentrations.no - length(responses)))}
if(result.type == "array"){
obj[cellline,drug, "concentration", 1:length(doses)] <<- doses
obj[cellline,drug, "viability", 1:length(responses)] <<- responses
}else{
return(list(cell=cellline, drug=drug, doses=doses, responses=responses))#paste(doses, collapse = ","), responses=paste(responses, collapse = ",")))
}
}
ccle.raw.drug.sensitivity.list <- do.call(c, apply(ccle.raw.drug.sensitivity, 1, list))
ccle.raw.drug.sensitivity.res <- mapply(fnexperiment, values=ccle.raw.drug.sensitivity.list)
if(result.type == "array"){
return(list("data"=obj, "concentrations.no"=concentrations.no))
}else{
return(list("data"=ccle.raw.drug.sensitivity.res, "concentrations.no"=concentrations.no))
}
}
# fetch the raw sensitivity data, one record per experiment
raw.sensitivity <- getCCLErawData(result.type="list")
con_tested <- raw.sensitivity$concentrations.no
# one experiment per row: cell, drug, doses..., responses...
raw.sensitivity <- t(raw.sensitivity$data)
raw.sensitivity <- t(apply(raw.sensitivity,1, function(x){unlist(x)}))
## manual curation of drug names
##########################################################################
#raw.sensitivity <- read.csv(file.path(inst("PharmacoGx"), "extdata", "ccle_sensitivity_detail.csv"))
#raw.sensitivity[raw.sensitivity[ ,2]=="PF2341066",2] <- "CRIZOTINIB"
raw.sensitivity[raw.sensitivity[ ,2]=="ZD-6474",2] <- "Vandetanib"
raw.sensitivity[raw.sensitivity[ ,2]=="PF2341066",2] <- "PF-2341066"
##########################################################################
#raw.sensitivity[ ,2] <- gsub(pattern=badchars, replacement="", raw.sensitivity[ ,2])
#raw.sensitivity[ ,2] <- paste("drugid", toupper(raw.sensitivity[ ,2]), sep="_")
# row ids combine the drug (column 2) and cell line (column 1); then drop those columns
rownames(raw.sensitivity) <- sprintf("drugid_%s_%s",as.character(raw.sensitivity[ ,2]),as.character(raw.sensitivity[ ,1]))
raw.sensitivity <- raw.sensitivity[ ,-c(1,2)]
# Stack the dose columns and the viability columns into a 3-d array:
# [experiment, concentration index, Dose/Viability]
raw.sensitivity <- array(c(as.matrix(as.numeric(raw.sensitivity[ ,1:con_tested])), as.matrix(as.numeric(raw.sensitivity[ ,(con_tested+1):(2*con_tested)]))), c(nrow(raw.sensitivity), con_tested, 2),
 dimnames=list(rownames(raw.sensitivity), colnames(raw.sensitivity[ ,1:con_tested]), c("Dose", "Viability")))
# persist the assembled raw data for the downstream pipeline step
save(raw.sensitivity, con_tested, file="/pfs/out/drug_norm_post.RData")
# split the experiments into ~1000-row slices so later steps can process them in parallel
raw.sensitivity.x <- parallel::splitIndices(nrow(raw.sensitivity), floor(nrow(raw.sensitivity)/1000))
dir.create("/pfs/out/slices/")
for(i in seq_along(raw.sensitivity.x)){
  slce <- raw.sensitivity[raw.sensitivity.x[[i]],,]
  saveRDS(slce, file=paste0("/pfs/out/slices/ccle_raw_sens_", i, ".rds"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{list_merge}
\alias{list_merge}
\title{Merge two lists and overwrite former entries with latter entries
if names are the same.}
\usage{
list_merge(list1, list2)
}
\arguments{
\item{list1}{list}
\item{list2}{list}
}
\value{
the merged list.
}
\description{
For example, \code{list_merge(list(a = 1, b = 2), list(b = 3, c = 4))}
will be \code{list(a = 1, b = 3, c = 4)}.
}
\examples{
stopifnot(identical(list_merge(list(a = 1, b = 2), list(b = 3, c = 4)),
list(a = 1, b = 3, c = 4)))
stopifnot(identical(list_merge(NULL, list(a = 1)), list(a = 1)))
}
| /man/list_merge.Rd | permissive | syberia/mungebits2 | R | false | true | 662 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{list_merge}
\alias{list_merge}
\title{Merge two lists and overwrite former entries with latter entries
if names are the same.}
\usage{
list_merge(list1, list2)
}
\arguments{
\item{list1}{list}
\item{list2}{list}
}
\value{
the merged list.
}
\description{
For example, \code{list_merge(list(a = 1, b = 2), list(b = 3, c = 4))}
will be \code{list(a = 1, b = 3, c = 4)}.
}
\examples{
stopifnot(identical(list_merge(list(a = 1, b = 2), list(b = 3, c = 4)),
list(a = 1, b = 3, c = 4)))
stopifnot(identical(list_merge(NULL, list(a = 1)), list(a = 1)))
}
|
# Auto-extracted example script for the AMCP package's 'chapter_15_table_1'
# dataset (generated from the package's .Rd examples).
library(AMCP)
### Name: chapter_15_table_1
### Title: The data used in Chapter 15, Table 1
### Aliases: chapter_15_table_1 C15T1 Chapter_15_Table_1 c15t1
### Keywords: datasets
### ** Examples
# Load the data
data(chapter_15_table_1)
# Or, alternatively load the data as
data(C15T1)
# View the structure
str(chapter_15_table_1)
| /data/genthat_extracted_code/AMCP/examples/chapter_15_table_1.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 338 | r | library(AMCP)
# Auto-extracted example script for the AMCP package's 'chapter_15_table_1'
# dataset (generated from the package's .Rd examples).
### Name: chapter_15_table_1
### Title: The data used in Chapter 15, Table 1
### Aliases: chapter_15_table_1 C15T1 Chapter_15_Table_1 c15t1
### Keywords: datasets
### ** Examples
# Load the data
data(chapter_15_table_1)
# Or, alternatively load the data as
data(C15T1)
# View the structure
str(chapter_15_table_1)
|
# Registry of footnote messages shared by the cells of a Table.
# Notes are stored in insertion order and addressed by 0-based index.
Footnotes <- setRefClass(
    "Footnotes",
    fields=list(
        .notes="list"),
    methods=list(
        initialize=function() {
            .notes <<- list()
        },
        clear=function() {
            # drop every registered footnote
            .notes <<- list()
        },
        addNote=function(message) {
            # register a footnote and return its 0-based index;
            # an already-registered message is reused rather than duplicated
            existing <- Position(function(note) message == note, .notes)
            if ( ! is.null(existing))
                return(existing - 1)
            .notes[[length(.notes) + 1]] <<- message
            length(.notes) - 1
        })
)
# A result table: named columns of cells, rebuilt lazily from an options
# expression (.rowsExpr).  Renders itself as text (asString) or as a
# silkycoms protobuf message (asProtoBuf).
Table <- setRefClass(
    "Table",
    contains="ResultElement",
    fields=list(
        .name="character",            # element name
        .columns="list",              # named list of Column objects
        .rowCount="numeric",          # number of rows currently held
        .rowNames="list",             # per-row names (entries may be NULL)
        .rowsExpr="character",        # options expression that yields the rows
        .rowsValue="ANY",             # cached result of the last evaluation
        .margin="numeric",            # outer margin width in characters
        .padding="numeric",           # per-cell padding width in characters
        .marstr="character",          # margin rendered as spaces
        .padstr="character",          # padding rendered as spaces
        .footnotes="Footnotes",       # footnote registry shared by all cells
        .swapRowsColumns="logical"),  # render transposed when TRUE
    methods=list(
        initialize=function(name="", index=0, options=Options(), swapRowsColumns=FALSE) {
            callSuper(name=name, options=options)
            .index <<- as.integer(index)
            .swapRowsColumns <<- swapRowsColumns
            .columns <<- list()
            .rowCount <<- 0
            .rowsExpr <<- "1"
            .rowNames <<- list()
            .margin <<- 1
            .marstr <<- spaces(.margin)
            .padding <<- 2
            .padstr <<- spaces(.padding)
            .footnotes <<- Footnotes()
        },
        title=function(value) {
            # NOTE(review): 'value' is unused; this evaluates the stored
            # title against the options -- confirm the intended signature
            .options$eval(.title)
        },
        .setDef=function(name, value) {
            # dispatch one definition entry to the matching setter
            if (name == "title")
                setTitle(value)
            else if (name == "columns")
                .setColumnsDef(value)
            else if (name == "rows")
                .setRowsDef(value)
            else
                callSuper(name, value)
        },
        setTitle=function(value) {
            # titles may contain option expressions, hence the eval
            .title <<- paste0(.options$eval(value, name=.name, index=.index))
        },
        .setRowsDef=function(value) {
            .rowsExpr <<- paste0(value)
            .updated <<- FALSE  # force a rebuild on the next .update()
        },
        .setColumnsDef=function(columnDefs) {
            # add one column per definition, filling in missing defaults
            for (columnDef in columnDefs) {
                if (is.null(columnDef$title))
                    columnDef$title <- columnDef$name
                if (is.null(columnDef$content))
                    columnDef$content <- "."
                if (is.null(columnDef$visible))
                    columnDef$visible <- TRUE
                addColumn(columnDef$name, columnDef$title, columnDef$content, columnDef$visible)
            }
        },
        .update=function() {
            # re-evaluate .rowsExpr and rebuild the rows, carrying over the
            # values of rows whose names survive the rebuild
            if (.updated)
                return()
            error <- NULL
            rowsValue <- try(.options$eval(.rowsExpr, name=.name, index=.index), silent=TRUE)
            if (inherits(rowsValue, "try-error")) {
                error <- rowsValue  # rethrown below, once the table is consistent
                rowsValue <- 0
            }
            if (identical(rowsValue, .rowsValue))
                return()
            .rowsValue <<- rowsValue
            oldNames <- .rowNames
            oldRows <- getRows()
            # a positive number means "this many rows"; a character vector names them
            if (is.numeric(.rowsValue) && .rowsValue > 0) {
                newNames <- paste(1:.rowsValue)
            } else if (is.character(.rowsValue)) {
                newNames <- .rowsValue
            } else {
                newNames <- character()
            }
            clearRows()
            for (i in seq_along(newNames)) {
                newName <- newNames[[i]]
                index <- which(oldNames == newName)
                if (length(index) > 0) {
                    # keep the cell values of a row that survived the rebuild
                    newRow <- oldRows[[ index[1] ]]
                    addRow(newName, newRow)
                } else {
                    addRow(newName)
                }
            }
            if ( ! is.null(error))
                rethrow(error)
            .updated <<- TRUE
        },
        clearRows=function() {
            # drop all rows (and footnotes) but keep the column definitions
            .rowNames <<- list()
            for (column in .columns)
                column$.clear()
            .rowCount <<- 0
            .footnotes$clear()
        },
        addColumn=function(name, title=name, content=".", visible=TRUE) {
            # append a column, padding it with one empty cell per existing row
            column <- Column(name=name, title=title, content=content, visible, options=.options)
            i <- 1
            while (i <= .rowCount) {
                rowName <- .rowNames[[i]]
                column$.addCell(name=rowName, index=i)
                i <- i + 1
            }
            .columns[[name]] <<- column
        },
        addRow=function(name=NULL, values=NULL) {
            # append a row; 'values' provides cell values keyed by column name
            .rowNames[length(.rowNames)+1] <<- list(name)
            .rowCount <<- .rowCount + 1
            for (column in .columns) {
                if (column$.name %in% names(values))
                    column$.addCell(values[[column$.name]], name=name, index=.rowCount)
                else
                    column$.addCell(name=name, index=.rowCount)
            }
        },
        rowCount=function() {
            .rowCount
        },
        setCell=function(rowNo, col, value) {
            .columns[[col]]$.setCell(rowNo, value)
        },
        getCell=function(rowNo, col) {
            column <- .columns[[col]]
            if (is.null(column))
                stop(format("Column '{}' does not exist in the table", col), call.=FALSE)
            column$.getCell(rowNo)
        },
        getRows=function() {
            rows <- list()
            for (i in seq_len(.rowCount))
                rows[[i]] <- getRow(i)
            rows
        },
        getRow=function(row) {
            # fetch one row as a named list, by row name or row number
            v <- list()
            if (is.character(row)) {
                rowNo <- match(row, .rowNames)
                if (is.na(rowNo))  # fixed: previously tested the undefined variable 'index'
                    stop(format("Row '{}' does not exist in the table", row), call.=FALSE)
            } else if (is.numeric(row)) {
                rowNo <- row
            } else {
                stop(format("Table$getRow() expects a row name or a row number (character or numeric)", row), call.=FALSE)
            }
            if (rowNo > .rowCount)
                stop(format("Row '{}' does not exist in the table", row), call.=FALSE)
            for (column in .columns)
                v[[column$.name]] <- column$.getCell(rowNo)
            v
        },
        addFootnote=function(rowNo, colNo, note) {
            # register the note (deduplicated) and tag the cell with its index
            index <- .footnotes$addNote(note)
            .columns[[colNo]]$.addSup(rowNo, index)
        },
        width=function() {
            # rendered width in characters, never narrower than the title
            if ( ! .swapRowsColumns) {
                w <- 0
                for (column in .columns) {
                    if (column$visible())
                        w <- w + .padding + column$width() + .padding
                }
            } else {
                w <- .padding + .widthWidestHeader() + .padding
                for (i in seq_len(.rowCount))
                    w <- w + .padding + .widthWidestCellInRow(i)$width + .padding
            }
            max(w, nchar(.title))
        },
        .widthWidestCellInRow=function(row) {
            # widest visible cell in the row, accounting for superscript marks
            maxWidthWOSup <- 0
            maxSupInRow <- 0 # widest superscripts
            for (column in .columns) {
                if (column$visible()) {
                    cell <- column$.getCell(row)
                    measurements <- silkyMeasureElements(list(cell))
                    widthWOSup <- measurements$width - measurements$supwidth
                    maxWidthWOSup <- max(maxWidthWOSup, widthWOSup)
                    maxSupInRow <- max(maxSupInRow, measurements$supwidth)
                }
            }
            list(width=maxWidthWOSup + maxSupInRow, supwidth=maxSupInRow)
        },
        .widthWidestHeader=function() {
            # widest visible column title
            width <- 0
            for (column in .columns) {
                if (column$visible())
                    width <- max(width, nchar(column$.title))
            }
            width
        },
        asString=function() {
            # plain-text rendering: title, header, body rows, footnotes
            pieces <- character()
            pieces <- c(pieces, .titleForPrint())
            pieces <- c(pieces, .headerForPrint())
            if ( ! .self$.swapRowsColumns) {
                for (i in seq_len(.rowCount))
                    pieces <- c(pieces, .rowForPrint(i))
            } else {
                for (i in seq_along(.columns)) {
                    if (i == 1)
                        next() # the first is already printed in the header
                    if (.columns[[i]]$visible())
                        pieces <- c(pieces, .rowForPrint(i))
                }
            }
            pieces <- c(pieces, .footerForPrint())
            pieces <- c(pieces, '\n')
            paste0(pieces, collapse="")
        },
        .titleForPrint=function() {
            # title line with a full-width rule underneath
            pieces <- character()
            w <- nchar(.title)
            wid <- width()
            padright <- repstr(' ', wid - w)
            pieces <- c(pieces, '\n')
            pieces <- c(pieces, .marstr, .title, padright, .marstr, '\n')
            pieces <- c(pieces, .marstr, repstr('\u2500', wid), .marstr, '\n')
            paste0(pieces, collapse="")
        },
        .headerForPrint=function() {
            # column titles (or, when transposed, the first column's values)
            pieces <- character()
            wid <- width()
            pieces <- c(pieces, .marstr)
            if ( ! .swapRowsColumns) {
                for (column in .columns) {
                    if (column$visible())
                        pieces <- c(pieces, .padstr, column$.titleForPrint(), .padstr)
                }
            } else {
                column <- .columns[[1]]
                pieces <- c(pieces, .padstr, spaces(.widthWidestHeader()), .padstr)
                for (i in seq_len(.rowCount)) {
                    text <- paste(column$.getCell(i)$value)
                    rowWidth <- .widthWidestCellInRow(i)$width
                    w <- nchar(text)
                    pad <- spaces(max(0, rowWidth - w))
                    pieces <- c(pieces, .padstr, text, pad, .padstr)
                }
            }
            pieces <- c(pieces, .marstr, '\n')
            pieces <- c(pieces, .marstr, repstr('\u2500', wid), .marstr, '\n')
            paste0(pieces, collapse="")
        },
        .footerForPrint=function() {
            # closing rule plus every footnote whose superscript is visible
            pieces <- character()
            wid <- width()
            pieces <- c(.marstr, repstr('\u2500', wid), .marstr, '\n')
            for (i in seq_along(.footnotes$.notes)) {
                # determine if the corresponding superscript is visible
                supVisible <- FALSE
                for (column in .columns) {
                    if (column$visible()) {
                        for (cell in column$.cells) {
                            if ((i-1) %in% cell$sups) {
                                supVisible <- TRUE
                                break()
                            }
                        }
                    }
                    if (supVisible)
                        break()
                }
                if (supVisible) {
                    note <- .footnotes$.notes[[i]]
                    lines <- strwrap(note, width=(wid-.padding-2))
                    first <- TRUE
                    for (line in lines) {
                        pieces <- c(pieces, .marstr)
                        if (first) {
                            # .SUPCHARS is defined elsewhere in the package
                            pieces <- c(pieces, .SUPCHARS[i], ' ')
                            first <- FALSE
                        } else {
                            pieces <- c(pieces, ' ')
                        }
                        pieces <- c(pieces, line, .marstr, '\n')
                    }
                }
            }
            paste0(pieces, collapse="")
        },
        .rowForPrint=function(i) {
            # one body row ('i' is a row index, or a column index when transposed)
            pieces <- character()
            pieces <- c(pieces, .marstr)
            if ( ! .swapRowsColumns) {
                for (column in .columns) {
                    if (column$visible())
                        pieces <- c(pieces, .padstr, column$.cellForPrint(i), .padstr)
                }
            } else {
                column <- .columns[[i]]
                width <- .widthWidestHeader()
                pieces <- c(pieces, .padstr, column$.titleForPrint(width), .padstr)
                for (j in seq_along(column$.cells)) {
                    widest <- .widthWidestCellInRow(j)
                    width <- widest$width
                    supwidth <- widest$supwidth
                    cell <- column$.cells[[j]]
                    measurements <- silkyMeasureElements(list(cell))
                    measurements$width <- max(measurements$width, width)
                    measurements$supwidth <- supwidth
                    pieces <- c(pieces, .padstr, column$.cellForPrint(j, measurements), .padstr)
                }
            }
            pieces <- c(pieces, .marstr, '\n')
            paste0(pieces, collapse="")
        },
        asProtoBuf=function() {
            # serialise to a silkycoms ResultsElement containing a ResultsTable
            initProtoBuf()
            table <- RProtoBuf::new(silkycoms.ResultsTable)
            for (column in .columns)
                table$add("columns", column$asProtoBuf())
            element <- RProtoBuf::new(silkycoms.ResultsElement,
                name=.name,
                title=.title,
                table=table)
            element
        }
    )
)
# A templated group of Table elements: the number/names of child tables are
# driven by .tablesExpr (evaluated against the options) and each new table is
# configured from .template.
Tables <- setRefClass(
    "Tables",
    contains="ResultElement",
    fields=c(
        .tables="list",           # child Table objects, parallel to .tableNames
        .tableNames="character",
        .template="list",         # definition applied to each new child table
        .tablesExpr="character",  # options expression yielding a count or names
        .tablesValue="ANY"),      # cached result of the last evaluation
    methods=list(
        initialize=function(name="", index=0, options=Options()) {
            # Call the parent by name, mirroring Table$initialize: the parent
            # initializer takes (name, options), so the previous positional
            # callSuper(name, index, options) bound 'index' to the options
            # parameter.  NOTE(review): grounded on the sibling Table class;
            # confirm against ResultElement.
            callSuper(name=name, options=options)
            .index <<- as.integer(index)
            .tablesExpr <<- "1"
        },
        get=function(name) {
            # look up a child table by name; NULL when absent
            index <- which(name == .tableNames)
            if (length(index) > 0)
                table <- .tables[[ index[1] ]]
            else
                table <- NULL
            table
        },
        .setDef=function(name, value) {
            # dispatch one definition entry to the matching setter
            if (name == "tables")
                .setTablesDef(value)
            else if (name == "template")
                .setTemplateDef(value)
            else
                callSuper(name, value)
        },
        .setTemplateDef=function(templateDef) {
            .template <<- templateDef
            .updated <<- FALSE  # force a rebuild on the next .update()
        },
        .setTablesDef=function(tablesExpr) {
            .tablesExpr <<- paste0(tablesExpr)
            .updated <<- FALSE
        },
        .update=function() {
            # re-evaluate .tablesExpr and rebuild the child tables, reusing
            # (and refreshing) tables whose names survive the rebuild
            if (.updated)
                return()
            if (length(.template) == 0)
                return()
            error <- NULL
            tablesValue <- try(.options$eval(.tablesExpr, name=.name, index=.index), silent=TRUE)
            if (inherits(tablesValue, "try-error")) {
                error <- tablesValue  # rethrown below, once the group is consistent
                tablesValue <- 0
            }
            .tablesValue <<- tablesValue
            oldNames <- .tableNames
            oldTables <- .tables
            # a positive number means "this many tables"; a character vector names them
            if (is.numeric(.tablesValue) && .tablesValue > 0) {
                newNames <- paste(1:.tablesValue)
            } else if (is.character(.tablesValue)) {
                newNames <- .tablesValue
            } else {
                newNames <- character()
            }
            .tableNames <<- newNames
            .tables <<- list()
            for (i in seq_along(newNames)) {
                newName <- newNames[[i]]
                index <- which(oldNames == newName)
                if (length(index) > 0) {
                    table <- oldTables[[ index[1] ]]
                    table$.update()
                    .tables[[i]] <<- table
                } else {
                    table <- Table(newName, i, .options)
                    table$.setup(.template)
                    table$.update()
                    .tables[[i]] <<- table
                }
            }
            if ( ! is.null(error))
                rethrow(error)
            .updated <<- TRUE
        },
        clear=function() {
            # drop all child tables
            .tableNames <<- character()
            .tables <<- list()
        },
        asString=function() {
            # concatenate the visible child tables under the group title
            pieces <- c(' ', .title, '\n')
            for (table in .tables) {
                if (table$visible())
                    pieces <- c(pieces, table$asString())
            }
            return(paste0(pieces, collapse=""))
        },
        asProtoBuf=function() {
            # serialise to a ResultsElement containing a ResultsGroup
            initProtoBuf()
            group <- RProtoBuf::new(silkycoms.ResultsGroup)
            for (table in .tables)
                group$add("elements", table$asProtoBuf())
            RProtoBuf::new(silkycoms.ResultsElement,
                name=.name,
                title=.title,
                group=group)
        })
)
| /R/table.R | no_license | dcaunce/silkyR-old | R | false | false | 18,657 | r |
# Registry of footnote messages shared by the cells of a Table.
# Notes are stored in insertion order and addressed by 0-based index.
Footnotes <- setRefClass(
    "Footnotes",
    fields=list(
        .notes="list"),
    methods=list(
        initialize=function() {
            .notes <<- list()
        },
        clear=function() {
            # drop every registered footnote
            .notes <<- list()
        },
        addNote=function(message) {
            # register a footnote and return its 0-based index;
            # an already-registered message is reused rather than duplicated
            existing <- Position(function(note) message == note, .notes)
            if ( ! is.null(existing))
                return(existing - 1)
            .notes[[length(.notes) + 1]] <<- message
            length(.notes) - 1
        })
)
# A result table: named columns of cells, rebuilt lazily from an options
# expression (.rowsExpr).  Renders itself as text (asString) or as a
# silkycoms protobuf message (asProtoBuf).
Table <- setRefClass(
    "Table",
    contains="ResultElement",
    fields=list(
        .name="character",            # element name
        .columns="list",              # named list of Column objects
        .rowCount="numeric",          # number of rows currently held
        .rowNames="list",             # per-row names (entries may be NULL)
        .rowsExpr="character",        # options expression that yields the rows
        .rowsValue="ANY",             # cached result of the last evaluation
        .margin="numeric",            # outer margin width in characters
        .padding="numeric",           # per-cell padding width in characters
        .marstr="character",          # margin rendered as spaces
        .padstr="character",          # padding rendered as spaces
        .footnotes="Footnotes",       # footnote registry shared by all cells
        .swapRowsColumns="logical"),  # render transposed when TRUE
    methods=list(
        initialize=function(name="", index=0, options=Options(), swapRowsColumns=FALSE) {
            callSuper(name=name, options=options)
            .index <<- as.integer(index)
            .swapRowsColumns <<- swapRowsColumns
            .columns <<- list()
            .rowCount <<- 0
            .rowsExpr <<- "1"
            .rowNames <<- list()
            .margin <<- 1
            .marstr <<- spaces(.margin)
            .padding <<- 2
            .padstr <<- spaces(.padding)
            .footnotes <<- Footnotes()
        },
        title=function(value) {
            # NOTE(review): 'value' is unused; this evaluates the stored
            # title against the options -- confirm the intended signature
            .options$eval(.title)
        },
        .setDef=function(name, value) {
            # dispatch one definition entry to the matching setter
            if (name == "title")
                setTitle(value)
            else if (name == "columns")
                .setColumnsDef(value)
            else if (name == "rows")
                .setRowsDef(value)
            else
                callSuper(name, value)
        },
        setTitle=function(value) {
            # titles may contain option expressions, hence the eval
            .title <<- paste0(.options$eval(value, name=.name, index=.index))
        },
        .setRowsDef=function(value) {
            .rowsExpr <<- paste0(value)
            .updated <<- FALSE  # force a rebuild on the next .update()
        },
        .setColumnsDef=function(columnDefs) {
            # add one column per definition, filling in missing defaults
            for (columnDef in columnDefs) {
                if (is.null(columnDef$title))
                    columnDef$title <- columnDef$name
                if (is.null(columnDef$content))
                    columnDef$content <- "."
                if (is.null(columnDef$visible))
                    columnDef$visible <- TRUE
                addColumn(columnDef$name, columnDef$title, columnDef$content, columnDef$visible)
            }
        },
        .update=function() {
            # re-evaluate .rowsExpr and rebuild the rows, carrying over the
            # values of rows whose names survive the rebuild
            if (.updated)
                return()
            error <- NULL
            rowsValue <- try(.options$eval(.rowsExpr, name=.name, index=.index), silent=TRUE)
            if (inherits(rowsValue, "try-error")) {
                error <- rowsValue  # rethrown below, once the table is consistent
                rowsValue <- 0
            }
            if (identical(rowsValue, .rowsValue))
                return()
            .rowsValue <<- rowsValue
            oldNames <- .rowNames
            oldRows <- getRows()
            # a positive number means "this many rows"; a character vector names them
            if (is.numeric(.rowsValue) && .rowsValue > 0) {
                newNames <- paste(1:.rowsValue)
            } else if (is.character(.rowsValue)) {
                newNames <- .rowsValue
            } else {
                newNames <- character()
            }
            clearRows()
            for (i in seq_along(newNames)) {
                newName <- newNames[[i]]
                index <- which(oldNames == newName)
                if (length(index) > 0) {
                    # keep the cell values of a row that survived the rebuild
                    newRow <- oldRows[[ index[1] ]]
                    addRow(newName, newRow)
                } else {
                    addRow(newName)
                }
            }
            if ( ! is.null(error))
                rethrow(error)
            .updated <<- TRUE
        },
        clearRows=function() {
            # drop all rows (and footnotes) but keep the column definitions
            .rowNames <<- list()
            for (column in .columns)
                column$.clear()
            .rowCount <<- 0
            .footnotes$clear()
        },
        addColumn=function(name, title=name, content=".", visible=TRUE) {
            # append a column, padding it with one empty cell per existing row
            column <- Column(name=name, title=title, content=content, visible, options=.options)
            i <- 1
            while (i <= .rowCount) {
                rowName <- .rowNames[[i]]
                column$.addCell(name=rowName, index=i)
                i <- i + 1
            }
            .columns[[name]] <<- column
        },
        addRow=function(name=NULL, values=NULL) {
            # append a row; 'values' provides cell values keyed by column name
            .rowNames[length(.rowNames)+1] <<- list(name)
            .rowCount <<- .rowCount + 1
            for (column in .columns) {
                if (column$.name %in% names(values))
                    column$.addCell(values[[column$.name]], name=name, index=.rowCount)
                else
                    column$.addCell(name=name, index=.rowCount)
            }
        },
        rowCount=function() {
            .rowCount
        },
        setCell=function(rowNo, col, value) {
            .columns[[col]]$.setCell(rowNo, value)
        },
        getCell=function(rowNo, col) {
            column <- .columns[[col]]
            if (is.null(column))
                stop(format("Column '{}' does not exist in the table", col), call.=FALSE)
            column$.getCell(rowNo)
        },
        getRows=function() {
            rows <- list()
            for (i in seq_len(.rowCount))
                rows[[i]] <- getRow(i)
            rows
        },
        getRow=function(row) {
            # fetch one row as a named list, by row name or row number
            v <- list()
            if (is.character(row)) {
                rowNo <- match(row, .rowNames)
                if (is.na(rowNo))  # fixed: previously tested the undefined variable 'index'
                    stop(format("Row '{}' does not exist in the table", row), call.=FALSE)
            } else if (is.numeric(row)) {
                rowNo <- row
            } else {
                stop(format("Table$getRow() expects a row name or a row number (character or numeric)", row), call.=FALSE)
            }
            if (rowNo > .rowCount)
                stop(format("Row '{}' does not exist in the table", row), call.=FALSE)
            for (column in .columns)
                v[[column$.name]] <- column$.getCell(rowNo)
            v
        },
        addFootnote=function(rowNo, colNo, note) {
            # register the note (deduplicated) and tag the cell with its index
            index <- .footnotes$addNote(note)
            .columns[[colNo]]$.addSup(rowNo, index)
        },
        width=function() {
            # rendered width in characters, never narrower than the title
            if ( ! .swapRowsColumns) {
                w <- 0
                for (column in .columns) {
                    if (column$visible())
                        w <- w + .padding + column$width() + .padding
                }
            } else {
                w <- .padding + .widthWidestHeader() + .padding
                for (i in seq_len(.rowCount))
                    w <- w + .padding + .widthWidestCellInRow(i)$width + .padding
            }
            max(w, nchar(.title))
        },
        .widthWidestCellInRow=function(row) {
            # widest visible cell in the row, accounting for superscript marks
            maxWidthWOSup <- 0
            maxSupInRow <- 0 # widest superscripts
            for (column in .columns) {
                if (column$visible()) {
                    cell <- column$.getCell(row)
                    measurements <- silkyMeasureElements(list(cell))
                    widthWOSup <- measurements$width - measurements$supwidth
                    maxWidthWOSup <- max(maxWidthWOSup, widthWOSup)
                    maxSupInRow <- max(maxSupInRow, measurements$supwidth)
                }
            }
            list(width=maxWidthWOSup + maxSupInRow, supwidth=maxSupInRow)
        },
        .widthWidestHeader=function() {
            # widest visible column title
            width <- 0
            for (column in .columns) {
                if (column$visible())
                    width <- max(width, nchar(column$.title))
            }
            width
        },
        asString=function() {
            # plain-text rendering: title, header, body rows, footnotes
            pieces <- character()
            pieces <- c(pieces, .titleForPrint())
            pieces <- c(pieces, .headerForPrint())
            if ( ! .self$.swapRowsColumns) {
                for (i in seq_len(.rowCount))
                    pieces <- c(pieces, .rowForPrint(i))
            } else {
                for (i in seq_along(.columns)) {
                    if (i == 1)
                        next() # the first is already printed in the header
                    if (.columns[[i]]$visible())
                        pieces <- c(pieces, .rowForPrint(i))
                }
            }
            pieces <- c(pieces, .footerForPrint())
            pieces <- c(pieces, '\n')
            paste0(pieces, collapse="")
        },
        .titleForPrint=function() {
            # title line with a full-width rule underneath
            pieces <- character()
            w <- nchar(.title)
            wid <- width()
            padright <- repstr(' ', wid - w)
            pieces <- c(pieces, '\n')
            pieces <- c(pieces, .marstr, .title, padright, .marstr, '\n')
            pieces <- c(pieces, .marstr, repstr('\u2500', wid), .marstr, '\n')
            paste0(pieces, collapse="")
        },
        .headerForPrint=function() {
            # column titles (or, when transposed, the first column's values)
            pieces <- character()
            wid <- width()
            pieces <- c(pieces, .marstr)
            if ( ! .swapRowsColumns) {
                for (column in .columns) {
                    if (column$visible())
                        pieces <- c(pieces, .padstr, column$.titleForPrint(), .padstr)
                }
            } else {
                column <- .columns[[1]]
                pieces <- c(pieces, .padstr, spaces(.widthWidestHeader()), .padstr)
                for (i in seq_len(.rowCount)) {
                    text <- paste(column$.getCell(i)$value)
                    rowWidth <- .widthWidestCellInRow(i)$width
                    w <- nchar(text)
                    pad <- spaces(max(0, rowWidth - w))
                    pieces <- c(pieces, .padstr, text, pad, .padstr)
                }
            }
            pieces <- c(pieces, .marstr, '\n')
            pieces <- c(pieces, .marstr, repstr('\u2500', wid), .marstr, '\n')
            paste0(pieces, collapse="")
        },
        .footerForPrint=function() {
            # closing rule plus every footnote whose superscript is visible
            pieces <- character()
            wid <- width()
            pieces <- c(.marstr, repstr('\u2500', wid), .marstr, '\n')
            for (i in seq_along(.footnotes$.notes)) {
                # determine if the corresponding superscript is visible
                supVisible <- FALSE
                for (column in .columns) {
                    if (column$visible()) {
                        for (cell in column$.cells) {
                            if ((i-1) %in% cell$sups) {
                                supVisible <- TRUE
                                break()
                            }
                        }
                    }
                    if (supVisible)
                        break()
                }
                if (supVisible) {
                    note <- .footnotes$.notes[[i]]
                    lines <- strwrap(note, width=(wid-.padding-2))
                    first <- TRUE
                    for (line in lines) {
                        pieces <- c(pieces, .marstr)
                        if (first) {
                            # .SUPCHARS is defined elsewhere in the package
                            pieces <- c(pieces, .SUPCHARS[i], ' ')
                            first <- FALSE
                        } else {
                            pieces <- c(pieces, ' ')
                        }
                        pieces <- c(pieces, line, .marstr, '\n')
                    }
                }
            }
            paste0(pieces, collapse="")
        },
        .rowForPrint=function(i) {
            # one body row ('i' is a row index, or a column index when transposed)
            pieces <- character()
            pieces <- c(pieces, .marstr)
            if ( ! .swapRowsColumns) {
                for (column in .columns) {
                    if (column$visible())
                        pieces <- c(pieces, .padstr, column$.cellForPrint(i), .padstr)
                }
            } else {
                column <- .columns[[i]]
                width <- .widthWidestHeader()
                pieces <- c(pieces, .padstr, column$.titleForPrint(width), .padstr)
                for (j in seq_along(column$.cells)) {
                    widest <- .widthWidestCellInRow(j)
                    width <- widest$width
                    supwidth <- widest$supwidth
                    cell <- column$.cells[[j]]
                    measurements <- silkyMeasureElements(list(cell))
                    measurements$width <- max(measurements$width, width)
                    measurements$supwidth <- supwidth
                    pieces <- c(pieces, .padstr, column$.cellForPrint(j, measurements), .padstr)
                }
            }
            pieces <- c(pieces, .marstr, '\n')
            paste0(pieces, collapse="")
        },
        asProtoBuf=function() {
            # serialise to a silkycoms ResultsElement containing a ResultsTable
            initProtoBuf()
            table <- RProtoBuf::new(silkycoms.ResultsTable)
            for (column in .columns)
                table$add("columns", column$asProtoBuf())
            element <- RProtoBuf::new(silkycoms.ResultsElement,
                name=.name,
                title=.title,
                table=table)
            element
        }
    )
)
# A templated group of Table elements: the number/names of child tables are
# driven by .tablesExpr (evaluated against the options) and each new table is
# configured from .template.
Tables <- setRefClass(
    "Tables",
    contains="ResultElement",
    fields=c(
        .tables="list",           # child Table objects, parallel to .tableNames
        .tableNames="character",
        .template="list",         # definition applied to each new child table
        .tablesExpr="character",  # options expression yielding a count or names
        .tablesValue="ANY"),      # cached result of the last evaluation
    methods=list(
        initialize=function(name="", index=0, options=Options()) {
            # Call the parent by name, mirroring Table$initialize: the parent
            # initializer takes (name, options), so the previous positional
            # callSuper(name, index, options) bound 'index' to the options
            # parameter.  NOTE(review): grounded on the sibling Table class;
            # confirm against ResultElement.
            callSuper(name=name, options=options)
            .index <<- as.integer(index)
            .tablesExpr <<- "1"
        },
        get=function(name) {
            # look up a child table by name; NULL when absent
            index <- which(name == .tableNames)
            if (length(index) > 0)
                table <- .tables[[ index[1] ]]
            else
                table <- NULL
            table
        },
        .setDef=function(name, value) {
            # dispatch one definition entry to the matching setter
            if (name == "tables")
                .setTablesDef(value)
            else if (name == "template")
                .setTemplateDef(value)
            else
                callSuper(name, value)
        },
        .setTemplateDef=function(templateDef) {
            .template <<- templateDef
            .updated <<- FALSE  # force a rebuild on the next .update()
        },
        .setTablesDef=function(tablesExpr) {
            .tablesExpr <<- paste0(tablesExpr)
            .updated <<- FALSE
        },
        .update=function() {
            # re-evaluate .tablesExpr and rebuild the child tables, reusing
            # (and refreshing) tables whose names survive the rebuild
            if (.updated)
                return()
            if (length(.template) == 0)
                return()
            error <- NULL
            tablesValue <- try(.options$eval(.tablesExpr, name=.name, index=.index), silent=TRUE)
            if (inherits(tablesValue, "try-error")) {
                error <- tablesValue  # rethrown below, once the group is consistent
                tablesValue <- 0
            }
            .tablesValue <<- tablesValue
            oldNames <- .tableNames
            oldTables <- .tables
            # a positive number means "this many tables"; a character vector names them
            if (is.numeric(.tablesValue) && .tablesValue > 0) {
                newNames <- paste(1:.tablesValue)
            } else if (is.character(.tablesValue)) {
                newNames <- .tablesValue
            } else {
                newNames <- character()
            }
            .tableNames <<- newNames
            .tables <<- list()
            for (i in seq_along(newNames)) {
                newName <- newNames[[i]]
                index <- which(oldNames == newName)
                if (length(index) > 0) {
                    table <- oldTables[[ index[1] ]]
                    table$.update()
                    .tables[[i]] <<- table
                } else {
                    table <- Table(newName, i, .options)
                    table$.setup(.template)
                    table$.update()
                    .tables[[i]] <<- table
                }
            }
            if ( ! is.null(error))
                rethrow(error)
            .updated <<- TRUE
        },
        clear=function() {
            # drop all child tables
            .tableNames <<- character()
            .tables <<- list()
        },
        asString=function() {
            # concatenate the visible child tables under the group title
            pieces <- c(' ', .title, '\n')
            for (table in .tables) {
                if (table$visible())
                    pieces <- c(pieces, table$asString())
            }
            return(paste0(pieces, collapse=""))
        },
        asProtoBuf=function() {
            # serialise to a ResultsElement containing a ResultsGroup
            initProtoBuf()
            group <- RProtoBuf::new(silkycoms.ResultsGroup)
            for (table in .tables)
                group$add("elements", table$asProtoBuf())
            RProtoBuf::new(silkycoms.ResultsElement,
                name=.name,
                title=.title,
                group=group)
        })
)
|
# Shiny server for the word-prediction app: echoes the user's input text and
# shows a table of next-word predictions.
library(shiny)
library(dplyr)
library(quanteda)
library(data.table)
shinyServer(
  function(input,output) {
    # Display text user provided (evaluated when the button is pressed)
    txtReturn <- eventReactive(input$button1, {
      input$impText
    })
    output$inputText <- renderText({txtReturn()})
    # Get a table of predicted words and score from stupidBackoffPredFunction
    # (defined elsewhere in the app); show the top input$impWords rows
    predWords <- eventReactive(input$button1, {
      head(stupidBackoffPredFunction(input$impText), input$impWords)
    })
    output$predTable <- renderTable({predWords()})
  }
) | /server.R | no_license | j-p-courneya/wordPredictionApp | R | false | false | 500 | r | library(shiny)
library(dplyr)
library(quanteda)
library(data.table)
# Shiny server for the word-prediction app: echoes the user's input text and
# shows a table of next-word predictions.
shinyServer(
  function(input,output) {
    # Display text user provided (evaluated when the button is pressed)
    txtReturn <- eventReactive(input$button1, {
      input$impText
    })
    output$inputText <- renderText({txtReturn()})
    # Get a table of predicted words and score from stupidBackoffPredFunction
    # (defined elsewhere in the app); show the top input$impWords rows
    predWords <- eventReactive(input$button1, {
      head(stupidBackoffPredFunction(input$impText), input$impWords)
    })
    output$predTable <- renderTable({predWords()})
  }
) |
## function to calculate probabilities of getting a specific unit on n rolls
#big assumption
#how it works -> one roll decides which COST of unit you got, and an independent roll decides which specific unit you got (correct me if wrong)
#assumption 1 : probabilities are based off the probability of getting that cost of unit
#e.g the probability of getting ANY legendary does not reduce if someone takes a legendary, just that specific one. and will increase the chances of getting others
#starting pool size is a constant subtract from it to get the number of units still in the pool.
# number of copies of each unit in the shared pool, by cost tier
poolsize <- data.frame(onecost=39, twocost=26, threecost=21, fourcost=13, fivecost=10)
#base probabilties: each column is a cost tier, each row a player level (1-9)
baseProbs <- data.frame(onecost=c(1, 1, 0.7, 0.55, 0.4, 0.29, 0.24, 0.2, 0.1),
                        twocost=c(0,0,0.3,0.3,0.3,0.295,0.28,0.24,0.19),
                        threecost=c(0,0,0,0.15,0.25,0.31,0.31,0.31,0.31),
                        fourcost=c(0,0,0,0,0.05,0.1,0.15,0.2,0.3),
                        fivecost=c(0,0,0,0,0,0.005,0.02,0.05,0.1))
#gonna flip it because i manually entered it the rong way kappa
# after the transpose: rows = cost tier, columns = player level
baseProbMatrix <- data.matrix(baseProbs)
baseProbMatrix2 <- t(baseProbMatrix)
baseProbs2 <- data.frame(baseProbMatrix2)
summary(baseProbs2)
#we should map unit name -> unitid and unitid -> cost of unit. for now we can calculate general RANGE and extend further after
unit_costs <- c(1,2,3,4,5)
# NOTE(review): 'outcomes' is not defined anywhere in this script --
# presumably unit_costs was meant; confirm before running.  Also prefer
# TRUE over T for the replace argument.
sample(outcomes, 5,T,prob = baseProbMatrix2[,9])
sample(c(1,2,3,4,5),5, prob=c(0.5,0.5,0,0,0))
# Probability of each of the five unit outcomes appearing in one shop roll.
# unit_cost: cost tier 1-5 (row of probMatrix); player_level: 1-9 (column);
# probMatrix: per-tier appearance probabilities (rows = tier, cols = level).
# Returns dbinom(1:5, size = 5, prob = tier probability).
calcOneRoll <- function(unit_cost, player_level, probMatrix) {
  tier_prob <- probMatrix[unit_cost, player_level]
  dbinom(c(1, 2, 3, 4, 5), size = 5, prob = tier_prob)
}
library(readr)
# load unit metadata (e.g. name/cost mapping) from a local CSV
units <- read_csv("~/iloveyein/tft/units.csv")
View(units)
# NOTE(review): the incomplete 'unitpool =' continues onto the next line,
# so this assigns the calcOneRoll() result to unitpool
unitpool =
calcOneRoll(5,9,baseProbMatrix2)
| /main.R | no_license | adria-n/tft-roller | R | false | false | 1,948 | r | ## function to calculate probabilities of getting a specific unit on n rolls
#big assumption
#how it works -> one roll decides which COST of unit you got, and an independent roll decides which specific unit you got (correct me if wrong)
#assumption 1 : probabilities are based off the probability of getting that cost of unit
#e.g the probability of getting ANY legendary does not reduce if someone takes a legendary, just that specific one. and will increase the chances of getting others
#starting pool size is a constant subtract from it to get the number of units still in the pool.
# number of copies of each unit in the shared pool, by cost tier
poolsize <- data.frame(onecost=39, twocost=26, threecost=21, fourcost=13, fivecost=10)
#base probabilties: each column is a cost tier, each row a player level (1-9)
baseProbs <- data.frame(onecost=c(1, 1, 0.7, 0.55, 0.4, 0.29, 0.24, 0.2, 0.1),
                        twocost=c(0,0,0.3,0.3,0.3,0.295,0.28,0.24,0.19),
                        threecost=c(0,0,0,0.15,0.25,0.31,0.31,0.31,0.31),
                        fourcost=c(0,0,0,0,0.05,0.1,0.15,0.2,0.3),
                        fivecost=c(0,0,0,0,0,0.005,0.02,0.05,0.1))
#gonna flip it because i manually entered it the rong way kappa
# after the transpose: rows = cost tier, columns = player level
baseProbMatrix <- data.matrix(baseProbs)
baseProbMatrix2 <- t(baseProbMatrix)
baseProbs2 <- data.frame(baseProbMatrix2)
summary(baseProbs2)
#we should map unit name -> unitid and unitid -> cost of unit. for now we can calculate general RANGE and extend further after
unit_costs <- c(1,2,3,4,5)
# NOTE(review): 'outcomes' is not defined anywhere in this script --
# presumably unit_costs was meant; confirm before running.  Also prefer
# TRUE over T for the replace argument.
sample(outcomes, 5,T,prob = baseProbMatrix2[,9])
sample(c(1,2,3,4,5),5, prob=c(0.5,0.5,0,0,0))
# Probability of each of the five unit outcomes appearing in one shop roll.
# unit_cost: cost tier 1-5 (row of probMatrix); player_level: 1-9 (column);
# probMatrix: per-tier appearance probabilities (rows = tier, cols = level).
# Returns dbinom(1:5, size = 5, prob = tier probability).
calcOneRoll <- function(unit_cost, player_level, probMatrix) {
  tier_prob <- probMatrix[unit_cost, player_level]
  dbinom(c(1, 2, 3, 4, 5), size = 5, prob = tier_prob)
}
library(readr)
units <- read_csv("~/iloveyein/tft/units.csv")
View(units)
unitpool =
calcOneRoll(5,9,baseProbMatrix2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix.R
\name{plotmatrix}
\alias{plotmatrix}
\title{Make charts with matrices}
\usage{
plotmatrix(matrix, title = "", subtitle = "", low_color = "#132B43",
high_color = "#56B1F7", legend_position = "top", xlabel = "",
ylabel = "", title_size = 8, subtitle_size = 7, height = 0.9,
width = 0.9, color = "black", key_size = 1.5, colour = "black",
text_include = FALSE)
}
\arguments{
\item{matrix}{named data.frame or matrix}
\item{subtitle}{subtitle of the Plot}
\item{low_color}{is the color assigned to the lowest value}
\item{high_color}{is the color assigned to the highest value}
\item{legend_position}{position of legend}
\item{xlabel}{x label for the chart}
\item{ylabel}{ylabel for the chart}
\item{title_size}{size of the title text}
\item{subtitle_size}{size of the subtitle text}
\item{height}{height of each cell}
\item{width}{width of each cell}
\item{color}{color of the cell borders}
\item{key_size}{size of legend keys}
\item{colour}{colour of the labels}
\item{text_include}{if text labels should be included; by default it is FALSE}
\item{title}{title of the Plot}
}
\description{
Given a named matrix or a data frame, this function will return a ggplot object that
represents a tile chart with the values of the matrix. The library gives the user the
choice to change the color, title, subtitle and the legend position in the plot.
}
\details{
If a data frame is used, the column will be taken as the column of the matrix and the rows
will be taken as the rows of the matrix and the function will plot accordingly.
A matrix input is also allowed.
Chart title and x-axis and y-axis labels are optional.
plotmatrix is inspired by hrbrmstr's waffle package (@hrbrmstr)
}
\examples{
data_matrix <- data.frame(a=c(1,0,0),b=c(0,1,0),c=c(0,0,1))
plotmatrix(data_matrix)
# Plotting matrix with high and low color
plotmatrix(data_matrix,low_color="#f6efb9",high_color="#d6efd9")
}
| /man/plotmatrix.Rd | no_license | adhok/plotmatrix | R | false | true | 2,014 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix.R
\name{plotmatrix}
\alias{plotmatrix}
\title{Make charts with matrices}
\usage{
plotmatrix(matrix, title = "", subtitle = "", low_color = "#132B43",
high_color = "#56B1F7", legend_position = "top", xlabel = "",
ylabel = "", title_size = 8, subtitle_size = 7, height = 0.9,
width = 0.9, color = "black", key_size = 1.5, colour = "black",
text_include = FALSE)
}
\arguments{
\item{matrix}{named data.frame or matrix}
\item{subtitle}{subtitle of the Plot}
\item{low_color}{is the color assigned to the lowest value}
\item{high_color}{is the color assigned to the highest value}
\item{legend_position}{position of legend}
\item{xlabel}{x label for the chart}
\item{ylabel}{ylabel for the chart}
\item{title_size}{size of the title text}
\item{subtitle_size}{size of the subtitle text}
\item{height}{height of each cell}
\item{width}{width of each cell}
\item{color}{color of the cell borders}
\item{key_size}{size of legend keys}
\item{colour}{colour of the labels}
\item{text_include}{if text labels should be included; by default it is FALSE}
\item{title}{title of the Plot}
}
\description{
Given a named matrix or a data frame, this function will return a ggplot object that
represents a tile chart with the values of the matrix. The library gives the user the
choice to change the color, title, subtitle and the legend position in the plot.
}
\details{
If a data frame is used, the column will be taken as the column of the matrix and the rows
will be taken as the rows of the matrix and the function will plot accordingly.
A matrix input is also allowed.
Chart title and x-axis and y-axis labels are optional.
plotmatrix is inspired by hrbrmstr's waffle package (@hrbrmstr)
}
\examples{
data_matrix <- data.frame(a=c(1,0,0),b=c(0,1,0),c=c(0,0,1))
plotmatrix(data_matrix)
# Plotting matrix with high and low color
plotmatrix(data_matrix,low_color="#f6efb9",high_color="#d6efd9")
}
|
#' @title delete_points
#' @description Delete groups from scatterplots
#' @param raw_data data
delete_group <- function(raw_data){
	# Interactively remove one whole group from digitised scatterplot data.
	# Presents the unique group identifiers as a console menu (blocking;
	# interactive sessions only) and drops every row of the chosen group.
	ids <- unique(raw_data$id)
	# utils::menu() returns the 1-based index of the user's choice.
	remove <- utils::menu(ids)
	raw_data <- subset(raw_data, raw_data$id != ids[remove])
	# Drop the now-unused level so downstream legends/plots stay clean.
	# NOTE(review): assumes `id` is a factor -- droplevels() errors on a plain
	# character vector; confirm how `id` is created upstream.
	raw_data$id <- droplevels(raw_data$id)
	return(raw_data)
}
#' @title edit_group
#' @description Edit group points in scatterplots
#' @param raw_data data
#' @param group_id group_id
#' @param calpoints The calibration points
#' @param cex point size
#' @param ... other functions to pass to internal_redraw
edit_group <- function(raw_data, group_id, calpoints, cex, ...){
	# Interactively add/delete the clicked points making up one group of a
	# digitised scatterplot.  Called either for a brand-new group (group_id
	# supplied) or to edit an existing one (group_id NULL -> user picks from a
	# menu).  Returns raw_data with this group's rows replaced/appended.
	#
	# 90 recycled colour/symbol combinations so each group index i gets a
	# distinct (col, pch) pair.
	cols <- rep(c("red", "green", "purple"),length.out=90)
	pchs <- rep(rep(c(19, 17, 15),each=3),length.out=90)
	# "Exit" box in the bottom-left corner, sized from the calibration points;
	# clicking inside it ends point-adding mode.
	box_y <- c(mean(calpoints$y[3:4]), mean(calpoints$y[3:4]),0,0,mean(calpoints$y[3:4]))/2
	box_x <- c(0,mean(calpoints$x[1:2]), mean(calpoints$x[1:2]),0,0)/2
	if(!is.null(group_id)) {
		# New group: start empty and take the next free group index.
		group_data <- data.frame()
		i <- if(nrow(raw_data)==0){ 1 }else{ max(raw_data$group) + 1 }
		add_removeQ <- "a"
	}else{
		# Edit mode: user picks which existing group to work on; its rows are
		# pulled out of raw_data into group_data for the duration of the edit.
		group_id <- unique(raw_data$id)[ utils::menu(unique(raw_data$id)) ]
		group_data <- subset(raw_data, raw_data$id==group_id)
		i <- unique(group_data$group)
		add_removeQ <- "b"
		raw_data <- subset(raw_data, raw_data$id != group_id)
		idQ <- user_options("Change group identifier? (y/n) ",c("y","n"))
		if(idQ=="y"){
			group_id <- user_unique("\nGroup identifier: ", unique(raw_data$id))
			group_data$id <- group_id
		}
	}
	# State machine on add_removeQ: "a" = adding points, "d" = deleting a
	# point, "b" = idle (prompt), "c" = done.
	while(add_removeQ!="c"){
		if(add_removeQ=="a"){
			graphics::polygon(box_x,box_y, col="red", border=NA,xpd=TRUE)
			cat("\nClick on points you want to add.\nIf you want to remove a point, or are finished with a group, \nexit by clicking on red box in bottom left corner, then follow prompts\n")
		}
		while(add_removeQ=="a"){
			select_points <-
			locator_mD(1,line=FALSE, lwd=2, col=cols[i], pch=pchs[i], cex=cex)
			#graphics::locator(1,type="p", lwd=2, col=cols[i], pch=pchs[i])
			# A click inside the red exit box leaves adding mode; any other
			# click is recorded as a new data point for this group.
			if( select_points$x<max(box_x) & select_points$y<max(box_y) & select_points$x>min(box_x) & select_points$y>min(box_y)) {
				add_removeQ <- "b"
			}
			else{
				group_data <- rbind(group_data, data.frame(id=group_id, x=select_points$x, y=select_points$y, group=i, col=cols[i], pch=pchs[i]) )
			}
		}
		if(add_removeQ=="d"){
			cat("\nClick on point you want to delete\n")
			remove <- graphics::identify(group_data$x,group_data$y, n=1)
			if(length(remove)>0) {
				# Paint the deleted point white to visually erase it, then drop it.
				graphics::points(group_data$x[remove], group_data$y[remove],cex=cex, col="white", pch=19)
				group_data <- group_data[-remove,]
			}
		}
		internal_redraw(...,calpoints=calpoints,cex=cex,raw_data=rbind(raw_data, group_data), calibration=TRUE, points=TRUE)
		add_removeQ <- readline("\nAdd or Delete points to this group, or Continue? (a/d/c) \n")
	}
	raw_data <- rbind(raw_data, group_data)
	return(raw_data)
}
#' @title group_scatter_extract
#' @description Extraction of data from scatterplots
#' @param edit logical; whether in edit mode
#' @param raw_data raw data
#' @param cex point size
#' @param ... arguments passed to internal_redraw
group_scatter_extract <- function(edit=FALSE, raw_data = data.frame(), cex, ...){
	# Top-level interactive loop for digitising scatterplot data, one group at
	# a time.  With edit=TRUE it resumes on existing raw_data (skipping the
	# initial new-group prompt); otherwise it starts by creating a group.
	editQ <- if(edit){ "b" }else{ "a" }
	if(!edit) cat("\nIf there are multiple groups, enter unique group identifiers (otherwise press enter)")
	# editQ: "a" = add a new group, "e" = edit a group, "d" = delete a group,
	# "f" = finished.
	while(editQ != "f"){
		group_id <- NULL
		if(editQ=="a"){
			# New group: ask for an identifier not already in use.
			group_id <- user_unique("\nGroup identifier: ", unique(raw_data$id))
			editQ <- "e"
		}
		if(editQ == "e") raw_data <- edit_group(raw_data, group_id, cex=cex, ...)
		if(editQ == "d") raw_data <- delete_group(raw_data)
		internal_redraw(...,raw_data=raw_data, calibration=TRUE, points=TRUE, cex=cex)
		editQ <- readline("\nAdd group, Edit group, Delete group, or Finish plot? (a/e/d/f) \n")
	}
	return(raw_data)
}
| /R/S_extract.R | no_license | devanmcg/metaDigitise | R | false | false | 3,815 | r |
#' @title delete_points
#' @description Delete groups from scatterplots
#' @param raw_data data
delete_group <- function(raw_data){
	# Interactively remove one whole group from digitised scatterplot data.
	# Presents the unique group identifiers as a console menu (blocking;
	# interactive sessions only) and drops every row of the chosen group.
	ids <- unique(raw_data$id)
	# utils::menu() returns the 1-based index of the user's choice.
	remove <- utils::menu(ids)
	raw_data <- subset(raw_data, raw_data$id != ids[remove])
	# Drop the now-unused level so downstream legends/plots stay clean.
	# NOTE(review): assumes `id` is a factor -- droplevels() errors on a plain
	# character vector; confirm how `id` is created upstream.
	raw_data$id <- droplevels(raw_data$id)
	return(raw_data)
}
#' @title edit_group
#' @description Edit group points in scatterplots
#' @param raw_data data
#' @param group_id group_id
#' @param calpoints The calibration points
#' @param cex point size
#' @param ... other functions to pass to internal_redraw
edit_group <- function(raw_data, group_id, calpoints, cex, ...){
	# Interactively add/delete the clicked points making up one group of a
	# digitised scatterplot.  Called either for a brand-new group (group_id
	# supplied) or to edit an existing one (group_id NULL -> user picks from a
	# menu).  Returns raw_data with this group's rows replaced/appended.
	#
	# 90 recycled colour/symbol combinations so each group index i gets a
	# distinct (col, pch) pair.
	cols <- rep(c("red", "green", "purple"),length.out=90)
	pchs <- rep(rep(c(19, 17, 15),each=3),length.out=90)
	# "Exit" box in the bottom-left corner, sized from the calibration points;
	# clicking inside it ends point-adding mode.
	box_y <- c(mean(calpoints$y[3:4]), mean(calpoints$y[3:4]),0,0,mean(calpoints$y[3:4]))/2
	box_x <- c(0,mean(calpoints$x[1:2]), mean(calpoints$x[1:2]),0,0)/2
	if(!is.null(group_id)) {
		# New group: start empty and take the next free group index.
		group_data <- data.frame()
		i <- if(nrow(raw_data)==0){ 1 }else{ max(raw_data$group) + 1 }
		add_removeQ <- "a"
	}else{
		# Edit mode: user picks which existing group to work on; its rows are
		# pulled out of raw_data into group_data for the duration of the edit.
		group_id <- unique(raw_data$id)[ utils::menu(unique(raw_data$id)) ]
		group_data <- subset(raw_data, raw_data$id==group_id)
		i <- unique(group_data$group)
		add_removeQ <- "b"
		raw_data <- subset(raw_data, raw_data$id != group_id)
		idQ <- user_options("Change group identifier? (y/n) ",c("y","n"))
		if(idQ=="y"){
			group_id <- user_unique("\nGroup identifier: ", unique(raw_data$id))
			group_data$id <- group_id
		}
	}
	# State machine on add_removeQ: "a" = adding points, "d" = deleting a
	# point, "b" = idle (prompt), "c" = done.
	while(add_removeQ!="c"){
		if(add_removeQ=="a"){
			graphics::polygon(box_x,box_y, col="red", border=NA,xpd=TRUE)
			cat("\nClick on points you want to add.\nIf you want to remove a point, or are finished with a group, \nexit by clicking on red box in bottom left corner, then follow prompts\n")
		}
		while(add_removeQ=="a"){
			select_points <-
			locator_mD(1,line=FALSE, lwd=2, col=cols[i], pch=pchs[i], cex=cex)
			#graphics::locator(1,type="p", lwd=2, col=cols[i], pch=pchs[i])
			# A click inside the red exit box leaves adding mode; any other
			# click is recorded as a new data point for this group.
			if( select_points$x<max(box_x) & select_points$y<max(box_y) & select_points$x>min(box_x) & select_points$y>min(box_y)) {
				add_removeQ <- "b"
			}
			else{
				group_data <- rbind(group_data, data.frame(id=group_id, x=select_points$x, y=select_points$y, group=i, col=cols[i], pch=pchs[i]) )
			}
		}
		if(add_removeQ=="d"){
			cat("\nClick on point you want to delete\n")
			remove <- graphics::identify(group_data$x,group_data$y, n=1)
			if(length(remove)>0) {
				# Paint the deleted point white to visually erase it, then drop it.
				graphics::points(group_data$x[remove], group_data$y[remove],cex=cex, col="white", pch=19)
				group_data <- group_data[-remove,]
			}
		}
		internal_redraw(...,calpoints=calpoints,cex=cex,raw_data=rbind(raw_data, group_data), calibration=TRUE, points=TRUE)
		add_removeQ <- readline("\nAdd or Delete points to this group, or Continue? (a/d/c) \n")
	}
	raw_data <- rbind(raw_data, group_data)
	return(raw_data)
}
#' @title group_scatter_extract
#' @description Extraction of data from scatterplots
#' @param edit logical; whether in edit mode
#' @param raw_data raw data
#' @param cex point size
#' @param ... arguments passed to internal_redraw
group_scatter_extract <- function(edit=FALSE, raw_data = data.frame(), cex, ...){
	# Top-level interactive loop for digitising scatterplot data, one group at
	# a time.  With edit=TRUE it resumes on existing raw_data (skipping the
	# initial new-group prompt); otherwise it starts by creating a group.
	editQ <- if(edit){ "b" }else{ "a" }
	if(!edit) cat("\nIf there are multiple groups, enter unique group identifiers (otherwise press enter)")
	# editQ: "a" = add a new group, "e" = edit a group, "d" = delete a group,
	# "f" = finished.
	while(editQ != "f"){
		group_id <- NULL
		if(editQ=="a"){
			# New group: ask for an identifier not already in use.
			group_id <- user_unique("\nGroup identifier: ", unique(raw_data$id))
			editQ <- "e"
		}
		if(editQ == "e") raw_data <- edit_group(raw_data, group_id, cex=cex, ...)
		if(editQ == "d") raw_data <- delete_group(raw_data)
		internal_redraw(...,raw_data=raw_data, calibration=TRUE, points=TRUE, cex=cex)
		editQ <- readline("\nAdd group, Edit group, Delete group, or Finish plot? (a/e/d/f) \n")
	}
	return(raw_data)
}
|
library(testthat)
library(vdiffr)
library(dplyr)
## Plotting tests.
# Since it is difficult to test specific output of plots, these tests instead
# use vdiffr to ensure that plots match a saved reference version. The tests
# below simple build simple graphs of each type, thus exercising the different
# plotting options and ensuring they all function.
# Contexts are no longer required or recommended in testthat, but vdiffr still
# wants one to place the figure files correctly. See
# https://github.com/r-lib/vdiffr/issues/71
context("plot")
test_that("simple line graph", {
  # Hand-built covidcast_signal fixture: ten daily values for one geo ("pa")
  # with a constant stderr, so the rendered plot is fully deterministic.
  fake_data <- structure(data.frame(
    value = 1:10,
    time_value = seq.Date(as.Date("2020-01-01"), as.Date("2020-01-10"),
                          by = "day"),
    issue = as.Date("2020-02-01"),
    geo_value = "pa",
    stderr = 0.5),
    class = c("covidcast_signal", "data.frame")
  )
  # vdiffr snapshot: exercises the line plot type with custom range, title,
  # axis labels and stderr bands all at once.
  expect_doppelganger("simple line graph", plot(
    fake_data,
    plot_type = "line",
    range = c(-1, 11),
    title = "Penguins!",
    line_params = list(
      xlab = "Day",
      ylab = "Penguinocity",
      stderr_bands = TRUE,
      stderr_alpha = 0.3
    )
  ))
})
test_that("state line graphs", {
  # State-level survey data stored as an .rds test fixture.
  fb_state <- readRDS(test_path("data/survey-data-state.rds"))
  expect_doppelganger("default state line graph",
                      plot(fb_state, plot_type = "line"))
  # Subset of three states, with stderr bands enabled.
  expect_doppelganger("state line graph with stderrs",
                      plot(filter(fb_state, geo_value %in% c("pa", "tx", "ny")),
                           plot_type = "line",
                           line_params = list(stderr_bands = TRUE)))
  expect_doppelganger("state line graph with range",
                      plot(fb_state, plot_type = "line",
                           range = c(0, 10)))
})
test_that("simple state choropleths", {
  # State- and county-level fixtures exercise both geo types of the choropleth.
  fb_state <- readRDS(test_path("data/survey-data-state.rds"))
  expect_doppelganger("default state choropleth",
                      plot(fb_state, plot_type = "choro"))
  # NOTE(review): `include` deliberately mixes letter case -- presumably to
  # check case-insensitive geo matching; confirm.
  expect_doppelganger("default state choropleth with include",
                      plot(fb_state, plot_type = "choro",
                           include = c("pa", "OH", "in", "KY")))
  expect_doppelganger("default state choropleth with range",
                      plot(fb_state, plot_type = "choro",
                           range = c(0, 4)))
  fb_county <- readRDS(test_path("data/survey-data-county.rds"))
  expect_doppelganger("default county choropleth",
                      plot(fb_county, plot_type = "choro"))
  expect_doppelganger("default county choropleth with include",
                      plot(fb_county, plot_type = "choro",
                           include = c("pa", "OH", "in", "KY")))
  # Work-in-progress signals may not have metadata, so we should preserve the
  # ability to plot them by manually specifying range
  attributes(fb_state)$metadata <- NULL
  attributes(fb_state)$metadata$geo_type <- "state"
  expect_doppelganger("state choropleth with no metadata",
                      plot(fb_state, plot_type = "choro",
                           range = c(0, 2)))
})
test_that("state bubble plot with both missing and 0 values", {
  # Fixture has an explicit zero ("tx") and most states absent entirely, so
  # the bubble plot must distinguish zero-valued bubbles from missing data.
  fake_data <- structure(data.frame(
    value = c(1, 2, 0, 3),
    geo_value = c("pa", "in", "tx", "wy"),
    time_value = as.Date("2020-01-01"),
    issue = as.Date("2020-02-01"),
    stderr = 0.5),
    class = c("covidcast_signal", "data.frame"),
    metadata = list(geo_type = "state")
  )
  # we suppress the warning about missing data
  expect_doppelganger("bubble plot with 0 and missing",
                      suppressWarnings(
                        plot(fake_data, plot_type = "bubble",
                             range = c(0, 3))))
})
test_that("simple county bubble plot", {
  fb_county <- readRDS(test_path("data/survey-data-county.rds"))
  # NOTE(review): warnings are suppressed here as in the bubble test above --
  # presumably about counties with missing data; confirm.
  expect_doppelganger("simple county bubble plot",
                      suppressWarnings(
                        plot(fb_county, plot_type = "bubble")))
})
| /R-packages/covidcast/tests/testthat/test-plot.R | no_license | yelselmiao/covidcast | R | false | false | 3,972 | r | library(testthat)
library(vdiffr)
library(dplyr)
## Plotting tests.
# Since it is difficult to test specific output of plots, these tests instead
# use vdiffr to ensure that plots match a saved reference version. The tests
# below simple build simple graphs of each type, thus exercising the different
# plotting options and ensuring they all function.
# Contexts are no longer required or recommended in testthat, but vdiffr still
# wants one to place the figure files correctly. See
# https://github.com/r-lib/vdiffr/issues/71
context("plot")
test_that("simple line graph", {
  # Hand-built covidcast_signal fixture: ten daily values for one geo ("pa")
  # with a constant stderr, so the rendered plot is fully deterministic.
  fake_data <- structure(data.frame(
    value = 1:10,
    time_value = seq.Date(as.Date("2020-01-01"), as.Date("2020-01-10"),
                          by = "day"),
    issue = as.Date("2020-02-01"),
    geo_value = "pa",
    stderr = 0.5),
    class = c("covidcast_signal", "data.frame")
  )
  # vdiffr snapshot: exercises the line plot type with custom range, title,
  # axis labels and stderr bands all at once.
  expect_doppelganger("simple line graph", plot(
    fake_data,
    plot_type = "line",
    range = c(-1, 11),
    title = "Penguins!",
    line_params = list(
      xlab = "Day",
      ylab = "Penguinocity",
      stderr_bands = TRUE,
      stderr_alpha = 0.3
    )
  ))
})
test_that("state line graphs", {
  # State-level survey data stored as an .rds test fixture.
  fb_state <- readRDS(test_path("data/survey-data-state.rds"))
  expect_doppelganger("default state line graph",
                      plot(fb_state, plot_type = "line"))
  # Subset of three states, with stderr bands enabled.
  expect_doppelganger("state line graph with stderrs",
                      plot(filter(fb_state, geo_value %in% c("pa", "tx", "ny")),
                           plot_type = "line",
                           line_params = list(stderr_bands = TRUE)))
  expect_doppelganger("state line graph with range",
                      plot(fb_state, plot_type = "line",
                           range = c(0, 10)))
})
test_that("simple state choropleths", {
  # State- and county-level fixtures exercise both geo types of the choropleth.
  fb_state <- readRDS(test_path("data/survey-data-state.rds"))
  expect_doppelganger("default state choropleth",
                      plot(fb_state, plot_type = "choro"))
  # NOTE(review): `include` deliberately mixes letter case -- presumably to
  # check case-insensitive geo matching; confirm.
  expect_doppelganger("default state choropleth with include",
                      plot(fb_state, plot_type = "choro",
                           include = c("pa", "OH", "in", "KY")))
  expect_doppelganger("default state choropleth with range",
                      plot(fb_state, plot_type = "choro",
                           range = c(0, 4)))
  fb_county <- readRDS(test_path("data/survey-data-county.rds"))
  expect_doppelganger("default county choropleth",
                      plot(fb_county, plot_type = "choro"))
  expect_doppelganger("default county choropleth with include",
                      plot(fb_county, plot_type = "choro",
                           include = c("pa", "OH", "in", "KY")))
  # Work-in-progress signals may not have metadata, so we should preserve the
  # ability to plot them by manually specifying range
  attributes(fb_state)$metadata <- NULL
  attributes(fb_state)$metadata$geo_type <- "state"
  expect_doppelganger("state choropleth with no metadata",
                      plot(fb_state, plot_type = "choro",
                           range = c(0, 2)))
})
test_that("state bubble plot with both missing and 0 values", {
  # Fixture has an explicit zero ("tx") and most states absent entirely, so
  # the bubble plot must distinguish zero-valued bubbles from missing data.
  fake_data <- structure(data.frame(
    value = c(1, 2, 0, 3),
    geo_value = c("pa", "in", "tx", "wy"),
    time_value = as.Date("2020-01-01"),
    issue = as.Date("2020-02-01"),
    stderr = 0.5),
    class = c("covidcast_signal", "data.frame"),
    metadata = list(geo_type = "state")
  )
  # we suppress the warning about missing data
  expect_doppelganger("bubble plot with 0 and missing",
                      suppressWarnings(
                        plot(fake_data, plot_type = "bubble",
                             range = c(0, 3))))
})
test_that("simple county bubble plot", {
  fb_county <- readRDS(test_path("data/survey-data-county.rds"))
  # NOTE(review): warnings are suppressed here as in the bubble test above --
  # presumably about counties with missing data; confirm.
  expect_doppelganger("simple county bubble plot",
                      suppressWarnings(
                        plot(fb_county, plot_type = "bubble")))
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.